source/unres/src_MD-M/unres.F  [unres.git]
important change for single processor for multi-chain
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
C                                                                              C
C                                U N R E S                                     C
C                                                                              C
C Program to carry out conformational search of proteins in a united-residue   C
C approximation.                                                               C
C                                                                              C
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'


#ifdef MPI
      include 'mpif.h'
      include 'COMMON.SETUP'
#endif
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      double precision hrtime,mintime,sectime
      character*64 text_mode_calc(-2:14) /'test',
     & 'SC rotamer distribution',
     & 'Energy evaluation or minimization',
     & 'Regularization of PDB structure',
     & 'Threading of a sequence on PDB structures',
     & 'Monte Carlo (with minimization) ',
     & 'Energy minimization of multiple conformations',
     & 'Checking energy gradient',
     & 'Entropic sampling Monte Carlo (with minimization)',
     & 'Energy map',
     & 'CSA calculations',
     & 'Not used 9',
     & 'Not used 10',
     & 'Soft regularization of PDB structure',
     & 'Mesoscopic molecular dynamics (MD) ',
     & 'Not used 13',
     & 'Replica exchange molecular dynamics (REMD)'/
      external ilen

c      call memmon_print_usage()

      call init_task
      if (me.eq.king)
     & write(iout,*)'### LAST MODIFIED  4/25/08 7:29PM by adam'
      if (me.eq.king) call cinfo
C Read force field parameters and job setup data
      call readrtns
      call flush(iout)
C
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)')
     & text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))),
     & ' calculation.'
       if (minim) write (iout,'(a)')
     &  'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
C
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
        call check_bond
        stop
      endif
#ifdef MPI
      if (fg_rank.gt.0) then
C Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
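C Select the type of calculation according to MODECALC (see the
C text_mode_calc table above).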
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (ModeCalc.eq.7) then
        call exec_map
      else if (ModeCalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
#ifdef MPI
        call exec_MREMD
#else
        write (iout,*) "Need a parallel version to run MREMD."
        stop
#endif
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',
     &   ModeCalc
      endif
#ifdef MPI
      endif
C Finish task.
      if (fg_rank.eq.0) call finish_task
c      call memmon_print_usage()
#ifdef TIMING
      call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif
      end
c--------------------------------------------------------------------------
      subroutine exec_MD
      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      if (me.eq.king .or. .not. out1file)
     &   write (iout,*) "Calling chainbuild"
      call chainbuild
      call MD
      return
      end
c---------------------------------------------------------------------------
#ifdef MPI
      subroutine exec_MREMD
      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      include 'COMMON.REMD'
      if (me.eq.king .or. .not. out1file)
     &   write (iout,*) "Calling chainbuild"
      call chainbuild
      if (me.eq.king .or. .not. out1file)
     &   write (iout,*) "Calling REMD"
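C If no replica multiplicity list was given in the input, assign
C multiplicity 1 to every replica before calling MREMD.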
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end
#endif
c---------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      common /srutu/ icall
      double precision energy(0:n_ene)
      double precision energy_long(0:n_ene),energy_short(0:n_ene)
      double precision varia(maxvar)
      if (indpdb.eq.0) call chainbuild
      print *,'dc',c(1,1)
      if (indpdb.ne.0) then
        dc(1,0)=c(1,1)
        dc(2,0)=c(2,1)
        dc(3,0)=c(3,1)
      endif
#ifdef MPI
      time00=MPI_Wtime()
#else
      time00=tcpu()
#endif
      call chainbuild_cart
      print *,'dc',dc(1,0),dc(2,0),dc(3,0)
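C With SPLIT_ENE the long- and short-range contributions are evaluated
C separately and their sum is printed alongside the full energy.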
      if (split_ene) then
       print *,"Processor",myrank," after chainbuild"
       icall=1
       call etotal_long(energy_long(0))
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long(0))
       call etotal_short(energy_short(0))
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short(0))
       do i=0,n_ene
         energy(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy(0))
      endif
      call etotal(energy(0))
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#else
      time_ene=tcpu()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy(0)
      etot = etota
      call enerprint(energy(0))
      call hairpin(.true.,nharp,iharp)
      print *,'after hairpin'
      call secondary2(.true.)
      print *,'after secondary'
      if (minim) then
crc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

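C Minimize either directly in Cartesian (virtual-bond vector)
C coordinates with MINIM_DC, or in internal coordinates after
C GEOM_TO_VAR.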
        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#else
          time1=tcpu()
#endif
          call minim_dc(etot,iretcode,nfun)
        else
          if (indpdb.ne.0) then
            call bond_regular
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#else
          time1=tcpu()
#endif
          call minimize(etot,varia,iretcode,nfun)
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#else
        evals=nfun/(tcpu()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
        call hairpin(.false.,nharp,iharp)
        print *,'after hairpin'
        call secondary2(.true.)
        print *,'after secondary'
        call etotal(energy(0))
        etot = energy(0)
        call enerprint(energy(0))

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_regularize
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      double precision energy(0:n_ene)

      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy(0))
      energy(0)=energy(0)-energy(14)
      etot=energy(0)
      call enerprint(energy(0))
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_thread
      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      include "COMMON.SETUP"
      call thread_seq
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_MC
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      character*10 nodeinfo
      double precision varia(maxvar)
#ifdef MPI
      include "mpif.h"
#endif
      include "COMMON.SETUP"
      include 'COMMON.CONTROL'
      call mcm_setup
      if (minim) then
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
      dimension muster(mpi_status_size)
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      double precision varia(maxvar)
      dimension ind(6)
      double precision energy(0:max_ene)
      logical eof
      eof=.false.
#ifdef MPI
      if (me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",
     &  (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",
     &   (ename(print_order(i)),i=1,nprint_ene),
     &   "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",
     &    (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy(0))
          call briefout(iconf,energy(0))
          call enerprint(energy(0))
          etot=energy(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,
     &      (energy(print_order(i)),i=1,nprint_ene),etot,
     &       rms,frac,frac_nn,co
cjlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,
     &     (energy(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

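C Master process: distribute conformations to the workers (which run
C MINIM_MCMF above) and collect the minimized structures and energies.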
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
c      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,
     *                  ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,
     *                  idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,
     *                  idreal,CG_COMM,ierr)
c         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,
     *                 CG_COMM,muster,ierr)
          man=muster(mpi_source)
c         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,
     *               man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)
c         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          call chainbuild
          call etotal(energy(0))
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy(0)
          call enerprint(energy(0))
          call briefout(it,etot)
c          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,
     &     (energy(print_order(i)),i=1,nprint_ene),etot,
     &     rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,
     &     (energy(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,
     *                  ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,
     *                  idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,
     *                  idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
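C All conformations have been dispatched; collect the results of the
C tasks still outstanding on the workers.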
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,
     *               CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,
     *               man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        call chainbuild
        call etotal(energy(0))
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy(0)
        call enerprint(energy(0))
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,
     &   (energy(print_order(i)),i=1,nprint_ene),etot,
     &   rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,
     &    (energy(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
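C Send the termination message (ind(1)=0) to every worker.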
      do i=1,nodes-1
        ind(1)=0
        ind(2)=0
        ind(3)=0
        ind(4)=0
        ind(5)=0
        ind(6)=0
        call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,
     *                  ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",
     &   (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))')
     &   (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",
     &   (ename(print_order(i)),i=1,nprint_ene),
     &   "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",
     &   (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
          if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy(0))

        etot=energy(0)
        call enerprint(energy(0))
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,
     &   (energy(print_order(i)),i=1,nprint_ene),
     &   etot,rms,frac,frac_nn,co
cjlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,
     &   (energy(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_checkgrad
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      common /srutu/ icall
      double precision energy(0:max_ene)
c      do i=2,nres
c        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
c        if (itype(i).ne.10)
c     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
c      enddo
      if (indpdb.eq.0) call chainbuild
c      do i=0,nres
c        do j=1,3
c          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
c        enddo
c      enddo
c      do i=1,nres-1
c        if (itype(i).ne.10) then
c          do j=1,3
c            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
c          enddo
c        endif
c      enddo
c      do j=1,3
c        dc(j,0)=ran_number(-0.2d0,0.2d0)
c      enddo
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      call etotal(energy(0))
      etot = energy(0)
      call enerprint(energy(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
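C Computed GOTO on ICHECKGRAD: 1 - check_ecartint, 2 - check_cartgrad,
C 3 - check_eint.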
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_map
C Energy maps
      call map_read
      call map
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_CSA
#ifdef MPI
      include "mpif.h"
#endif
      include 'DIMENSIONS'
      include 'COMMON.IOUNITS'
C Conformational Space Annealing programmed by Jooyoung Lee.
C This method works only with parallel machines!
#ifdef MPI
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_softreg
      include 'DIMENSIONS'
      include 'COMMON.IOUNITS'
      include 'COMMON.CONTROL'
      double precision energy(0:max_ene)
      call chainbuild
      call etotal(energy(0))
      call enerprint(energy(0))
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy(0))
      etot=energy(0)
      call enerprint(energy(0))
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end