CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
C                                                                              C
C                                U N R E S                                     C
C                                                                              C
C Program to carry out conformational search of proteins in a united-residue   C
C approximation.                                                               C
C                                                                              C
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'


#ifdef MPI
      include 'mpif.h'
      include 'COMMON.SETUP'
#endif
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      double precision hrtime,mintime,sectime
      character*64 text_mode_calc(-2:14) /'test',
     & 'SC rotamer distribution',
     & 'Energy evaluation or minimization',
     & 'Regularization of PDB structure',
     & 'Threading of a sequence on PDB structures',
     & 'Monte Carlo (with minimization) ',
     & 'Energy minimization of multiple conformations',
     & 'Checking energy gradient',
     & 'Entropic sampling Monte Carlo (with minimization)',
     & 'Energy map',
     & 'CSA calculations',
     & 'Not used 9',
     & 'Not used 10',
     & 'Soft regularization of PDB structure',
     & 'Mesoscopic molecular dynamics (MD) ',
     & 'Not used 13',
     & 'Replica exchange molecular dynamics (REMD)'/
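c The text_mode_calc array maps each value of the MODECALC control variable
c (-2:14) to the descriptive calculation name printed in the output header.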
      external ilen

c      call memmon_print_usage()

      call init_task
      if (me.eq.king)
     & write(iout,*)'### LAST MODIFIED  4/25/08 7:29PM by adam'
      if (me.eq.king) call cinfo
C Read force field parameters and job setup data
      call readrtns
C
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)')
     & text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))),
     & ' calculation.'
       if (minim) write (iout,'(a)')
     &  'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
C
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "Calling check_bond"
        call check_bond
        stop
      endif
#ifdef MPI
      if (fg_rank.gt.0) then
C Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
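C Processes with fg_rank=0 (or a serial run) dispatch on the MODECALC
C control variable read from the input file.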
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
#ifdef MPI
        call exec_MREMD
#else
        write (iout,*) "Need a parallel version to run MREMD."
        stop
#endif
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',
     &   modecalc
      endif
#ifdef MPI
      endif
C Finish task.
      if (fg_rank.eq.0) call finish_task
c      call memmon_print_usage()
#ifdef TIMING
      call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif
      end
c--------------------------------------------------------------------------
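c exec_MD: mesoscopic molecular dynamics (MODECALC=12).  Builds the chain in
c Cartesian coordinates and hands control to the MD driver.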
      subroutine exec_MD
      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
c      if (me.eq.king .or. .not. out1file) then
c        write (iout,*) "Calling chainbuild"
c        call flush(iout)
c      endif
      call chainbuild
c      if (me.eq.king .or. .not. out1file) then
c        write (iout,*) "Calling MD"
c        call flush(iout)
c      endif
      call MD
      return
      end
c---------------------------------------------------------------------------
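c exec_MREMD: multiplexed replica exchange molecular dynamics (MODECALC=14).
c If no replica multiplicity list was given (remd_mlist=.false.), every
c replica is assigned multiplicity 1 before MREMD is called.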
#ifdef MPI
      subroutine exec_MREMD
      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      include 'COMMON.REMD'
      if (me.eq.king .or. .not. out1file)
     &   write (iout,*) "Calling chainbuild"
      call chainbuild
      if (me.eq.king .or. .not. out1file)
     &   write (iout,*) "Calling REMD"
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end
#endif
c---------------------------------------------------------------------------
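c exec_eeval_or_minim: single-point energy evaluation or energy minimization
c (MODECALC=0).  Optionally splits the energy into long- and short-range
c contributions (split_ene) and minimizes either in Cartesian coordinates
c (dccart, minim_dc) or in internal coordinates (geom_to_var + minimize).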
      subroutine exec_eeval_or_minim
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      common /srutu/ icall
      double precision energy(0:n_ene)
      double precision energy_long(0:n_ene),energy_short(0:n_ene)
      double precision varia(maxvar)
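c If the starting structure was read from a PDB file (indpdb.ne.0), seed the
c origin vector dc(*,0) from the coordinates of the first stored site c(*,1);
c otherwise rebuild the chain from internal coordinates first.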
      if (indpdb.eq.0) call chainbuild
      print *,'dc',c(1,1)
      if (indpdb.ne.0) then
        dc(1,0)=c(1,1)
        dc(2,0)=c(2,1)
        dc(3,0)=c(3,1)
      endif
#ifdef MPI
      time00=MPI_Wtime()
#else
      time00=tcpu()
#endif
      call chainbuild_cart
      print *,'dc',dc(1,0),dc(2,0),dc(3,0)
      if (split_ene) then
       print *,"Processor",myrank," after chainbuild"
       icall=1
       call etotal_long(energy_long(0))
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long(0))
       call etotal_short(energy_short(0))
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short(0))
       do i=0,n_ene
         energy(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy(0))
      endif
      call etotal(energy(0))
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#else
      time_ene=tcpu()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy(0)
      etot = etota
      call enerprint(energy(0))
      call hairpin(.true.,nharp,iharp)
      print *,'after hairpin'
      call secondary2(.true.)
      print *,'after secondary'
      if (minim) then
crc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#else
          time1=tcpu()
#endif
          call minim_dc(etot,iretcode,nfun)
        else
          if (indpdb.ne.0) then
            call bond_regular
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#else
          time1=tcpu()
#endif
          call minimize(etot,varia,iretcode,nfun)
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#else
        evals=nfun/(tcpu()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
        call hairpin(.false.,nharp,iharp)
        print *,'after hairpin'
        call secondary2(.true.)
        print *,'after secondary'
        call etotal(energy(0))
        etot = energy(0)
        call enerprint(energy(0))

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      return
      end
c---------------------------------------------------------------------------
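c exec_regularize: regularization of a PDB-derived structure (MODECALC=1).
c The chain is fitted to the reference coordinates cref and the energy is
c reported with component 14 (presumably the distance-constraint term)
c subtracted from the total.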
      subroutine exec_regularize
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      double precision energy(0:n_ene)

      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy(0))
      energy(0)=energy(0)-energy(14)
      etot=energy(0)
      call enerprint(energy(0))
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end
c---------------------------------------------------------------------------
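c exec_thread: threading of the target sequence onto PDB template structures
c (MODECALC=2); the actual work is done in thread_seq.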
      subroutine exec_thread
      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      include "COMMON.SETUP"
      call thread_seq
      return
      end
c---------------------------------------------------------------------------
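c exec_MC: Monte Carlo calculations (MODECALC=3 or 6).  With minimization on,
c MODECALC=3 runs MCM (do_mcm) and MODECALC=6 entropic-sampling MC (entmcm);
c otherwise a plain Monte Carlo run (monte_carlo) is performed.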
      subroutine exec_MC
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      character*10 nodeinfo
      double precision varia(maxvar)
#ifdef MPI
      include "mpif.h"
#endif
      include "COMMON.SETUP"
      include 'COMMON.CONTROL'
      call mcm_setup
      if (minim) then
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
      else
        call monte_carlo
      endif
      return
      end
c---------------------------------------------------------------------------
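c exec_mult_eeval_or_minim: energy evaluation or minimization of multiple
c conformations read from the INTIN file (MODECALC=4), either as Cartesian
c coordinates (read_cart) or as internal angles.  In the parallel version
c the master (king) distributes conformations to the workers over CG_COMM
c and collects the results; in the serial version the conformations are
c processed one by one.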
      subroutine exec_mult_eeval_or_minim
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
      dimension muster(mpi_status_size)
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      double precision varia(maxvar)
      dimension ind(6)
      double precision energy(0:max_ene)
      logical eof
      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",
     &  (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",
     &   (ename(print_order(i)),i=1,nprint_ene),
     &   "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",
     &    (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

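c Without minimization: read successive conformations, evaluate their
c energies and write one line per conformation to the ISTAT file.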
      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy(0))
          call briefout(iconf,energy(0))
          call enerprint(energy(0))
          etot=energy(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,
     &      (energy(print_order(i)),i=1,nprint_ene),etot,
     &       rms,frac,frac_nn,co
cjlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,
     &     (energy(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

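c With minimization (parallel path): the master keeps every worker busy.
c mm counts the workers addressed so far and imm the number of outstanding
c tasks; a task consists of the index array ind, the variable vector varia
c and the starting energy ene0, sent with mpi_send and returned together
c with the minimized conformation and its energy.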
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
c      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,
     *                  ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,
     *                  idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,
     *                  idreal,CG_COMM,ierr)
c         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,
     *                 CG_COMM,muster,ierr)
          man=muster(mpi_source)
c         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,
     *               man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)
c         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          call chainbuild
          call etotal(energy(0))
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy(0)
          call enerprint(energy(0))
          call briefout(it,etot)
c          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,
     &     (energy(print_order(i)),i=1,nprint_ene),etot,
     &     rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,
     &     (energy(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,
     *                  ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,
     *                  idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,
     *                  idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
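c All conformations have been read; collect the results that are still
c outstanding from the workers.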
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,
     *               CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,
     *               man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,
     *               mpi_double_precision,man,idreal,
     *               CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        call chainbuild
        call etotal(energy(0))
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy(0)
        call enerprint(energy(0))
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,
     &   (energy(print_order(i)),i=1,nprint_ene),etot,
     &   rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,
     &    (energy(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
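c Shut the workers down by sending each of them a task with ind(1)=0.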
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,
     *                  ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",
     &   (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))')
     &   (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",
     &   (ename(print_order(i)),i=1,nprint_ene),
     &   "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",
     &   (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
c Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1)
     &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy(0))

        etot=energy(0)
        call enerprint(energy(0))
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,
     &   (energy(print_order(i)),i=1,nprint_ene),
     &   etot,rms,frac,frac_nn,co
cjlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,
     &   (energy(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end
c---------------------------------------------------------------------------
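c exec_checkgrad: check of the energy gradient (MODECALC=5).  ICHECKGRAD
c selects the check routine: 1 - check_ecartint, 2 - check_cartgrad,
c 3 - check_eint (gradient in internal coordinates).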
      subroutine exec_checkgrad
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SETUP'
      include 'COMMON.TIME1'
      include 'COMMON.INTERACT'
      include 'COMMON.NAMES'
      include 'COMMON.GEO'
      include 'COMMON.HEADER'
      include 'COMMON.CONTROL'
      include 'COMMON.CONTACTS'
      include 'COMMON.CHAIN'
      include 'COMMON.VAR'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.REMD'
      include 'COMMON.MD'
      include 'COMMON.SBRIDGE'
      common /srutu/ icall
      double precision energy(0:max_ene)
      print *,"Entering exec_checkgrad"
c      do i=2,nres
c        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
c        if (itype(i).ne.10)
c     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
c      enddo
      if (indpdb.eq.0) call chainbuild
c      do i=0,nres
c        do j=1,3
c          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
c        enddo
c      enddo
c      do i=1,nres-1
c        if (itype(i).ne.10) then
c          do j=1,3
c            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
c          enddo
c        endif
c      enddo
c      do j=1,3
c        dc(j,0)=ran_number(-0.2d0,0.2d0)
c      enddo
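c Switch on the restraint (umbrella-sampling) terms so that their
c contribution (Uconst, Uconst_back, printed below) is included in the
c gradient check; the restrained fragments are read by read_fragments.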
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      print *, "AFTER read fragments"
      call chainbuild_cart
      print *,"chainbuild_cart"
      call cartprint
      print *,"After cartprint"
      call intout
      icall=1
      print *,"before ETOT"
      call etotal(energy(0))
      etot = energy(0)
      call enerprint(energy(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_map
C Energy maps
      call map_read
      call map
      return
      end
c---------------------------------------------------------------------------
      subroutine exec_CSA
#ifdef MPI
      include "mpif.h"
#endif
      include 'DIMENSIONS'
      include 'COMMON.IOUNITS'
C Conformational Space Annealing programmed by Jooyoung Lee.
C This method works only with parallel machines!
#ifdef MPI
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end
c---------------------------------------------------------------------------
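c exec_softreg: soft regularization of a PDB structure (MODECALC=11).
c The energy is evaluated before and after the softreg call; secondary
c structure is recognized by secondary2 unless it was supplied in the
c PDB file (lsecondary).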
      subroutine exec_softreg
      include 'DIMENSIONS'
      include 'COMMON.IOUNITS'
      include 'COMMON.CONTROL'
      double precision energy(0:max_ene)
      call chainbuild
      call etotal(energy(0))
      call enerprint(energy(0))
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy(0))
      etot=energy(0)
      call enerprint(energy(0))
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end