!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) ::  text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
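! Coarse-grain master (or serial run): dispatch the job according to MODECALC.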
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc .eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (ModeCalc.eq.7) then
        call exec_map
      else if (ModeCalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         ModeCalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
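! Allocate the MD work arrays and run a single mesoscopic MD trajectory.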
      print *,'Start MD'
      call alloc_MD_arrays
!      if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays

!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD"
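! If no replica multiplicity list was supplied (REMD_MLIST), assign a
! multiplicity of 1 to every replica before calling MREMD.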
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data

      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      call alloc_compare_arrays
      if (indpdb.eq.0) then
        call chainbuild
        write(iout,*) 'Warning: Calling chainbuild'
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
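! With SPLIT_ENE the long- and short-range contributions are evaluated and
! printed separately before the total energy is computed.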
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
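! Optional minimization: in Cartesian coordinates (DCCART) via MINIM_DC, or in
! internal coordinates via MINIMIZE, optionally preceded by side-chain moves.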
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_nnc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
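! MODECALC 3: Monte Carlo with minimization (DO_MCM); MODECALC 6: entropic
! sampling MC (ENTMCM). Without MINIM a plain Monte Carlo run is performed.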
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
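! In the MPI run, non-master processes act as minimization workers (MINIM_MCMF);
! the master reads conformations, distributes them, and collects results below.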
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

      mm=0
      imm=0
      nft=0
      nf_mcmf=0
      nmin=0
      ene0=0.0d0
      n=0
      iconf=0
!      do n=1,nzsc
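! Master loop: send conformations (internal variables) to the workers, receive
! the minimized variables and energies back, and write the results to ISTAT.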
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
         imm=imm+1
         ind(1)=1
         ind(2)=n
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         ene0=0.0d0
         call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
         call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
         call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
         man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
         call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
         call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
         call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
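! ICHECKGRAD selects the gradient test: 1 - CHECK_ECARTINT, 2 - CHECK_CARTGRAD,
! 3 - CHECK_EINT.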
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables, NF, and NFL from the boss
      iorder=0
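! Order codes broadcast by the master: 0 - ETOTAL, 1 - SUM_GRADIENT,
! 2 - ETOTAL_SHORT, 3 - ETOTAL_LONG, 4 - GINV_MULT, 5 - MD setup,
! 6 - INT_FROM_CART1, 7 - CHAINBUILD_CART, 8 - INTCARTDERIV,
! 9 - FRICMAT_MULT, 10 - SETUP_FRICMAT; a negative order terminates the loop.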
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
       icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
           my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum