!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu,dajczas
      use io_base, only:ilen
      use geometry, only:chainbuild
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80("*")/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_bond next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
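! Coarse-grain master (and the serial build): dispatch to the driver
! routine selected by MODECALC; the codes are listed in TEXT_MODE_CALC above.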
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         modecalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
      call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
      print *,'After MD alloc'
!      if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays

!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD"
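! If no replica-multiplicity list was supplied (REMD_MLIST is false),
! default every entry of REMD_M to 1, i.e. one trajectory per replica.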
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data

      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
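! With SPLIT_ENE set, evaluate the long- and short-range contributions
! separately and list them term by term next to their sum, which can be
! checked against the full ETOTAL call below.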
      if (split_ene) then

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
      write (iout,*) "Time for energy evaluation",time_ene
#endif
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
        print *,'# eval/s',evals
#endif
        print *,'refstr=',refstr
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
#ifdef MPI
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
#endif
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_nnc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
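! MODECALC=3 runs Monte Carlo with minimization (DO_MCM), MODECALC=6 runs
! entropic-sampling MC (ENTMCM); without MINIM a plain Monte Carlo is run.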
      if (minim) then
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
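! Master/worker task farm: the master (king) reads conformations and farms
! them out below, while all other ranks serve minimization requests in
! MINIM_MCMF until the termination message arrives.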
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
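! Farm protocol: a job message carries ind(1)=1 and the task number in
! ind(2), followed by the variables (VARIA) and reference energy (ENE0);
! results come back as VARIA, ENE and ENE0. An all-zero IND terminates
! a worker.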
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
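! End of input: collect the results still outstanding on the workers.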
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
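! Send the all-zero termination message to every worker.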
      do i=1, nodes-1
         ind=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
          if (nfgtasks.gt.1) &
            call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild5'
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
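! Computed GOTO: ICHECKGRAD=1 calls CHECK_ECARTINT, =2 CHECK_CARTGRAD,
! =3 CHECK_EINT (gradient checks in Cartesian and internal coordinates).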
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only on parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      debug=.false.
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MPI_data      !include 'COMMON.SETUP'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for the variables, NF, and NFL from the boss.
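! Order codes broadcast by the master: 0 etotal, 1 sum_gradient,
! 2 etotal_short, 3 etotal_long, 4 ginv_mult, 5 MD setup, 6 int_from_cart1,
! 7 chainbuild_cart, 8 intcartderiv, 9 fricmat_mult, 10 setup_fricmat;
! a negative order terminates the loop.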
      iorder=0
      time_order=0.0d0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
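! DIMEN counts the sites (CA centers NNT..NCT plus NSIDE side chains);
! DIMEN3=3*DIMEN is the corresponding number of Cartesian coordinates.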
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
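! Transpose the slice of GINV received in GCOPY back into GINV.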
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum