!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue  !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) ::  text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
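! The labels above document each value of modecalc, the run-mode switch set
! up from the input by readrtns; the matching exec_* routine is selected in
! the dispatch below.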
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         modecalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
!      if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays

!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD"
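! If no replica-multiplicity list was supplied (remd_mlist is .false.),
! give each of the nrep replica levels multiplicity 1 before calling MREMD.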
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data

      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      call alloc_compare_arrays
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
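! With split_ene, the long- and short-range contributions are computed
! separately; their term-by-term sum, printed below, should reproduce the
! full etotal result.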
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
        print *,'# eval/s',evals
#endif
        print *,'refstr=',refstr
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
!elwrite(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
! The MPI Fortran binding requires an integer rank and an integer status
! array; both were declared real(kind=8) here, apparently a conversion slip.
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
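! (Order code 6 is served by the iorder.eq.6 branch of ergastulum, so the
! fine-grain slaves execute int_from_cart1 alongside the master call below.)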
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

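! Master/worker task farm: the master ships each conformation to a worker as
! the triple (ind, varia, ene0); once all nodes are busy it waits for any
! result, prints it, and immediately hands that worker the next task.  The
! workers run minim_mcmf (see the me.ne.king branch at the top).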
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                      man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
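! Input exhausted: drain the results still outstanding on the imm busy
! workers before shutting the pool down.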
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
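! Send every worker an all-zero ind array, evidently the termination signal
! recognized by minim_mcmf.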
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10) 
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
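! icheckgrad selects which of the three gradient checkers runs:
! 1 -> check_ecartint, 2 -> check_cartgrad, 3 -> check_eint.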
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only on parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      debug=.false. ! assumed default; debug was not set anywhere in this routine
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables, NF, and NFL from the boss
      iorder=0
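! Order codes broadcast by the master and served in the loop below:
!   0 etotal         1 sum_gradient   2 etotal_short    3 etotal_long
!   4 ginv_mult      5 MD setup       6 int_from_cart1  7 chainbuild_cart
!   8 intcartderiv   9 fricmat_mult  10 setup_fricmat   (iorder<0 exits)
! A master-side dispatch would follow the same pattern used elsewhere in
! this file (a sketch only; the real calls live in the energy/MD modules):
!   call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)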
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
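! dimen counts the interaction sites (backbone units nnt..nct plus the
! nside side chains); dimen3 is the corresponding number of Cartesian
! coordinates.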
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxres2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum