NEWCORR5D working with 15k; work and iwork in random_vel might need testing
[unres4.git] / source / unres / unres.F90
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
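! Human-readable labels for each MODECALC job type (index = MODECALC value).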
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization)'
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD)'
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

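! Initialize the run; init_task presumably sets up the (MPI) environment and
! task data structures. Only the master (me == king) prints version info.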
      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
      write (iout,*) "After readrtns"
      call cartprint
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_bond next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

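! Dispatch on MODECALC. Under MPI, fine-grain slave processors (fg_rank > 0)
! enter the ergastulum service loop and only compute energy/gradient pieces
! on demand; the master and coarse-grain processors run the requested job.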
#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
        call exec_checkgrad
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         modecalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild,chainbuild_cart
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
      use compare, only:alloc_compare_arrays
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
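! Allocate the MD and comparison arrays, build the starting chain (from
! internal coordinates when EXTCONF is set, otherwise from the Cartesian
! coordinates that were read in), then run the dynamics.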
      print *,'Start MD'
      call alloc_MD_arrays
      print *,'After MD alloc'
      call alloc_compare_arrays
      print *,'After compare alloc'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      if (extconf) then
        call chainbuild
      else
        call chainbuild_cart
      endif
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn
      use compare, only:alloc_compare_arrays

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays
      call alloc_compare_arrays
!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
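! If a replica-multiplicity list (REMD_MLIST) was read in, use it as given;
! otherwise assign multiplicity 1 to each of the nrep replicas.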
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD",remd_mlist,nrep
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
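! Single-point energy evaluation and, when MINIM is set, energy minimization
! of the input conformation. With SPLIT_ENE the long- and short-range energy
! contributions are also evaluated and printed separately.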
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data
      use MD_data, only: iset
      use io_base
      use geometry, only:chainbuild,chainbuild_cart
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres) :: iharp        !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      if (iset.eq.0) iset=1
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        call chainbuild
        write(iout,*) 'Warning: Calling chainbuild'
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
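! With SPLIT_ENE, evaluate and print the long-range and the short-range
! parts of the energy separately, then their term-by-term sum.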
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      if (.false.) then
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
      endif
      if (minim) then
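! Pre-minimization fixes: remove steric overlaps of side chains (OVERLAPSC)
! and try local side-chain moves (SEARCHSC); then minimize either in
! Cartesian coordinates (DCCART, minim_dc) or in internal coordinates
! (geom_to_var followed by the SUMSL-based minimize).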
!rc overlap test
        print *,"overlap",searchsc,overlapsc
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
!        call hairpin(.true.,nharp,iharp)
!        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr,frac,frac_nn,co
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_ncc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

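! Regularize a PDB structure: generate distance constraints to the reference
! geometry (CREF), adjust the side chains, minimize, and report the energy
! with component 14 (presumably the constraint term in this numbering)
! excluded from the total.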
      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
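! MODECALC 3: Monte Carlo with minimization (do_mcm); MODECALC 6: entropic
! sampling Monte Carlo with minimization (entmcm). Without MINIM, plain
! Monte Carlo (monte_carlo).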
      if (minim) then
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

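! Parallel version: slave coarse-grain processors enter minim_mcmf and wait
! for tasks, while the master (king) reads conformations from INTIN and
! farms them out over CG_COMM. The serial version loops over the
! conformations directly.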
      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

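! Minimization run: prime each of the nodes-1 workers with one conformation,
! then keep the pipeline full; whenever a result comes back, print it and
! immediately send that worker the next task.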
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
! Initialize the minimization counters.
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                      man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
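! Input exhausted: collect the imm results still outstanding on the workers.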
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
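! Shut the workers down: an all-zero IND array is the termination order.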
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild'
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
#ifdef UMB
      usampl=.true.
#endif
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      write (iout,*) "before etotal"
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
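! ICHECKGRAD selects the gradient test: 1 - check_ecartint (called twice),
! 2 - check_cartgrad (called twice), 3 - check_eint.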
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      call check_ecartint
      return
  20  call check_cartgrad
      call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only on parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

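! Soft regularization: evaluate the starting energy, detect secondary
! structure unless it was supplied with the PDB input (LSECONDARY), apply
! the soft restraints (softreg), then re-evaluate and report the energy.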
      call alloc_compare_arrays
      debug=.false. ! debug flag for secondary2
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
#ifndef FIVEDIAG
      use REMD, only:fricmat_mult,ginv_mult
#endif
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Slaves wait for orders broadcast by the master (king); a negative order
! terminates the loop.
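! Order codes served by this loop:
!   0 - etotal          1 - sum_gradient    2 - etotal_short
!   3 - etotal_long     4 - ginv_mult       5 - MD setup (dimensions and
!                                               scatter of Ginv)
!   6 - int_from_cart1  7 - chainbuild_cart 8 - intcartderiv
!   9 - fricmat_mult   10 - setup_fricmat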
      iorder=0
! Zero the per-order timing accumulators.
      time_order(:)=0.0d0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
#ifndef FIVEDIAG
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxres2,ginv_block)
#endif
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
#ifndef FIVEDIAG
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
#endif
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum