!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
      write (iout,*) "After readrtns"
      call cartprint
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

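! In MPI runs the fine-grained slaves (fg_rank > 0) enter ergastulum below and
! keep servicing energy/gradient orders broadcast by their coarse-grained
! master until a negative order tells them to quit; only the coarse-grained
! masters (fg_rank = 0) dispatch the calculation type selected by modecalc.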
#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc .eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (ModeCalc.eq.7) then
        call exec_map
      else if (ModeCalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
!        call exec_checkgrad
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         ModeCalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild,chainbuild_cart
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
      use compare, only:alloc_compare_arrays
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
      print *,'After MD alloc'
      call alloc_compare_arrays
      print *,'After compare alloc'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
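! Build the starting geometry: chainbuild constructs the chain from internal
! coordinates when extconf is set, otherwise chainbuild_cart works directly
! from the Cartesian coordinates already in memory.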
      if (extconf) then
        call chainbuild
      else
        call chainbuild_cart
      endif
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn
      use compare, only:alloc_compare_arrays

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays
      call alloc_compare_arrays
!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD",remd_mlist,nrep
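! If a replica multiplicity list (remd_mlist) was supplied it is used as is;
! otherwise each of the nrep replicas is assigned multiplicity 1 before MREMD
! is started.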
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data
      use MD_data, only: iset
      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode
#ifndef LBFGS
      integer :: nfun
#endif
      integer,dimension(4,nres) :: iharp        !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      if (iset.eq.0) iset=1
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        call chainbuild
        write(iout,*) 'Warning: Calling chainbuild'
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
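! With split_ene the long-range and short-range contributions are evaluated
! and printed separately, then summed term by term and printed, before the
! full energy is recomputed with etotal below.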
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      if (.false.) then
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
      endif
      if (minim) then
!rc overlap test
        print *,"overlap",searchsc,overlapsc
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

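! Minimize either directly in the Cartesian (virtual-bond vector)
! representation with minim_dc, or in internal variables: the geometry is
! packed into varia with geom_to_var and passed to the minimize driver.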
        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
!        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef LBFGS
        print *,'LBFGS return code is',status,' eval ',nfun
#else
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#endif

#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
!        call hairpin(.true.,nharp,iharp)
!        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr,frac,frac_nn,co
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_ncc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'

      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

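! Regularization: distance constraints are generated, side-chain conformations
! are adjusted (sc_conf), the chain is regularized against the reference
! coordinates cref, and the energy is reported with component 14 excluded
! from the total (presumably the restraint term used during regularization).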
      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
#ifdef LBFGS
      write (iout,'(a,a9)') 'LBFGS return code:',status
#else
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
#endif
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,it,&
         nf_mcmf,j
#ifndef LBFGS
      integer :: nfun
#endif
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

      mm=0
      imm=0
      nft=0
      nf_mcmf=0
      nmin=0
      ene0=0.0d0
      n=0
      iconf=0
!      do n=1,nzsc
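! Master/worker task farming over the coarse-grained communicator: the master
! reads successive conformations and sends each worker a 6-element task
! descriptor ind (ind(2) = task number), the starting variables and an initial
! energy (ene0). Once all workers are busy it waits for a result, prints it,
! and hands the freed worker the next conformation; after the input is
! exhausted the remaining imm results are collected and all-zero descriptors
! tell the workers to stop.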
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
         imm=imm+1
         ind(1)=1
         ind(2)=n
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         ene0=0.0d0
         call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
         call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
         call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
         man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
         call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
         call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
         call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild5'
            call chainbuild
          endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
#ifdef UMB
      usampl=.true.
#endif
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      write (iout,*) "before etotal"
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
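! icheckgrad selects the gradient test via the computed GOTO below:
! 1 -> check_ecartint, 2 -> check_cartgrad, 3 -> check_eint.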
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      call check_ecartint
      return
  20  call check_cartgrad
      call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

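! Apply the soft restraints and re-evaluate the energy; the resulting
! secondary structure and, if a reference structure is available, the
! comparison with it are printed afterwards.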
      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
#ifndef FIVEDIAG
      use REMD, only:fricmat_mult,ginv_mult
#endif
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables and NF, and NFL from the boss
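! Orders broadcast by the master (iorder): 0 - full energy, 1 - sum_gradient,
! 2 - short-range energy, 3 - long-range energy, 4 - multiply by Ginv,
! 5 - set up MD dimensions and scatter Ginv, 6 - internal coordinates from
! Cartesian, 7 - chainbuild_cart, 8 - internal-coordinate derivatives,
! 9 - multiply by the friction matrix, 10 - setup_fricmat; a negative order
! terminates the loop.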
      iorder=0
      time_order(:)=0.0d0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
#ifndef FIVEDIAG
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
#endif
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
#ifndef FIVEDIAG
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
#endif
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum