source/unres/unres.F90
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue  !
! approximation.                                                              !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu,dajczas
      use io_base, only:ilen
      use geometry, only:chainbuild
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization)'
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD)'
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
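! modecalc, read from the input deck, selects the run mode; the table above
! is used only to echo a human-readable banner.  The actual dispatch to the
! exec_* driver routines happens after initialization below.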
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*) '### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
      write (iout,*) "After readrtns"
      call cartprint
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_bond next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
        call exec_checkgrad
      else
        write (iout,'(a,i5)') 'This calculation type is not supported: ',&
         modecalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
      call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild,chainbuild_cart
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
      use compare, only:alloc_compare_arrays
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
      call alloc_compare_arrays
      print *,'After MD alloc'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
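! Starting geometry: with extconf the conformation is given in internal
! coordinates and chainbuild generates the Cartesian coordinates from them;
! otherwise the Cartesian coordinates were read in directly and
! chainbuild_cart rebuilds the derived geometry arrays (interpretation of the
! two builders inferred from their names and usage elsewhere in this file).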
      if (extconf) then
        call chainbuild
      else
        call chainbuild_cart
      endif
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn
      use compare, only:alloc_compare_arrays

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays
      call alloc_compare_arrays
!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD",remd_mlist,nrep
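! remd_mlist is true when the input supplied an explicit replica-multiplicity
! list remd_m(1:nrep); otherwise each of the nrep temperature levels is
! assigned a multiplicity of 1 before MREMD is started.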
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data
      use MD_data, only: iset
      use io_base
      use geometry, only:chainbuild,chainbuild_cart,geom_to_var ! both extra procedures are called below
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres) :: iharp        !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      if (iset.eq.0) iset=1
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        call chainbuild
        write(iout,*) 'Warning: Calling chainbuild'
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
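! With split_ene the long- and short-range contributions are evaluated
! separately (etotal_long/etotal_short) and their term-by-term sum is printed,
! which gives a consistency check against the full etotal call made below.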
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

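! Choice of minimizer: with dccart the energy is minimized directly in
! Cartesian (virtual-bond vector) space via minim_dc; otherwise the geometry
! is converted to internal variables (geom_to_var) and minimized with the
! SUMSL-based minimize routine.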
        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
!        call hairpin(.true.,nharp,iharp)
!        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)') '# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr,frac,frac_nn,co
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_ncc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
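! Energy component 14 is subtracted so that the reported total excludes the
! restraint term used during regularization (that index 14 is the
! distance-restraint component is an assumption; see gen_dist_constr above).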
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
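! modecalc=3 runs Monte Carlo with minimization (do_mcm), modecalc=6 runs
! entropic-sampling MC with minimization (entmcm); with minim switched off a
! plain Monte Carlo search is performed instead.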
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man                                ! rank of the worker a message came from
      integer,dimension(mpi_status_size) :: muster  ! MPI status (must be an integer array)
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif
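! Master side of a simple task farm.  While free workers remain (mm < nodes)
! the master reads conformations and ships them out; once all workers are
! busy it alternates receiving a result with handing a fresh task to the
! worker that just reported (identified through mpi_source in the status
! array).  ind(1:6) carries task metadata (ind(2) = conformation number);
! an all-zero ind array, sent in the loop after label 1101, tells a worker
! to shut down.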

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
      nmin=0       ! count of minimized conformations collected below
      nf_mcmf=0    ! accumulated function-evaluation count from the workers
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                        CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                        man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                        mpi_double_precision,man,idreal,&
                        CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                        mpi_double_precision,man,idreal,&
                        CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
          if (nfgtasks.gt.1) &
            call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild5'
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
#ifdef UMB
      usampl=.true.
#endif
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      write (iout,*) "before etotal"
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
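! Computed-GOTO dispatch on icheckgrad: 1 - check_ecartint (gradient in
! Cartesian coordinates of the internal-coordinate energy), 2 -
! check_cartgrad, 3 - check_eint (gradient in internal variables).  Cases 1
! and 2 repeat the call, presumably to catch state that is not reset between
! evaluations.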
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      call check_ecartint
      return
  20  call check_cartgrad
      call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only on parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      debug=.false.     ! verbosity flag for secondary2 (assumed off)
      call alloc_compare_arrays
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
      use MPI_data   ! assumed to provide fg_rank, king, FG_COMM, nfgtasks, kolor, myrank
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
      time_order(:)=0.0d0    ! zero the per-order wait-time accumulators printed at exit
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait here in a loop for orders and data from the master (king).
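! Order codes broadcast by the master:
!    0 - etotal            1 - sum_gradient      2 - etotal_short
!    3 - etotal_long       4 - ginv_mult         5 - MD setup for a slave
!    6 - int_from_cart1    7 - chainbuild_cart   8 - intcartderiv
!    9 - fricmat_mult     10 - setup_fricmat    < 0 - leave the loop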
      iorder=0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*) 'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum