1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2 !                                                                              !
3 !                                U N R E S                                     !
4 !                                                                              !
5 ! Program to carry out conformational search of proteins in a united-residue   !
6 ! approximation.                                                               !
7 !                                                                              !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
9       program unres
10
11       use io_units
12       use control_data
13       use control, only:tcpu
14       use io_base, only:ilen
15       use geometry, only:chainbuild
16       use control, only:dajczas
17       use check_bond_
18       use energy
19       use compare, only: test
20       use map_
21       use MDyn
22       use MPI_
23       use io, only:readrtns
24
25 !      implicit real*8 (a-h,o-z)
26 !      include 'DIMENSIONS'
27 #ifdef MPI
28       use MPI_data ! include 'COMMON.SETUP'
29       implicit none
30       include 'mpif.h'
31       integer :: ierr
32 #else
33       use MPI_data, only: me,king
34       implicit none
35 #endif
36 !      include 'COMMON.TIME1'
37 !      include 'COMMON.INTERACT'
38 !      include 'COMMON.NAMES'
39 !      include 'COMMON.GEO'
40 !      include 'COMMON.HEADER'
41 !      include 'COMMON.CONTROL'
42 !      include 'COMMON.CONTACTS'
43 !      include 'COMMON.CHAIN'
44 !      include 'COMMON.VAR'
45 !      include 'COMMON.IOUNITS'
46 !      include 'COMMON.FFIELD'
47 !      include 'COMMON.REMD'
48 !      include 'COMMON.MD'
49 !      include 'COMMON.SBRIDGE'
50
51       real(kind=8) :: hrtime,mintime,sectime
52       character(len=64) ::  text_mode_calc(-2:14)
53       text_mode_calc(-2) = 'test'
54       text_mode_calc(-1) = 'cos'
55       text_mode_calc(0) = 'Energy evaluation or minimization'
56       text_mode_calc(1) = 'Regularization of PDB structure'
57       text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58       text_mode_calc(3) = 'Monte Carlo (with minimization) '
59       text_mode_calc(4) = 'Energy minimization of multiple conformations'
60       text_mode_calc(5) = 'Checking energy gradient'
61       text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62       text_mode_calc(7) = 'Energy map'
63       text_mode_calc(8) = 'CSA calculations'
64       text_mode_calc(9) = 'Not used 9'
65       text_mode_calc(10) = 'Not used 10'
66       text_mode_calc(11) = 'Soft regularization of PDB structure'
67       text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68       text_mode_calc(13) = 'Not used 13'
69       text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
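! The run mode modecalc is set from the input data (readrtns) and dispatched
! below to one of the exec_* driver routines; the 'Not used' entries are kept
! only so that the numbering of the remaining modes stays unchanged.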
70 !      external ilen
71 !      call memmon_print_usage()
72
73       call init_task
74       if (me.eq.king) &
75         write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'  
76       if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
78       call readrtns
79       call flush(iout)
80 !
81       if (me.eq.king .or. .not. out1file) then
82        write (iout,'(2a/)') &
83        text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
84        ' calculation.' 
85        if (minim) write (iout,'(a)') &
86        'Conformations will be energy-minimized.'
87        write (iout,'(80(1h*)/)') 
88       endif
89       call flush(iout)
90 !
91       if (modecalc.eq.-2) then
92         call test
93         stop
94       else if (modecalc.eq.-1) then
95         write(iout,*) "call check_sc_map next"
96         call check_bond
97         stop
98       endif
99 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
100
101 #ifdef MPI
102       if (fg_rank.gt.0) then
103 ! Fine-grain slaves just do energy and gradient components.
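! They loop inside ergastulum, executing order codes broadcast by the
! coarse-grain master (energies, gradients, matrix operations) until a
! negative order tells them to quit; see ergastulum at the end of this file.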
104         call ergastulum ! slave workhouse in Latin
105       else
106 #endif
107       if (modecalc.eq.0) then
108 !write(iout,*)"!!!!!!!!!!!!!!!!! in unres"
109
110         call exec_eeval_or_minim
111 !write(iout,*)"!!!!!!!!!!!!!!!!! in unres"
112
113       else if (modecalc.eq.1) then
114         call exec_regularize
115       else if (modecalc.eq.2) then
116         call exec_thread
117       else if (modecalc.eq.3 .or. modecalc .eq.6) then
118         call exec_MC
119       else if (modecalc.eq.4) then
120         call exec_mult_eeval_or_minim
121       else if (modecalc.eq.5) then
122         call exec_checkgrad
123 !write(iout,*) "check grad dwa razy"
124 !el        call exec_checkgrad
125       else if (ModeCalc.eq.7) then
126         call exec_map
127       else if (ModeCalc.eq.8) then
128         call exec_CSA
129       else if (modecalc.eq.11) then
130         call exec_softreg
131       else if (modecalc.eq.12) then
132         call exec_MD
133       else if (modecalc.eq.14) then
134         call exec_MREMD
135       else
136         write (iout,'(a,i5)') 'This calculation type is not supported',&
137          ModeCalc
138       endif
139 !elwrite(iout,*)"!!!!!!!!!!!!!!!!!"
140
141 #ifdef MPI
142       endif
143 ! Finish task.
144       if (fg_rank.eq.0) call finish_task
145 !      call memmon_print_usage()
146 #ifdef TIMING
147        call print_detailed_timing
148 #endif
149       call MPI_Finalize(ierr)
150       stop 'Bye Bye...'
151 #else
152       call dajczas(tcpu(),hrtime,mintime,sectime)
153       stop '********** Program terminated normally.'
154 #endif
155
156       end program !UNRES
157 !-----------------------------------------------------------------------------
158 !
159 !-----------------------------------------------------------------------------
160       subroutine exec_MD
161       use MPI_data      !include 'COMMON.SETUP'
162       use control_data  !include 'COMMON.CONTROL'
163       use geometry, only:chainbuild
164       use MDyn
165       use io_units      !include 'COMMON.IOUNITS'
166 !      use io_common
167       implicit none
168 !      include 'DIMENSIONS'
169 #ifdef MPI
170       include "mpif.h"
171 #endif
172       call alloc_MD_arrays
173       if (me.eq.king .or. .not. out1file) &
174          write (iout,*) "Calling chainbuild"
175       call chainbuild
176       call MD
177       return
178       end subroutine exec_MD
179 !---------------------------------------------------------------------------
180       subroutine exec_MREMD
181       use MPI_data      !include 'COMMON.SETUP'
182       use control_data  !include 'COMMON.CONTROL'
183       use io_units      !include 'COMMON.IOUNITS'
184 !      use io_common
185       use REMD_data     !include 'COMMON.REMD'
186       use geometry, only:chainbuild
187       use MREMDyn
188
189       implicit none
190 !      include 'DIMENSIONS'
191 #ifdef MPI
192       include "mpif.h"
193 #endif
194
195       integer :: i
196       call alloc_MD_arrays
197       call alloc_MREMD_arrays
198
199       if (me.eq.king .or. .not. out1file) &
200          write (iout,*) "Calling chainbuild"
201       call chainbuild
202       if (me.eq.king .or. .not. out1file) &
203          write (iout,*) "Calling REMD"
204       if (remd_mlist) then 
205         call MREMD
206       else
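! No replica-multiplicity list was given; default every entry of remd_m to 1
! (one trajectory per replica) before starting MREMD.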
207         do i=1,nrep
208           remd_m(i)=1
209         enddo
210         call MREMD
211       endif
212       return
213       end subroutine exec_MREMD
214 !-----------------------------------------------------------------------------
215       subroutine exec_eeval_or_minim
216       use MPI_data              !include 'COMMON.SETUP'
217       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
218       use io_units      !include 'COMMON.IOUNITS'
219       use names
220 !      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
221       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
222 !      use REMD         !include 'COMMON.REMD'
223 !      use MD           !include 'COMMON.MD'
224
225       use energy_data
226
227       use io_base
228       use geometry, only:chainbuild
229       use energy
230       use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
231       use minimm, only:minimize,minim_dc,sc_move
232       use compare_data !el
233       use comm_srutu
234       implicit none
235 !      implicit real*8 (a-h,o-z)
236 !      include 'DIMENSIONS'
237 #ifdef MPI
238       include 'mpif.h'
239 #endif
240       integer :: i
241 !el      common /srutu/ icall
242       real(kind=8) :: energy_(0:n_ene)
243       real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
244       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
245       real(kind=8) :: time00, evals, etota, etot, time_ene, time1
246       integer :: nharp,nft_sc,iretcode,nfun
247       integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
248       logical :: fail
249       real(kind=8) :: rms,frac,frac_nn,co
250
251       integer :: j,k
252       call alloc_compare_arrays
253       if (indpdb.eq.0) call chainbuild
254 #ifdef MPI
255       time00=MPI_Wtime()
256 #endif
257       call chainbuild_cart
258 !      write(iout,*)"in exec_eeval or minimim",split_ene
259 !      do j=1,2*nres+2
260 !        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
261 !        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
262 !      enddo
263       if (split_ene) then
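! With split_ene the long-range and short-range contributions are evaluated
! and printed separately, then summed term by term as a consistency check
! against the full etotal call below.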
264 !      write(iout,*)"in exec_eeval or minimim"
265
266        print *,"Processor",myrank," after chainbuild"
267        icall=1
268 !elwrite(iout,*)"in exec_eeval or minimim"
269
270        call etotal_long(energy_long)
271        write (iout,*) "Printing long range energy"
272        call enerprint(energy_long)
273 !elwrite(iout,*)"in exec_eeval or minimim"
274
275        call etotal_short(energy_short)
276        write (iout,*) "Printing short range energy"
277        call enerprint(energy_short)
278        do i=0,n_ene
279          energy_(i)=energy_long(i)+energy_short(i)
280          write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
281        enddo
282        write (iout,*) "Printing long+short range energy"
283        call enerprint(energy_)
284       endif
285
286       call etotal(energy_)
287 !elwrite(iout,*)"after etotal in exec_eev"
288 #ifdef MPI
289       time_ene=MPI_Wtime()-time00
290 #endif
291       write (iout,*) "Time for energy evaluation",time_ene
292       print *,"after etotal"
293       etota = energy_(0)
294       etot = etota
295       call enerprint(energy_)
296 !write(iout,*)"after enerprint"
297       call hairpin(.true.,nharp,iharp)
298 !write(iout,*) "after hairpin"!,hfrag(1,1)
299       call secondary2(.true.)
300 !write(iout,*) "after secondary2"
301       if (minim) then
302 !rc overlap test
303 !elwrite(iout,*) "after secondary2 minim",minim
304         if (overlapsc) then 
305           print *, 'Calling OVERLAP_SC'
306 !write(iout,*) 'Calling OVERLAP_SC'
307           call overlap_sc(fail)
308 !write(iout,*) 'after Calling OVERLAP_SC'
309         endif 
310
311         if (searchsc) then 
312           call sc_move(2,nres-1,10,1d10,nft_sc,etot)
313           print *,'SC_move',nft_sc,etot
314           write(iout,*) 'SC_move',nft_sc,etot
315         endif 
316
317         if (dccart) then
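! dccart requests minimization directly in Cartesian (dc) coordinates via
! minim_dc; otherwise the chain is converted to internal variables
! (geom_to_var) and minimized with the SUMSL-based minimize routine.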
318 !write(iout,*) 'CART  calling minim_dc', nvar
319           print *, 'Calling MINIM_DC'
320 #ifdef MPI
321           time1=MPI_WTIME()
322 #endif
323 !    call check_ecartint !el
324           call minim_dc(etot,iretcode,nfun)
325 !    call check_ecartint !el
326         else 
327 !write(iout,*) "indpdb",indpdb
328           if (indpdb.ne.0) then 
329 !write(iout,*) 'if indpdb', indpdb
330             call bond_regular
331             call chainbuild
332           endif
333           call geom_to_var(nvar,varia)
334 !write(iout,*) 'po geom to var; calling minimize', nvar
335           print *,'Calling MINIMIZE.'
336 #ifdef MPI
337           time1=MPI_WTIME()
338 #endif
339 !    call check_eint
340 !        call exec_checkgrad   !el
341           call minimize(etot,varia,iretcode,nfun)
342 !     call check_eint
343 !        call exec_checkgrad   !el
344         endif
345         print *,'SUMSL return code is',iretcode,' eval ',nfun
346 #ifdef MPI
347         evals=nfun/(MPI_WTIME()-time1)
348 #endif
349         print *,'# eval/s',evals
350         print *,'refstr=',refstr
351         call hairpin(.true.,nharp,iharp)
352         call secondary2(.true.)
353         call etotal(energy_)
354         etot = energy_(0)
355         call enerprint(energy_)
356
357         call intout
358         call briefout(0,etot)
359         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
360         write (iout,'(a,i3)') 'SUMSL return code:',iretcode
361         write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
362         write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
363       else
364 !elwrite(iout,*) "after secondary2 minim",minim
365         print *,'refstr=',refstr
366         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
367 !elwrite(iout,*) "rms_nac"
368 !elwrite(iout,*) "before briefout"
369         call briefout(0,etot)
370 !elwrite(iout,*) "after briefout"
371       endif
372       if (outpdb) call pdbout(etot,titel(:32),ipdb)
373       if (outmol2) call mol2out(etot,titel(:32))
374 !elwrite(iout,*) "after exec_eeval_or_minim"
375       return
376       end subroutine exec_eeval_or_minim
377 !-----------------------------------------------------------------------------
378       subroutine exec_regularize
379 !      use MPI_data             !include 'COMMON.SETUP'
380       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
381       use io_units      !include 'COMMON.IOUNITS'
382       use names
383       use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
384       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
385  !     use REMD         !include 'COMMON.REMD'
386 !     use MD            !include 'COMMON.MD'
387       use regularize_
388       use compare
389       implicit none
390 !      implicit real*8 (a-h,o-z)
391 !      include 'DIMENSIONS'
392 #ifdef MPI
393       include 'mpif.h'
394 #endif
395       real(kind=8) :: energy_(0:n_ene)
396       real(kind=8) :: etot
397       real(kind=8) :: rms,frac,frac_nn,co
398       integer :: iretcode
399
400       call alloc_compare_arrays
401       call gen_dist_constr
402       call sc_conf
403       call intout
404       call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
405       call etotal(energy_)
406       energy_(0)=energy_(0)-energy_(14)
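! Energy term no. 14 (apparently the restraint contribution introduced for
! regularization by gen_dist_constr) is excluded from the reported total.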
407       etot=energy_(0)
408       call enerprint(energy_)
409       call intout
410       call briefout(0,etot)
411       if (outpdb) call pdbout(etot,titel(:32),ipdb)
412       if (outmol2) call mol2out(etot,titel(:32))
413       if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
414       write (iout,'(a,i3)') 'SUMSL return code:',iretcode
415       return
416       end subroutine exec_regularize
417 !-----------------------------------------------------------------------------
418       subroutine exec_thread
419 !      use MPI_data             !include 'COMMON.SETUP'
420       use compare
421       implicit none
422 !      include 'DIMENSIONS'
423 #ifdef MPI
424       include "mpif.h"
425 #endif
426       call alloc_compare_arrays
427       call thread_seq
428       return
429       end subroutine exec_thread
430 !-----------------------------------------------------------------------------
431       subroutine exec_MC
432 !      use MPI_data             !include 'COMMON.SETUP'
433       use control_data  !include 'COMMON.CONTROL'
434       use geometry_data
435       use energy_data
436       use mcm_md
437       implicit none
438 !      implicit real*8 (a-h,o-z)
439 !      include 'DIMENSIONS'
440       character(len=10) :: nodeinfo
441       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
442       integer :: ipar
443 #ifdef MPI
444       include "mpif.h"
445 #endif
446       call alloc_MCM_arrays
447       call mcm_setup
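! With minim on, modecalc=3 runs Monte Carlo with minimization (do_mcm) and
! modecalc=6 runs entropic-sampling MC (entmcm); without minim a plain
! Monte Carlo search (monte_carlo) is performed.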
448       if (minim) then
449 #ifdef MPI
450         if (modecalc.eq.3) then
451           call do_mcm(ipar)
452         else
453           call entmcm
454         endif
455 #else
456         if (modecalc.eq.3) then
457           call do_mcm(ipar)
458         else
459           call entmcm
460         endif
461 #endif
462       else
463         call monte_carlo
464       endif
465       return
466       end subroutine exec_MC
467 !-----------------------------------------------------------------------------
468       subroutine exec_mult_eeval_or_minim
469       use MPI_data              !include 'COMMON.SETUP'
470       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
471       use io_units      !include 'COMMON.IOUNITS'
472       use names
473       use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
474       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
475 !      use REMD         !include 'COMMON.REMD'
476 !      use MD           !include 'COMMON.MD'
477       use io_base
478       use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
479       use energy, only:etotal,enerprint
480       use compare, only:rms_nac_nnc
481       use minimm, only:minimize!,minim_mcmf
482 !      implicit real*8 (a-h,o-z)
483 !      include 'DIMENSIONS'
484 #ifdef MPI
485       use minimm, only:minim_mcmf
486       implicit none
487       include 'mpif.h'
488       integer :: ierror,ierr
489       integer :: man
490       integer,dimension(mpi_status_size) :: muster   ! MPI status must be an integer array
491 #else
492       implicit none
493 #endif
494       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
495       integer,dimension(6) :: ind
496       real(kind=8) :: energy_(0:n_ene)
497       logical :: eof
498       real(kind=8) :: etot,ene0
499       integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
500          nf_mcmf,j
501       real(kind=8) :: rms,frac,frac_nn,co,time,ene
502
503       eof=.false.
504 #ifdef MPI
505       if(me.ne.king) then
506         call minim_mcmf
507         return
508       endif
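! The master farms conformations out to the workers: the 6-element ind array
! carries the task number, varia the starting variables and ene0 a reference
! energy; each worker returns the optimized variables and energies, and an
! all-zero ind array sent at the end tells it to finish.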
509
510       close (intin)
511       open(intin,file=intinname,status='old')
512       write (istat,'(a5,20a12)')"#    ",&
513         (wname(print_order(i)),i=1,nprint_ene)
514       if (refstr) then
515         write (istat,'(a5,20a12)')"#    ",&
516          (ename(print_order(i)),i=1,nprint_ene),&
517          "ETOT total","RMSD","nat.contact","nnt.contact"        
518       else
519         write (istat,'(a5,20a12)')"#    ",&
520           (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
521       endif
522
523       if (.not.minim) then
524         do while (.not. eof)
525           if (read_cart) then
526             read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
527             call read_x(intin,*11)
528 #ifdef MPI
529 ! Broadcast the order to compute internal coordinates to the slaves.
530             if (nfgtasks.gt.1) &
531               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
532 #endif
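! (the broadcast order code 6 makes the slaves execute int_from_cart1 as well;
!  see the corresponding branch of ergastulum)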
533             call int_from_cart1(.false.)
534           else
535             read (intin,'(i5)',end=1100,err=1100) iconf
536             call read_angles(intin,*11)
537             call geom_to_var(nvar,varia)
538             call chainbuild
539           endif
540           write (iout,'(a,i7)') 'Conformation #',iconf
541           call etotal(energy_)
542           call briefout(iconf,energy_(0))
543           call enerprint(energy_)
544           etot=energy_(0)
545           if (refstr) then 
546             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
547             write (istat,'(i5,20(f12.3))') iconf,&
548             (energy_(print_order(i)),i=1,nprint_ene),etot,&
549              rms,frac,frac_nn,co
550 !jlee end
551           else
552             write (istat,'(i5,16(f12.3))') iconf,&
553            (energy_(print_order(i)),i=1,nprint_ene),etot
554           endif
555         enddo
556 1100    continue
557         goto 1101
558       endif
559
560       mm=0
561       imm=0
562       nft=0
563       ene0=0.0d0
564       n=0
565       iconf=0
! nmin and nf_mcmf are accumulated below; initialize them explicitly.
      nmin=0
      nf_mcmf=0
566 !      do n=1,nzsc
567       do while (.not. eof)
568         mm=mm+1
569         if (mm.lt.nodes) then
570           if (read_cart) then
571             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
572             call read_x(intin,*11)
573 #ifdef MPI
574 ! Broadcast the order to compute internal coordinates to the slaves.
575             if (nfgtasks.gt.1) &
576               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
577 #endif
578             call int_from_cart1(.false.)
579           else
580             read (intin,'(i5)',end=11,err=11) iconf
581             call read_angles(intin,*11)
582             call geom_to_var(nvar,varia)
583             call chainbuild
584           endif
585           write (iout,'(a,i7)') 'Conformation #',iconf
586           n=n+1
587          imm=imm+1
588          ind(1)=1
589          ind(2)=n
590          ind(3)=0
591          ind(4)=0
592          ind(5)=0
593          ind(6)=0
594          ene0=0.0d0
595          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
596                         ierr)
597          call mpi_send(varia,nvar,mpi_double_precision,mm,&
598                         idreal,CG_COMM,ierr)
599          call mpi_send(ene0,1,mpi_double_precision,mm,&
600                         idreal,CG_COMM,ierr)
601 !         print *,'task ',n,' sent to worker ',mm,nvar
602         else
603          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
604                        CG_COMM,muster,ierr)
605          man=muster(mpi_source)
606 !         print *,'receiving result from worker ',man,' (',iii1,iii,')'
607          call mpi_recv(varia,nvar,mpi_double_precision,&
608                      man,idreal,CG_COMM,muster,ierr)
609          call mpi_recv(ene,1,&
610                      mpi_double_precision,man,idreal,&
611                      CG_COMM,muster,ierr)
612          call mpi_recv(ene0,1,&
613                      mpi_double_precision,man,idreal,&
614                      CG_COMM,muster,ierr)
615 !         print *,'result received from worker ',man,' sending now'
616
617           call var_to_geom(nvar,varia)
618           call chainbuild
619           call etotal(energy_)
620           iconf=ind(2)
621           write (iout,*)
622           write (iout,*)
623           write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
624
625           etot=energy_(0)
626           call enerprint(energy_)
627           call briefout(it,etot)
628 !          if (minim) call briefout(it,etot)
629           if (refstr) then 
630             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
631             write (istat,'(i5,19(f12.3))') iconf,&
632            (energy_(print_order(i)),i=1,nprint_ene),etot,&
633            rms,frac,frac_nn,co
634           else
635             write (istat,'(i5,15(f12.3))') iconf,&
636            (energy_(print_order(i)),i=1,nprint_ene),etot
637           endif
638
639           imm=imm-1
640           if (read_cart) then
641             read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
642             call read_x(intin,*11)
643 #ifdef MPI
644 ! Broadcast the order to compute internal coordinates to the slaves.
645             if (nfgtasks.gt.1) &
646               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
647 #endif
648             call int_from_cart1(.false.)
649           else
650             read (intin,'(i5)',end=1101,err=1101) iconf
651             call read_angles(intin,*11)
652             call geom_to_var(nvar,varia)
653             call chainbuild
654           endif
655           n=n+1
656           imm=imm+1
657           ind(1)=1
658           ind(2)=n
659           ind(3)=0
660           ind(4)=0
661           ind(5)=0
662           ind(6)=0
663           call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
664                         ierr)
665           call mpi_send(varia,nvar,mpi_double_precision,man,&
666                         idreal,CG_COMM,ierr)
667           call mpi_send(ene0,1,mpi_double_precision,man,&
668                         idreal,CG_COMM,ierr)
669           nf_mcmf=nf_mcmf+ind(4)
670           nmin=nmin+1
671         endif
672       enddo
673 11    continue
674       do j=1,imm
675         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
676                      CG_COMM,muster,ierr)
677         man=muster(mpi_source)
678         call mpi_recv(varia,nvar,mpi_double_precision,&
679                      man,idreal,CG_COMM,muster,ierr)
680         call mpi_recv(ene,1,&
681                      mpi_double_precision,man,idreal,&
682                      CG_COMM,muster,ierr)
683         call mpi_recv(ene0,1,&
684                      mpi_double_precision,man,idreal,&
685                      CG_COMM,muster,ierr)
686
687         call var_to_geom(nvar,varia)
688         call chainbuild
689         call etotal(energy_)
690         iconf=ind(2)
691         write (iout,*)
692         write (iout,*)
693         write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
694
695         etot=energy_(0)
696         call enerprint(energy_)
697         call briefout(it,etot)
698         if (refstr) then 
699           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
700           write (istat,'(i5,19(f12.3))') iconf,&
701          (energy_(print_order(i)),i=1,nprint_ene),etot,&
702          rms,frac,frac_nn,co
703         else
704           write (istat,'(i5,15(f12.3))') iconf,&
705           (energy_(print_order(i)),i=1,nprint_ene),etot
706         endif
707         nmin=nmin+1
708       enddo
709 1101  continue
710       do i=1, nodes-1
711          ind(1)=0
712          ind(2)=0
713          ind(3)=0
714          ind(4)=0
715          ind(5)=0
716          ind(6)=0
717          call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
718                         ierr)
719       enddo
720 #else
721       close (intin)
722       open(intin,file=intinname,status='old')
723       write (istat,'(a5,20a12)')"#    ",&
724          (wname(print_order(i)),i=1,nprint_ene)
725       write (istat,'("#    ",20(1pe12.4))') &
726          (weights(print_order(i)),i=1,nprint_ene)
727       if (refstr) then
728         write (istat,'(a5,20a12)')"#    ",&
729          (ename(print_order(i)),i=1,nprint_ene),&
730          "ETOT total","RMSD","nat.contact","nnt.contact"
731       else
732         write (istat,'(a5,14a12)')"#    ",&
733          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
734       endif
735       do while (.not. eof)
736           if (read_cart) then
737             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
738             call read_x(intin,*11)
739 #ifdef MPI
740 ! Broadcast the order to compute internal coordinates to the slaves.
741             if (nfgtasks.gt.1) &
742               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
743 #endif
744             call int_from_cart1(.false.)
745           else
746             read (intin,'(i5)',end=11,err=11) iconf
747             call read_angles(intin,*11)
748             call geom_to_var(nvar,varia)
749             call chainbuild
750           endif
751         write (iout,'(a,i7)') 'Conformation #',iconf
752         if (minim) call minimize(etot,varia,iretcode,nfun)
753         call etotal(energy_)
754
755         etot=energy_(0)
756         call enerprint(energy_)
757         if (minim) call briefout(it,etot) 
758         if (refstr) then 
759           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
760           write (istat,'(i5,18(f12.3))') iconf,&
761          (energy_(print_order(i)),i=1,nprint_ene),&
762          etot,rms,frac,frac_nn,co
763 !jlee end
764         else
765           write (istat,'(i5,14(f12.3))') iconf,&
766          (energy_(print_order(i)),i=1,nprint_ene),etot
767         endif
768       enddo
769    11 continue
770 #endif
771       return
772       end subroutine exec_mult_eeval_or_minim
773 !-----------------------------------------------------------------------------
774       subroutine exec_checkgrad
775 !      use MPI_data             !include 'COMMON.SETUP'
776       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
777       use io_units      !include 'COMMON.IOUNITS'
778 !el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
779       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
780 !      use REMD         !include 'COMMON.REMD'
781       use MD_data               !include 'COMMON.MD'
782       use io_base, only:intout
783       use io_config, only:read_fragments
784       use geometry
785       use energy
786       use comm_srutu
787       implicit none
788 !      implicit real*8 (a-h,o-z)
789 !      include 'DIMENSIONS'
790 #ifdef MPI
791       include 'mpif.h'
792 #endif
793 !el      integer :: icall
794 !el      common /srutu/ icall
795       real(kind=8) :: energy_(0:max_ene)
796       real(kind=8) :: etot
797       integer :: i
798 !      do i=2,nres
799 !        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
800 !        if (itype(i).ne.10) 
801 !     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
802 !      enddo
803       if (indpdb.eq.0) call chainbuild
804 !      do i=0,nres
805 !        do j=1,3
806 !          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
807 !        enddo
808 !      enddo
809 !      do i=1,nres-1
810 !        if (itype(i).ne.10) then
811 !          do j=1,3
812 !            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
813 !          enddo
814 !        endif
815 !      enddo
816 !      do j=1,3
817 !        dc(j,0)=ran_number(-0.2d0,0.2d0)
818 !      enddo
819       usampl=.true.
820       totT=1.d0
821       eq_time=0.0d0
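! Switch on the fragment restraints (usampl) with token time settings so that
! the Uconst terms contribute to the energy and gradient being checked.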
822       call read_fragments
823       call chainbuild_cart
824       call cartprint
825       call intout
826       icall=1
827       call etotal(energy_(0))
828       etot = energy_(0)
829       call enerprint(energy_(0))
830       write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
831       print *,'icheckgrad=',icheckgrad
832       goto (10,20,30) icheckgrad
833   10  call check_ecartint
834       return
835   20  call check_cartgrad
836       return
837   30  call check_eint
838       return
839       end subroutine exec_checkgrad
840 !-----------------------------------------------------------------------------
841       subroutine exec_map
842 !      use map_data
843       use map_
844       use io_config, only:map_read
845       implicit none
846 ! Energy maps
847       call alloc_map_arrays
848       call map_read
849       call map
850       return
851       end subroutine exec_map
852 !-----------------------------------------------------------------------------
853       subroutine exec_CSA
854
855       use io_units      !include 'COMMON.IOUNITS'
856       use CSA
857
858       implicit none
859 #ifdef MPI
860       include "mpif.h"
861 #endif
862 !      include 'DIMENSIONS'
863 ! Conformational Space Annealing programmed by Jooyoung Lee.
864 ! This method works only with parallel machines!
865 #ifdef MPI
866       call alloc_CSA_arrays
867       call together
868 #else
869       write (iout,*) "CSA works on parallel machines only"
870 #endif
871       return
872       end subroutine exec_CSA
873 !-----------------------------------------------------------------------------
874       subroutine exec_softreg
875       use io_units      !include 'COMMON.IOUNITS'
876       use control_data  !include 'COMMON.CONTROL'
877       use energy_data
878       use io_base, only:intout,briefout
879       use geometry, only:chainbuild
880       use energy
881       use compare
882       implicit none
883 !      include 'DIMENSIONS'
884       real(kind=8) :: energy_(0:n_ene)
885 !el local variables
886       real(kind=8) :: rms,frac,frac_nn,co,etot
887       logical :: debug = .false.   ! assume no debug printout from secondary2
888
889       call alloc_compare_arrays
890       call chainbuild
891       call etotal(energy_)
892       call enerprint(energy_)
893       if (.not.lsecondary) then
894         write(iout,*) 'Calling secondary structure recognition'
895         call secondary2(debug)
896       else
897         write(iout,*) 'Using secondary structure supplied in pdb'
898       endif
899
900       call softreg
901
902       call etotal(energy_)
903       etot=energy_(0)
904       call enerprint(energy_)
905       call intout
906       call briefout(0,etot)
907       call secondary2(.true.)
908       if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
909       return
910       end subroutine exec_softreg
911 !-----------------------------------------------------------------------------
912 ! minimize_p.F
913 !-----------------------------------------------------------------------------
914 !el#ifdef MPI
915       subroutine ergastulum
916
917 !      implicit real*8 (a-h,o-z)
918 !      include 'DIMENSIONS'
919       use MD_data
920       use energy
921       use MDyn, only:setup_fricmat
922       use REMD, only:fricmat_mult,ginv_mult
923 #ifdef MPI
924       include "mpif.h"
925 #endif
926 !      include 'COMMON.SETUP'
927 !      include 'COMMON.DERIV'
928 !      include 'COMMON.VAR'
929 !      include 'COMMON.IOUNITS'
930 !      include 'COMMON.FFIELD'
931 !      include 'COMMON.INTERACT'
932 !      include 'COMMON.MD'
933 !      include 'COMMON.TIME1'
934       real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
935       real(kind=8) :: edum(0:n_ene),time_order(0:10)
936 !el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
937 !el      common /przechowalnia/ Gcopy
938       integer :: icall = 0
939
940 !el  local variables
941       real(kind=8) :: time00
942       integer :: iorder,i,j,nres2,ierr,ierror
943       nres2=2*nres
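! Allocate the work arrays a fine-grain slave needs; the if(.not.allocated)
! guards keep anything already allocated elsewhere from being allocated twice.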
944       if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
945 ! common.MD
946       if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
947 !      common /mdpmpi/
948       if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
949       if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
950       if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
951       if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
952
953       if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
954
955 ! Workers wait for order codes (and the accompanying data) broadcast by the boss and serve them until a negative code arrives.
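! Order codes served by this loop:
!   0 - etotal            1 - sum_gradient       2 - etotal_short
!   3 - etotal_long       4 - ginv_mult          5 - MD setup + scatter of Ginv
!   6 - int_from_cart1    7 - chainbuild_cart    8 - intcartderiv
!   9 - fricmat_mult     10 - setup_fricmat     <0 - leave the loop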
956       iorder=0
      time_order=0.0d0   ! per-order timing accumulators reported at the end
957       do while (iorder.ge.0)
958 !      write (*,*) 'Processor',fg_rank,' CG group',kolor,
959 !     & ' receives order from Master'
960         time00=MPI_Wtime()
961         call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
962         time_Bcast=time_Bcast+MPI_Wtime()-time00
963         if (icall.gt.4 .and. iorder.ge.0) &
964          time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
965        icall=icall+1
966 !      write (*,*)
967 !     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
968         if (iorder.eq.0) then
969           call zerograd
970           call etotal(edum)
971 !          write (2,*) "After etotal"
972 !          write (2,*) "dimen",dimen," dimen3",dimen3
973 !          call flush(2)
974         else if (iorder.eq.2) then
975           call zerograd
976           call etotal_short(edum)
977 !          write (2,*) "After etotal_short"
978 !          write (2,*) "dimen",dimen," dimen3",dimen3
979 !          call flush(2)
980         else if (iorder.eq.3) then
981           call zerograd
982           call etotal_long(edum)
983 !          write (2,*) "After etotal_long"
984 !          write (2,*) "dimen",dimen," dimen3",dimen3
985 !          call flush(2)
986         else if (iorder.eq.1) then
987           call sum_gradient
988 !          write (2,*) "After sum_gradient"
989 !          write (2,*) "dimen",dimen," dimen3",dimen3
990 !          call flush(2)
991         else if (iorder.eq.4) then
992           call ginv_mult(z,d_a_tmp)
993         else if (iorder.eq.5) then
994 ! Setup MD things for a slave
995           dimen=(nct-nnt+1)+nside
996           dimen1=(nct-nnt)+(nct-nnt+1)
997           dimen3=dimen*3
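! dimen counts the coarse-grained sites (backbone units plus nside side
! chains) and dimen3 the corresponding Cartesian degrees of freedom; the
! inverse inertia matrix Ginv is then scattered block-wise to the fine-grain
! processors below.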
998 !          write (2,*) "dimen",dimen," dimen3",dimen3
999 !          call flush(2)
1000           call int_bounds(dimen,igmult_start,igmult_end)
1001           igmult_start=igmult_start-1
1002           call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
1003            ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1004            my_ng_count=igmult_end-igmult_start
1005           call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
1006            MPI_INTEGER,FG_COMM,IERROR)
1007           write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
1008 !          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
1009           myginv_ng_count=nres2*my_ng_count !el maxres2
1010 !          write (2,*) "igmult_start",igmult_start," igmult_end",
1011 !     &     igmult_end," my_ng_count",my_ng_count
1012 !          call flush(2)
1013           call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1014            nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1015           call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1016            nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1017 !          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1018 !          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1019 !          call flush(2)
1020 !          call MPI_Barrier(FG_COMM,IERROR)
1021           time00=MPI_Wtime()
1022           call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1023            nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1024            myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1025 #ifdef TIMING
1026           time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
1027 #endif
1028           do i=1,dimen
1029             do j=1,2*my_ng_count
1030               ginv(j,i)=gcopy(i,j)
1031             enddo
1032           enddo
1033 !          write (2,*) "dimen",dimen," dimen3",dimen3
1034 !          write (2,*) "End MD setup"
1035 !          call flush(2)
1036 !           write (iout,*) "My chunk of ginv_block"
1037 !           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
1038         else if (iorder.eq.6) then
1039           call int_from_cart1(.false.)
1040         else if (iorder.eq.7) then
1041           call chainbuild_cart
1042         else if (iorder.eq.8) then
1043           call intcartderiv
1044         else if (iorder.eq.9) then
1045           call fricmat_mult(z,d_a_tmp)
1046         else if (iorder.eq.10) then
1047           call setup_fricmat
1048         endif
1049       enddo
1050       write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1051         ' absolute rank',myrank,' leaves ERGASTULUM.'
1052       write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1053        (' order[',i,']',time_order(i),i=0,10)
1054       return
1055       end subroutine ergastulum