!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) ::  text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
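! The labels above are indexed by modecalc (presumably set while readrtns
! processes the input); the matching branch is selected in the dispatch block
! further below, e.g. modecalc=12 -> exec_MD, modecalc=14 -> exec_MREMD.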
!      external ilen
!el      run_wham=.false.
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
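! (Fine-grain slave processes stay inside ergastulum, defined at the end of
!  this file, serving energy/gradient orders broadcast by the master over
!  FG_COMM until a negative order code arrives.)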
      else
#endif
      if (modecalc.eq.0) then
!write(iout,*)"!!!!!!!!!!!!!!!!! in unres"

        call exec_eeval_or_minim
!write(iout,*)"!!!!!!!!!!!!!!!!! in unres"

      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
!write(iout,*) "check grad twice"
!el        call exec_checkgrad
      else if (ModeCalc.eq.7) then
        call exec_map
      else if (ModeCalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         ModeCalc
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!!"

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MD_arrays
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      call chainbuild
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays

      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD"
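! When no replica-multiplicity list is supplied (remd_mlist false), every one
! of the nrep replicas is given multiplicity 1 before MREMD is called.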
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use io_common
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co
      call alloc_compare_arrays
      if (indpdb.eq.0) call chainbuild
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
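! With split_ene set, the long- and short-range contributions are evaluated
! and printed separately below and then summed term by term, so the split can
! be compared against the full etotal evaluation that follows.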

      if (split_ene) then

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
          call minim_dc(etot,iretcode,nfun)
        else
          if (indpdb.ne.0) then
            call bond_regular
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
          call minimize(etot,varia,iretcode,nfun)
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
! MPI ranks and status arrays must be default integers
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
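! (The literal 6 broadcast above is the work-order code served by the slave
!  loop in ergastulum at the end of this file, where iorder.eq.6 triggers
!  int_from_cart1, matching the call made on the master just below.)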
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
! initialize the counters accumulated in the loop below
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
         imm=imm+1
         ind(1)=1
         ind(2)=n
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         ene0=0.0d0
         call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
         call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
         call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
         man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
         call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
         call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
         call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) call chainbuild
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
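! computed GOTO: icheckgrad=1 runs check_ecartint, 2 runs check_cartgrad,
! 3 runs check_eint (see the labeled statements below)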
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)

      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres

! Workers wait for variables, NF, and NFL from the boss
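! Order codes dispatched below (broadcast by the master over FG_COMM):
!   0 - etotal            1 - sum_gradient      2 - etotal_short
!   3 - etotal_long       4 - ginv_mult         5 - MD setup for a slave
!   6 - int_from_cart1    7 - chainbuild_cart   8 - intcartderiv
!   9 - fricmat_mult     10 - setup_fricmat     negative - leave the loop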
      iorder=0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
       icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
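! (dimen below is the number of coarse-grained degree-of-freedom carriers,
!  nct-nnt+1 backbone units plus nside side chains, as the expressions
!  suggest; dimen3 is the corresponding number of Cartesian components.)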
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          write(iout,*) "before fricmat_mult"
          call fricmat_mult(z,d_a_tmp)
          write(iout,*) "after fricmat_mult"
        else if (iorder.eq.10) then
          write(iout,*) "before setup_fricmat"
          call setup_fricmat
          write(iout,*) "after setup_fricmat"
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum