!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) ::  text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
      write (iout,*) "After readrtns"
      call cartprint
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_bond next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
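! Dispatch on modecalc as read by readrtns; the descriptions stored in
! text_mode_calc above document each mode.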
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported: ',&
         modecalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
      call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
      print *,'After MD alloc'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      call chainbuild
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays

!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD",remd_mlist,nrep
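! If no replica-multiplicity list was supplied (remd_mlist=.false.),
! run all nrep replicas with multiplicity 1.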
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data
      use MD_data, only: iset
      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres) :: iharp        !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      if (iset.eq.0) iset=1
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
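! With split_ene the long- and short-range contributions are evaluated
! separately (etotal_long/etotal_short) and their sum is printed as a
! cross-check against the full etotal call that follows.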
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

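! Minimize either directly in Cartesian coordinates (minim_dc) or in
! internal variables after geom_to_var (minimize), depending on dccart.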
        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
!        call hairpin(.true.,nharp,iharp)
!        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
! rms,frac,frac_nn,co are defined only after rms_nac_nnc is called
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_nnc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
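! Exclude energy component 14 from the reported total.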
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man    ! rank of the sending worker, taken from muster(mpi_source)
      real(kind=8),dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

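! Master/worker task farm: the master (king) reads conformations and sends
! each worker a job as (ind, varia, ene0); whenever a result comes back,
! that worker is immediately given the next conformation.  After EOF the
! remaining imm outstanding results are drained (label 11) and ind(1)=0
! messages (after label 1101) tell the workers to quit.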
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                      man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(iconf,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(iconf,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
          if (nfgtasks.gt.1) &
            call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild6'
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(iconf,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data       !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
#ifdef UMB
      usampl=.true.
#endif
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      write (iout,*) "before etotal"
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
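! Computed GOTO on icheckgrad: 1 -> check_ecartint, 2 -> check_cartgrad,
! 3 -> check_eint; the Cartesian checks are called twice below.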
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      call check_ecartint
      return
  20  call check_cartgrad
      call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      debug=.false.   ! was previously passed to secondary2 uninitialized
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables and NF, and NFL from the boss
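! Order codes broadcast by the master: 0=etotal, 1=sum_gradient,
! 2=etotal_short, 3=etotal_long, 4=ginv_mult, 5=MD setup (scatter Ginv),
! 6=int_from_cart1, 7=chainbuild_cart, 8=intcartderiv, 9=fricmat_mult,
! 10=setup_fricmat; any negative order ends the loop.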
      iorder=0
      time_order(:)=0.0d0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum