!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue  !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
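! Human-readable labels for the MODECALC job types; the label of the
! selected mode is echoed in the job header below.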
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
      write (iout,*) "After readrtns"
      call cartprint
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_bond next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
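! Coarse-grain master: dispatch on the MODECALC value read by readrtns.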
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (ModeCalc.eq.7) then
        call exec_map
      else if (ModeCalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
        call exec_checkgrad
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         ModeCalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild,chainbuild_cart
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
      use compare, only:alloc_compare_arrays
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
      call alloc_compare_arrays
      print *,'After MD alloc'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
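! Build the starting geometry from internal coordinates when EXTCONF is
! set, otherwise from the Cartesian coordinates already in memory.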
      if (extconf) then
        call chainbuild
      else
        call chainbuild_cart
      endif
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn
      use compare, only:alloc_compare_arrays

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays
      call alloc_compare_arrays
!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD",remd_mlist,nrep
      if (remd_mlist) then
        call MREMD
      else
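! No replica-multiplicity list was given: assign multiplicity 1 to each
! of the nrep replicas before entering MREMD.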
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data

      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        call chainbuild
        write(iout,*) 'Warning: Calling chainbuild'
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
      if (split_ene) then
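! With SPLIT_ENE the long- and short-range contributions are evaluated
! separately and their term-by-term sum is printed next to the full
! energy as a consistency check.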
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
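! Minimization in internal (angular) variables: for a PDB start the
! virtual-bond geometry is regularized first, then the angles are packed
! into VARIA for the SUMSL minimizer.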
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#else
        evals=0.0d0
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
!        call hairpin(.true.,nharp,iharp)
!        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr,frac,frac_nn,co
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_ncc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

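! Regularize the structure against the reference coordinates CREF, then
! report the total energy with component 14 of the energy vector (the
! restraint contribution used during regularization) subtracted out.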
      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
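! MODECALC=3 runs Monte Carlo with minimization (do_mcm); MODECALC=6 runs
! entropic-sampling MC (entmcm). The MPI and serial branches are
! currently identical.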
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
      nmin=0
      nf_mcmf=0
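! Master/worker layout: the master (king) reads conformations from INTIN
! and farms them out for minimization; all other coarse-grain processors
! loop inside minim_mcmf until they receive a termination message.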
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
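! mm numbers the worker currently being seeded with a task; imm counts
! tasks still in flight, so the drain loop after label 11 can collect
! the outstanding results.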
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
          if (nfgtasks.gt.1) &
            call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild5'
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
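! Switch on restraint sampling so that the UCONST contributions and
! their gradients are included in the check.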
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      write (iout,*) "before etotal"
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
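! Computed GOTO on ICHECKGRAD: 1 -> check_ecartint, 2 -> check_cartgrad,
! 3 -> check_eint; values outside 1..3 fall through to the first branch.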
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      call check_ecartint
      return
  20  call check_cartgrad
      call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      debug=.false.
      call alloc_compare_arrays
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables and NF, and NFL from the boss
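! Order codes broadcast by the master: 0 etotal, 1 sum_gradient,
! 2 etotal_short, 3 etotal_long, 4 ginv_mult, 5 MD setup,
! 6 int_from_cart1, 7 chainbuild_cart, 8 intcartderiv, 9 fricmat_mult,
! 10 setup_fricmat; a negative order terminates the loop.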
      iorder=0
      time_order(:)=0.0d0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
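! dimen: number of CA sites (nct-nnt+1) plus side-chain sites (nside);
! dimen3 = 3*dimen Cartesian degrees of freedom.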
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum