critical bug fix in energy
[unres4.git] / source / unres / unres.F90
1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2 !                                                                              !
3 !                                U N R E S                                     !
4 !                                                                              !
5 ! Program to carry out conformational search of proteins in a united-residue   !
6 ! approximation.                                                               !
7 !                                                                              !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
9       program unres
10
11       use io_units
12       use control_data
13       use control, only:tcpu
14       use io_base, only:ilen
15       use geometry, only:chainbuild
16       use control, only:dajczas
17       use check_bond_
18       use energy
19       use compare, only: test
20       use map_
21       use MDyn
22       use MPI_
23       use io, only:readrtns
24
25 !      implicit real*8 (a-h,o-z)
26 !      include 'DIMENSIONS'
27 #ifdef MPI
28       use MPI_data ! include 'COMMON.SETUP'
29       implicit none
30       include 'mpif.h'
31       integer :: ierr
32 #else
33       use MPI_data, only: me,king
34       implicit none
35 #endif
36 !      include 'COMMON.TIME1'
37 !      include 'COMMON.INTERACT'
38 !      include 'COMMON.NAMES'
39 !      include 'COMMON.GEO'
40 !      include 'COMMON.HEADER'
41 !      include 'COMMON.CONTROL'
42 !      include 'COMMON.CONTACTS'
43 !      include 'COMMON.CHAIN'
44 !      include 'COMMON.VAR'
45 !      include 'COMMON.IOUNITS'
46 !      include 'COMMON.FFIELD'
47 !      include 'COMMON.REMD'
48 !      include 'COMMON.MD'
49 !      include 'COMMON.SBRIDGE'
50
51       real(kind=8) :: hrtime,mintime,sectime
52       character(len=64) ::  text_mode_calc(-2:14)
53       text_mode_calc(-2) = 'test'
54       text_mode_calc(-1) = 'cos'
55       text_mode_calc(0) = 'Energy evaluation or minimization'
56       text_mode_calc(1) = 'Regularization of PDB structure'
57       text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58       text_mode_calc(3) = 'Monte Carlo (with minimization) '
59       text_mode_calc(4) = 'Energy minimization of multiple conformations'
60       text_mode_calc(5) = 'Checking energy gradient'
61       text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62       text_mode_calc(7) = 'Energy map'
63       text_mode_calc(8) = 'CSA calculations'
64       text_mode_calc(9) = 'Not used 9'
65       text_mode_calc(10) = 'Not used 10'
66       text_mode_calc(11) = 'Soft regularization of PDB structure'
67       text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68       text_mode_calc(13) = 'Not used 13'
69       text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
70 !      external ilen
71 !      call memmon_print_usage()
72
73       call init_task
74       if (me.eq.king) &
75         write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'  
76       if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
78       call readrtns
79       call flush(iout)
80       write (iout,*) "After readrtns"
81       call cartprint
82 !
83       if (me.eq.king .or. .not. out1file) then
84        write (iout,'(2a/)') &
85        text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
86        ' calculation.' 
87        if (minim) write (iout,'(a)') &
88        'Conformations will be energy-minimized.'
89        write (iout,'(80(1h*)/)') 
90       endif
91       call flush(iout)
92 !
93       if (modecalc.eq.-2) then
94         call test
95         stop
96       else if (modecalc.eq.-1) then
97         write(iout,*) "call check_bond next"
98         call check_bond
99         stop
100       endif
101 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
102
103 #ifdef MPI
104       if (fg_rank.gt.0) then
105 ! Fine-grain slaves just do energy and gradient components.
106         call ergastulum ! slave workhouse in Latin
107       else
108 #endif
109       if (modecalc.eq.0) then
110         call exec_eeval_or_minim
111       else if (modecalc.eq.1) then
112         call exec_regularize
113       else if (modecalc.eq.2) then
114         call exec_thread
115       else if (modecalc.eq.3 .or. modecalc .eq.6) then
116         call exec_MC
117       else if (modecalc.eq.4) then
118         call exec_mult_eeval_or_minim
119       else if (modecalc.eq.5) then
120         call exec_checkgrad
121       else if (ModeCalc.eq.7) then
122         call exec_map
123       else if (ModeCalc.eq.8) then
124         call exec_CSA
125       else if (modecalc.eq.11) then
126         call exec_softreg
127       else if (modecalc.eq.12) then
128         call exec_MD
129       else if (modecalc.eq.14) then
130         call exec_MREMD
131       else
132         write (iout,'(a,i5)') 'This calculation type is not supported:',&
133          ModeCalc
134       endif
135
136 #ifdef MPI
137       endif
138 ! Finish task.
139       if (fg_rank.eq.0) call finish_task
140 !      call memmon_print_usage()
141 #ifdef TIMING
142        call print_detailed_timing
143 #endif
144       call MPI_Finalize(ierr)
145       stop 'Bye Bye...'
146 #else
147       call dajczas(tcpu(),hrtime,mintime,sectime)
148       stop '********** Program terminated normally.'
149 #endif
150
151       end program !UNRES
152 !-----------------------------------------------------------------------------
153 !
154 !-----------------------------------------------------------------------------
155       subroutine exec_MD
156       use MPI_data      !include 'COMMON.SETUP'
157       use control_data  !include 'COMMON.CONTROL'
158       use geometry, only:chainbuild
159       use MDyn
160       use io_units      !include 'COMMON.IOUNITS'
161 !      use io_common
162       implicit none
163 !      include 'DIMENSIONS'
164 #ifdef MPI
165       include "mpif.h"
166 #endif
167       print *,'Start MD'
168       call alloc_MD_arrays
169       print *,'After MD alloc'
170       if (me.eq.king .or. .not. out1file) &
171          write (iout,*) "Calling chainbuild"
172       call chainbuild
173       call MD
174       return
175       end subroutine exec_MD
176 !---------------------------------------------------------------------------
177       subroutine exec_MREMD
178       use MPI_data      !include 'COMMON.SETUP'
179       use control_data  !include 'COMMON.CONTROL'
180       use io_units      !include 'COMMON.IOUNITS'
181 !      use io_common
182       use REMD_data     !include 'COMMON.REMD'
183       use geometry, only:chainbuild
184       use MREMDyn
185
186       implicit none
187 !      include 'DIMENSIONS'
188 #ifdef MPI
189       include "mpif.h"
190 #endif
191
192       integer :: i
193       call alloc_MD_arrays
194       call alloc_MREMD_arrays
195
196 !     if (me.eq.king .or. .not. out1file) &
197 !         write (iout,*) "Calling chainbuild"
198 !      call chainbuild
199       if (me.eq.king .or. .not. out1file) &
200          write (iout,*) "Calling REMD",remd_mlist,nrep
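! If no replica-multiplicity list was supplied (remd_mlist is .false.),
! assign one replica to each of the nrep temperatures before calling MREMD.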
201       if (remd_mlist) then 
202         call MREMD
203       else
204         do i=1,nrep
205           remd_m(i)=1
206         enddo
207         call MREMD
208       endif
209       return
210       end subroutine exec_MREMD
211 !-----------------------------------------------------------------------------
212       subroutine exec_eeval_or_minim
213       use MPI_data              !include 'COMMON.SETUP'
214       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
215       use io_units      !include 'COMMON.IOUNITS'
216       use names
217 !      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
218       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
219 !      use REMD         !include 'COMMON.REMD'
220 !      use MD           !include 'COMMON.MD'
221
222       use energy_data
223
224       use io_base
225       use geometry, only:chainbuild
226       use energy
227       use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
228       use minimm, only:minimize,minim_dc,sc_move
229       use compare_data !el
230       use comm_srutu
231       implicit none
232 !      implicit real*8 (a-h,o-z)
233 !      include 'DIMENSIONS'
234 #ifdef MPI
235       include 'mpif.h'
236 #endif
237       integer :: i
238 !el      common /srutu/ icall
239       real(kind=8) :: energy_(0:n_ene)
240       real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
241       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
242       real(kind=8) :: time00, evals, etota, etot, time_ene, time1
243       integer :: nharp,nft_sc,iretcode,nfun
244       integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
245       logical :: fail
246       real(kind=8) :: rms,frac,frac_nn,co
247
248       integer :: j,k
249       call alloc_compare_arrays
250       if ((indpdb.eq.0).and.(.not.read_cart)) then 
251         call chainbuild
252         write(iout,*) 'Warning: Calling chainbuild'
253       endif
254 #ifdef MPI
255       time00=MPI_Wtime()
256 #endif
257       call chainbuild_cart
258 !      write(iout,*)"in exec_eeval or minimim",split_ene
259 !      do j=1,2*nres+2
260 !        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
261 !        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
262 !      enddo
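! With split_ene the long-range (etotal_long) and short-range (etotal_short)
! contributions are evaluated and printed separately, then summed term by
! term for comparison with the full etotal call below.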
263       if (split_ene) then
264 !      write(iout,*)"in exec_eeval or minimim"
265
266        print *,"Processor",myrank," after chainbuild"
267        icall=1
268
269        call etotal_long(energy_long)
270        write (iout,*) "Printing long range energy"
271        call enerprint(energy_long)
272
273        call etotal_short(energy_short)
274        write (iout,*) "Printing short range energy"
275        call enerprint(energy_short)
276        do i=0,n_ene
277          energy_(i)=energy_long(i)+energy_short(i)
278          write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
279        enddo
280        write (iout,*) "Printing long+short range energy"
281        call enerprint(energy_)
282       endif
283
284       call etotal(energy_)
285 #ifdef MPI
286       time_ene=MPI_Wtime()-time00
287 #endif
288       write (iout,*) "Time for energy evaluation",time_ene
289       print *,"after etotal"
290       etota = energy_(0)
291       etot = etota
292       call enerprint(energy_)
293       call hairpin(.true.,nharp,iharp)
294       call secondary2(.true.)
295       if (minim) then
296 !rc overlap test
297         if (overlapsc) then 
298           print *, 'Calling OVERLAP_SC'
299           call overlap_sc(fail)
300         endif 
301
302         if (searchsc) then 
303           call sc_move(2,nres-1,10,1d10,nft_sc,etot)
304           print *,'SC_move',nft_sc,etot
305           write(iout,*) 'SC_move',nft_sc,etot
306         endif 
307
308         if (dccart) then
309           print *, 'Calling MINIM_DC'
310 #ifdef MPI
311           time1=MPI_WTIME()
312 #endif
313 !    call check_ecartint !el
314           call minim_dc(etot,iretcode,nfun)
315 !    call check_ecartint !el
316         else 
317           if (indpdb.ne.0) then 
318             call bond_regular
319             write(iout,*) 'Warning: Calling chainbuild'
320             call chainbuild
321           endif
322           call geom_to_var(nvar,varia)
323           print *,'Calling MINIMIZE.'
324 #ifdef MPI
325           time1=MPI_WTIME()
326 #endif
327 !    call check_eint
328 !        call exec_checkgrad   !el
329           call minimize(etot,varia,iretcode,nfun)
330 !     call check_eint
331 !        call exec_checkgrad   !el
332         endif
333         print *,'SUMSL return code is',iretcode,' eval ',nfun
334 #ifdef MPI
335         evals=nfun/(MPI_WTIME()-time1)
336 #endif
337         print *,'# eval/s',evals
338         print *,'refstr=',refstr
339 !        call hairpin(.true.,nharp,iharp)
340 !        call secondary2(.true.)
341         call etotal(energy_)
342         etot = energy_(0)
343         call enerprint(energy_)
344
345         call intout
346         call briefout(0,etot)
347         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
348         write (iout,'(a,i3)') 'SUMSL return code:',iretcode
349         write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
350         write (iout,'(a,f16.3)') '# of energy evaluations/sec:',evals
351       else
352         print *,'refstr=',refstr
353         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
354         print *,"after rms_nac_nnc"
355         call briefout(0,etot)
356       endif
357       if (outpdb) call pdbout(etot,titel(:32),ipdb)
358       if (outmol2) call mol2out(etot,titel(:32))
359       write(iout,*) "after exec_eeval_or_minim"
360       return
361       end subroutine exec_eeval_or_minim
362 !-----------------------------------------------------------------------------
363       subroutine exec_regularize
364 !      use MPI_data             !include 'COMMON.SETUP'
365       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
366       use io_units      !include 'COMMON.IOUNITS'
367       use names
368       use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
369       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
370 !     use REMD          !include 'COMMON.REMD'
371 !     use MD            !include 'COMMON.MD'
372       use regularize_
373       use compare
374       implicit none
375 !      implicit real*8 (a-h,o-z)
376 !      include 'DIMENSIONS'
377 #ifdef MPI
378       include 'mpif.h'
379 #endif
380       real(kind=8) :: energy_(0:n_ene)
381       real(kind=8) :: etot
382       real(kind=8) :: rms,frac,frac_nn,co
383       integer :: iretcode
384
385       call alloc_compare_arrays
386       call gen_dist_constr
387       call sc_conf
388       call intout
389       call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
390       call etotal(energy_)
391       energy_(0)=energy_(0)-energy_(14)
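! Component 14 is removed from the total; presumably this is the restraint
! term introduced by gen_dist_constr for the regularization, so the reported
! energy is the unrestrained one.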
392       etot=energy_(0)
393       call enerprint(energy_)
394       call intout
395       call briefout(0,etot)
396       if (outpdb) call pdbout(etot,titel(:32),ipdb)
397       if (outmol2) call mol2out(etot,titel(:32))
398       if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
399       write (iout,'(a,i3)') 'SUMSL return code:',iretcode
400       return
401       end subroutine exec_regularize
402 !-----------------------------------------------------------------------------
403       subroutine exec_thread
404 !      use MPI_data             !include 'COMMON.SETUP'
405       use compare
406       implicit none
407 !      include 'DIMENSIONS'
408 #ifdef MPI
409       include "mpif.h"
410 #endif
411       call alloc_compare_arrays
412       call thread_seq
413       return
414       end subroutine exec_thread
415 !-----------------------------------------------------------------------------
416       subroutine exec_MC
417 !      use MPI_data             !include 'COMMON.SETUP'
418       use control_data  !include 'COMMON.CONTROL'
419       use geometry_data
420       use energy_data
421       use mcm_md
422       implicit none
423 !      implicit real*8 (a-h,o-z)
424 !      include 'DIMENSIONS'
425       character(len=10) :: nodeinfo
426       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
427       integer :: ipar
428 #ifdef MPI
429       include "mpif.h"
430 #endif
431       call alloc_MCM_arrays
432       call mcm_setup
433       if (minim) then
434 #ifdef MPI
435         if (modecalc.eq.3) then
436           call do_mcm(ipar)
437         else
438           call entmcm
439         endif
440 #else
441         if (modecalc.eq.3) then
442           call do_mcm(ipar)
443         else
444           call entmcm
445         endif
446 #endif
447       else
448         call monte_carlo
449       endif
450       return
451       end subroutine exec_MC
452 !-----------------------------------------------------------------------------
453       subroutine exec_mult_eeval_or_minim
454       use MPI_data              !include 'COMMON.SETUP'
455       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
456       use io_units      !include 'COMMON.IOUNITS'
457       use names
458       use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
459       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
460 !      use REMD         !include 'COMMON.REMD'
461 !      use MD           !include 'COMMON.MD'
462       use io_base
463       use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
464       use energy, only:etotal,enerprint
465       use compare, only:rms_nac_nnc
466       use minimm, only:minimize!,minim_mcmf
467 !      implicit real*8 (a-h,o-z)
468 !      include 'DIMENSIONS'
469 #ifdef MPI
470       use minimm, only:minim_mcmf
471       implicit none
472       include 'mpif.h'
473       integer :: ierror,ierr
474       integer :: man
475       integer,dimension(mpi_status_size) :: muster
476 #else
477       implicit none
478 #endif
479       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
480       integer,dimension(6) :: ind
481       real(kind=8) :: energy_(0:n_ene)
482       logical :: eof
483       real(kind=8) :: etot,ene0
484       integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
485          nf_mcmf,j
486       real(kind=8) :: rms,frac,frac_nn,co,time,ene
487
488       eof=.false.
489 #ifdef MPI
490       if(me.ne.king) then
491         call minim_mcmf
492         return
493       endif
494
495       close (intin)
496       open(intin,file=intinname,status='old')
497       write (istat,'(a5,20a12)')"#    ",&
498         (wname(print_order(i)),i=1,nprint_ene)
499       if (refstr) then
500         write (istat,'(a5,20a12)')"#    ",&
501          (ename(print_order(i)),i=1,nprint_ene),&
502          "ETOT total","RMSD","nat.contact","nnt.contact"        
503       else
504         write (istat,'(a5,20a12)')"#    ",&
505           (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
506       endif
507
508       if (.not.minim) then
509         do while (.not. eof)
510           if (read_cart) then
511             read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
512             call read_x(intin,*11)
513 #ifdef MPI
514 ! Broadcast the order to compute internal coordinates to the slaves.
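! The broadcast value 6 is the ergastulum order code that makes the slaves
! call int_from_cart1 (see the iorder.eq.6 branch in subroutine ergastulum).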
515             if (nfgtasks.gt.1) &
516               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
517 #endif
518             call int_from_cart1(.false.)
519           else
520             read (intin,'(i5)',end=1100,err=1100) iconf
521             call read_angles(intin,*11)
522             call geom_to_var(nvar,varia)
523             write(iout,*) 'Warning: Calling chainbuild1'
524             call chainbuild
525           endif
526           write (iout,'(a,i7)') 'Conformation #',iconf
527           call etotal(energy_)
528           call briefout(iconf,energy_(0))
529           call enerprint(energy_)
530           etot=energy_(0)
531           if (refstr) then 
532             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
533             write (istat,'(i5,20(f12.3))') iconf,&
534             (energy_(print_order(i)),i=1,nprint_ene),etot,&
535              rms,frac,frac_nn,co
536 !jlee end
537           else
538             write (istat,'(i5,16(f12.3))') iconf,&
539            (energy_(print_order(i)),i=1,nprint_ene),etot
540           endif
541         enddo
542 1100    continue
543         goto 1101
544       endif
545
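! Master/worker task farm: the master (king) reads conformations and hands
! each one to a worker with three mpi_send calls (task indices, variables,
! reference energy); once all workers are busy, it collects a minimized
! conformation via mpi_recv before dispatching the next task to that worker.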
546       mm=0
547       imm=0
548       nft=0
549       ene0=0.0d0
550       n=0
551       iconf=0
552 !      do n=1,nzsc
553       do while (.not. eof)
554         mm=mm+1
555         if (mm.lt.nodes) then
556           if (read_cart) then
557             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
558             call read_x(intin,*11)
559 #ifdef MPI
560 ! Broadcast the order to compute internal coordinates to the slaves.
561             if (nfgtasks.gt.1) &
562               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
563 #endif
564             call int_from_cart1(.false.)
565           else
566             read (intin,'(i5)',end=11,err=11) iconf
567             call read_angles(intin,*11)
568             call geom_to_var(nvar,varia)
569             write(iout,*) 'Warning: Calling chainbuild2'
570             call chainbuild
571           endif
572           write (iout,'(a,i7)') 'Conformation #',iconf
573           n=n+1
574          imm=imm+1
575          ind(1)=1
576          ind(2)=n
577          ind(3)=0
578          ind(4)=0
579          ind(5)=0
580          ind(6)=0
581          ene0=0.0d0
582          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
583                         ierr)
584          call mpi_send(varia,nvar,mpi_double_precision,mm,&
585                         idreal,CG_COMM,ierr)
586          call mpi_send(ene0,1,mpi_double_precision,mm,&
587                         idreal,CG_COMM,ierr)
588 !         print *,'task ',n,' sent to worker ',mm,nvar
589         else
590          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
591                        CG_COMM,muster,ierr)
592          man=muster(mpi_source)
593 !         print *,'receiving result from worker ',man,' (',iii1,iii,')'
594          call mpi_recv(varia,nvar,mpi_double_precision,&
595                      man,idreal,CG_COMM,muster,ierr)
596          call mpi_recv(ene,1,&
597                      mpi_double_precision,man,idreal,&
598                      CG_COMM,muster,ierr)
599          call mpi_recv(ene0,1,&
600                      mpi_double_precision,man,idreal,&
601                      CG_COMM,muster,ierr)
602 !         print *,'result received from worker ',man,' sending now'
603
604           call var_to_geom(nvar,varia)
605           write(iout,*) 'Warning: Calling chainbuild3'
606           call chainbuild
607           call etotal(energy_)
608           iconf=ind(2)
609           write (iout,*)
610           write (iout,*)
611           write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
612
613           etot=energy_(0)
614           call enerprint(energy_)
615           call briefout(it,etot)
616 !          if (minim) call briefout(it,etot)
617           if (refstr) then 
618             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
619             write (istat,'(i5,19(f12.3))') iconf,&
620            (energy_(print_order(i)),i=1,nprint_ene),etot,&
621            rms,frac,frac_nn,co
622           else
623             write (istat,'(i5,15(f12.3))') iconf,&
624            (energy_(print_order(i)),i=1,nprint_ene),etot
625           endif
626
627           imm=imm-1
628           if (read_cart) then
629             read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
630             call read_x(intin,*11)
631 #ifdef MPI
632 ! Broadcast the order to compute internal coordinates to the slaves.
633             if (nfgtasks.gt.1) &
634               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
635 #endif
636             call int_from_cart1(.false.)
637           else
638             read (intin,'(i5)',end=1101,err=1101) iconf
639             call read_angles(intin,*11)
640             call geom_to_var(nvar,varia)
641             write(iout,*) 'Warning: Calling chainbuild4'
642             call chainbuild
643           endif
644           n=n+1
645           imm=imm+1
646           ind(1)=1
647           ind(2)=n
648           ind(3)=0
649           ind(4)=0
650           ind(5)=0
651           ind(6)=0
652           call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
653                         ierr)
654           call mpi_send(varia,nvar,mpi_double_precision,man,&
655                         idreal,CG_COMM,ierr)
656           call mpi_send(ene0,1,mpi_double_precision,man,&
657                         idreal,CG_COMM,ierr)
658           nf_mcmf=nf_mcmf+ind(4)
659           nmin=nmin+1
660         endif
661       enddo
662 11    continue
663       do j=1,imm
664         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
665                      CG_COMM,muster,ierr)
666         man=muster(mpi_source)
667         call mpi_recv(varia,nvar,mpi_double_precision,&
668                      man,idreal,CG_COMM,muster,ierr)
669         call mpi_recv(ene,1,&
670                      mpi_double_precision,man,idreal,&
671                      CG_COMM,muster,ierr)
672         call mpi_recv(ene0,1,&
673                      mpi_double_precision,man,idreal,&
674                      CG_COMM,muster,ierr)
675
676         call var_to_geom(nvar,varia)
677         write(iout,*) 'Warning: Calling chainbuild5'
678         call chainbuild
679         call etotal(energy_)
680         iconf=ind(2)
681         write (iout,*)
682         write (iout,*)
683         write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
684
685         etot=energy_(0)
686         call enerprint(energy_)
687         call briefout(it,etot)
688         if (refstr) then 
689           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
690           write (istat,'(i5,19(f12.3))') iconf,&
691          (energy_(print_order(i)),i=1,nprint_ene),etot,&
692          rms,frac,frac_nn,co
693         else
694           write (istat,'(i5,15(f12.3))') iconf,&
695           (energy_(print_order(i)),i=1,nprint_ene),etot
696         endif
697         nmin=nmin+1
698       enddo
699 1101  continue
700       do i=1, nodes-1
701          ind(1)=0
702          ind(2)=0
703          ind(3)=0
704          ind(4)=0
705          ind(5)=0
706          ind(6)=0
707          call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
708                         ierr)
709       enddo
710 #else
711       close (intin)
712       open(intin,file=intinname,status='old')
713       write (istat,'(a5,20a12)')"#    ",&
714          (wname(print_order(i)),i=1,nprint_ene)
715       write (istat,'("#    ",20(1pe12.4))') &
716          (weights(print_order(i)),i=1,nprint_ene)
717       if (refstr) then
718         write (istat,'(a5,20a12)')"#    ",&
719          (ename(print_order(i)),i=1,nprint_ene),&
720          "ETOT total","RMSD","nat.contact","nnt.contact"
721       else
722         write (istat,'(a5,14a12)')"#    ",&
723          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
724       endif
725       do while (.not. eof)
726           if (read_cart) then
727             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
728             call read_x(intin,*11)
729 #ifdef MPI
730 ! Broadcast the order to compute internal coordinates to the slaves.
731             if (nfgtasks.gt.1) &
732               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
733 #endif
734             call int_from_cart1(.false.)
735           else
736             read (intin,'(i5)',end=11,err=11) iconf
737             call read_angles(intin,*11)
738             call geom_to_var(nvar,varia)
739             write(iout,*) 'Warning: Calling chainbuild5'
740             call chainbuild
741           endif
742         write (iout,'(a,i7)') 'Conformation #',iconf
743         if (minim) call minimize(etot,varia,iretcode,nfun)
744         call etotal(energy_)
745
746         etot=energy_(0)
747         call enerprint(energy_)
748         if (minim) call briefout(it,etot) 
749         if (refstr) then 
750           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
751           write (istat,'(i5,18(f12.3))') iconf,&
752          (energy_(print_order(i)),i=1,nprint_ene),&
753          etot,rms,frac,frac_nn,co
754 !jlee end
755         else
756           write (istat,'(i5,14(f12.3))') iconf,&
757          (energy_(print_order(i)),i=1,nprint_ene),etot
758         endif
759       enddo
760    11 continue
761 #endif
762       return
763       end subroutine exec_mult_eeval_or_minim
764 !-----------------------------------------------------------------------------
765       subroutine exec_checkgrad
766 !      use MPI_data             !include 'COMMON.SETUP'
767       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
768       use io_units      !include 'COMMON.IOUNITS'
769 !el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
770       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
771 !      use REMD         !include 'COMMON.REMD'
772       use MD_data               !include 'COMMON.MD'
773       use io_base, only:intout
774       use io_config, only:read_fragments
775       use geometry
776       use energy
777       use comm_srutu
778       implicit none
779 !      implicit real*8 (a-h,o-z)
780 !      include 'DIMENSIONS'
781 #ifdef MPI
782       include 'mpif.h'
783 #endif
784 !el      integer :: icall
785 !el      common /srutu/ icall
786       real(kind=8) :: energy_(0:max_ene)
787       real(kind=8) :: etot
788       integer :: i
789 !      do i=2,nres
790 !        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
791 !        if (itype(i).ne.10) 
792 !     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
793 !      enddo
794       if (indpdb.eq.0) then 
795         write(iout,*) 'Warning: Calling chainbuild'
796         call chainbuild
797       endif
798 !      do i=0,nres
799 !        do j=1,3
800 !          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
801 !        enddo
802 !      enddo
803 !      do i=1,nres-1
804 !        if (itype(i).ne.10) then
805 !          do j=1,3
806 !            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
807 !          enddo
808 !        endif
809 !      enddo
810 !      do j=1,3
811 !        dc(j,0)=ran_number(-0.2d0,0.2d0)
812 !      enddo
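! Switch on the restraint (umbrella-sampling) machinery before the gradient
! check, presumably so that the Uconst/Uconst_back contributions and their
! gradients are exercised as well.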
813       usampl=.true.
814       totT=1.d0
815       eq_time=0.0d0
816       call read_fragments
817       call chainbuild_cart
818       call cartprint
819       call intout
820       icall=1
821       write (iout,*) "before etotal"
822       call etotal(energy_(0))
823       etot = energy_(0)
824       call enerprint(energy_(0))
825       write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
826       print *,'icheckgrad=',icheckgrad
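! Computed GOTO selecting the gradient test: icheckgrad=1 runs check_ecartint,
! 2 runs check_cartgrad, 3 runs check_eint; the first two tests are invoked
! twice, presumably to confirm that repeated evaluation gives the same result.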
827       goto (10,20,30) icheckgrad
828   10  call check_ecartint
829       call check_ecartint
830       return
831   20  call check_cartgrad
832       call check_cartgrad
833       return
834   30  call check_eint
835       return
836       end subroutine exec_checkgrad
837 !-----------------------------------------------------------------------------
838       subroutine exec_map
839 !      use map_data
840       use map_
841       use io_config, only:map_read
842       implicit none
843 ! Energy maps
844       call alloc_map_arrays
845       call map_read
846       call map
847       return
848       end subroutine exec_map
849 !-----------------------------------------------------------------------------
850       subroutine exec_CSA
851
852       use io_units      !include 'COMMON.IOUNITS'
853       use CSA
854       implicit none
855 #ifdef MPI
856       include "mpif.h"
857 #endif
858 !      include 'DIMENSIONS'
859 ! Conformational Space Annealing programmed by Jooyoung Lee.
860 ! This method works only with parallel machines!
861 #ifdef MPI
862       call alloc_CSA_arrays
863       call together
864 #else
865       write (iout,*) "CSA works on parallel machines only"
866 #endif
867       return
868       end subroutine exec_CSA
869 !-----------------------------------------------------------------------------
870       subroutine exec_softreg
871       use io_units      !include 'COMMON.IOUNITS'
872       use control_data  !include 'COMMON.CONTROL'
873       use energy_data
874       use io_base, only:intout,briefout
875       use geometry, only:chainbuild
876       use energy
877       use compare
878       implicit none
879 !      include 'DIMENSIONS'
880       real(kind=8) :: energy_(0:n_ene)
881 !el local variables
882       real(kind=8) :: rms,frac,frac_nn,co,etot
883       logical :: debug = .false.  ! assumed default; secondary2 prints extra output when .true.
884
885       call alloc_compare_arrays
886       write(iout,*) 'Warning: Calling chainbuild'
887       call chainbuild
888       call etotal(energy_)
889       call enerprint(energy_)
890       if (.not.lsecondary) then
891         write(iout,*) 'Calling secondary structure recognition'
892         call secondary2(debug)
893       else
894         write(iout,*) 'Using secondary structure supplied in pdb'
895       endif
896
897       call softreg
898
899       call etotal(energy_)
900       etot=energy_(0)
901       call enerprint(energy_)
902       call intout
903       call briefout(0,etot)
904       call secondary2(.true.)
905       if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
906       return
907       end subroutine exec_softreg
908 !-----------------------------------------------------------------------------
909 ! minimize_p.F
910 !-----------------------------------------------------------------------------
911 !el#ifdef MPI
912       subroutine ergastulum
913
914 !      implicit real*8 (a-h,o-z)
915 !      include 'DIMENSIONS'
916       use MD_data
917       use energy
918       use MDyn, only:setup_fricmat
919       use REMD, only:fricmat_mult,ginv_mult
920 #ifdef MPI
921       include "mpif.h"
922 #endif
923 !      include 'COMMON.SETUP'
924 !      include 'COMMON.DERIV'
925 !      include 'COMMON.VAR'
926 !      include 'COMMON.IOUNITS'
927 !      include 'COMMON.FFIELD'
928 !      include 'COMMON.INTERACT'
929 !      include 'COMMON.MD'
930 !      include 'COMMON.TIME1'
931       real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
932       real(kind=8) :: edum(0:n_ene),time_order(0:10)
933 !el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
934 !el      common /przechowalnia/ Gcopy
935       integer :: icall = 0
936
937 !el  local variables
938       real(kind=8) :: time00
939       integer :: iorder,i,j,nres2,ierr,ierror
940       nres2=2*nres
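! Unless the five-diagonal (FIVEDIAG) representation is compiled in, the
! slave allocates the dense nres2 x nres2 matrices Ginv, Gcopy and fricmat.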
941 #ifndef FIVEDIAG
942       if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
943 ! common.MD
944       if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
945 #endif
946 !      common /mdpmpi/
947       if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
948       if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
949       if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
950       if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
951 #ifndef FIVEDIAG
952       if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
953 #endif
954 ! Workers wait for variables, NF, and NFL from the boss
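! Order codes broadcast by the master: 0 = etotal, 1 = sum_gradient,
! 2 = etotal_short, 3 = etotal_long, 4 = ginv_mult, 5 = MD setup,
! 6 = int_from_cart1, 7 = chainbuild_cart, 8 = intcartderiv,
! 9 = fricmat_mult, 10 = setup_fricmat; a negative order terminates the loop.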
955       iorder=0
956       do while (iorder.ge.0)
957 !      write (*,*) 'Processor',fg_rank,' CG group',kolor,
958 !     & ' receives order from Master'
959         time00=MPI_Wtime()
960         call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
961         time_Bcast=time_Bcast+MPI_Wtime()-time00
962         if (icall.gt.4 .and. iorder.ge.0) &
963          time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
964        icall=icall+1
965 !      write (*,*)
966 !     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
967         if (iorder.eq.0) then
968           call zerograd
969           call etotal(edum)
970 !          write (2,*) "After etotal"
971 !          write (2,*) "dimen",dimen," dimen3",dimen3
972 !          call flush(2)
973         else if (iorder.eq.2) then
974           call zerograd
975           call etotal_short(edum)
976 !          write (2,*) "After etotal_short"
977 !          write (2,*) "dimen",dimen," dimen3",dimen3
978 !          call flush(2)
979         else if (iorder.eq.3) then
980           call zerograd
981           call etotal_long(edum)
982 !          write (2,*) "After etotal_long"
983 !          write (2,*) "dimen",dimen," dimen3",dimen3
984 !          call flush(2)
985         else if (iorder.eq.1) then
986           call sum_gradient
987 !          write (2,*) "After sum_gradient"
988 !          write (2,*) "dimen",dimen," dimen3",dimen3
989 !          call flush(2)
990         else if (iorder.eq.4) then
991           call ginv_mult(z,d_a_tmp)
992         else if (iorder.eq.5) then
993 ! Setup MD things for a slave
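! dimen = number of interaction sites (backbone nct-nnt+1 plus nside side
! chains) and dimen3 = 3*dimen Cartesian coordinates; dimen1 presumably
! counts the virtual-bond vectors of the chain.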
994           dimen=(nct-nnt+1)+nside
995           dimen1=(nct-nnt)+(nct-nnt+1)
996           dimen3=dimen*3
997 !          write (2,*) "dimen",dimen," dimen3",dimen3
998 !          call flush(2)
999           call int_bounds(dimen,igmult_start,igmult_end)
1000           igmult_start=igmult_start-1
1001           call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
1002            ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1003            my_ng_count=igmult_end-igmult_start
1004           call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
1005            MPI_INTEGER,FG_COMM,IERROR)
1006           write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
1007 !          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
1008           myginv_ng_count=nres2*my_ng_count !el maxres2
1009 !          write (2,*) "igmult_start",igmult_start," igmult_end",
1010 !     &     igmult_end," my_ng_count",my_ng_count
1011 !          call flush(2)
1012           call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1013            nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1014           call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1015            nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1016 !          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1017 !          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1018 !          call flush(2)
1019 !          call MPI_Barrier(FG_COMM,IERROR)
1020           time00=MPI_Wtime()
1021           call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1022            nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1023            myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1024 #ifdef TIMING
1025           time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
1026 #endif
1027           do i=1,dimen
1028             do j=1,2*my_ng_count
1029               ginv(j,i)=gcopy(i,j)
1030             enddo
1031           enddo
1032 !          write (2,*) "dimen",dimen," dimen3",dimen3
1033 !          write (2,*) "End MD setup"
1034 !          call flush(2)
1035 !           write (iout,*) "My chunk of ginv_block"
1036 !           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
1037         else if (iorder.eq.6) then
1038           call int_from_cart1(.false.)
1039         else if (iorder.eq.7) then
1040           call chainbuild_cart
1041         else if (iorder.eq.8) then
1042           call intcartderiv
1043         else if (iorder.eq.9) then
1044           call fricmat_mult(z,d_a_tmp)
1045         else if (iorder.eq.10) then
1046           call setup_fricmat
1047         endif
1048       enddo
1049       write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1050         ' absolute rank',myrank,' leaves ERGASTULUM.'
1051       write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1052        (' order[',i,']',time_order(i),i=0,10)
1053       return
1054       end subroutine ergastulum