1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2 !                                                                              !
3 !                                U N R E S                                     !
4 !                                                                              !
5 ! Program to carry out conformational search of proteins in a united-residue   !
6 ! approximation.                                                               !
7 !                                                                              !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
9       program unres
10
11       use io_units
12       use control_data
13       use control, only:tcpu
14       use io_base, only:ilen
15       use geometry, only:chainbuild
16       use control, only:dajczas
17       use check_bond_
18       use energy
19       use compare, only: test
20       use map_
21       use MDyn
22       use MPI_
23       use io, only:readrtns
24
25 !      implicit real*8 (a-h,o-z)
26 !      include 'DIMENSIONS'
27 #ifdef MPI
28       use MPI_data ! include 'COMMON.SETUP'
29       implicit none
30       include 'mpif.h'
31       integer :: ierr
32 #else
33       use MPI_data, only: me,king
34       implicit none
35 #endif
36 !      include 'COMMON.TIME1'
37 !      include 'COMMON.INTERACT'
38 !      include 'COMMON.NAMES'
39 !      include 'COMMON.GEO'
40 !      include 'COMMON.HEADER'
41 !      include 'COMMON.CONTROL'
42 !      include 'COMMON.CONTACTS'
43 !      include 'COMMON.CHAIN'
44 !      include 'COMMON.VAR'
45 !      include 'COMMON.IOUNITS'
46 !      include 'COMMON.FFIELD'
47 !      include 'COMMON.REMD'
48 !      include 'COMMON.MD'
49 !      include 'COMMON.SBRIDGE'
50
51       real(kind=8) :: hrtime,mintime,sectime
52       character(len=64) ::  text_mode_calc(-2:14)
53       text_mode_calc(-2) = 'test'
54       text_mode_calc(-1) = 'cos'
55       text_mode_calc(0) = 'Energy evaluation or minimization'
56       text_mode_calc(1) = 'Regularization of PDB structure'
57       text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58       text_mode_calc(3) = 'Monte Carlo (with minimization) '
59       text_mode_calc(4) = 'Energy minimization of multiple conformations'
60       text_mode_calc(5) = 'Checking energy gradient'
61       text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62       text_mode_calc(7) = 'Energy map'
63       text_mode_calc(8) = 'CSA calculations'
64       text_mode_calc(9) = 'Not used 9'
65       text_mode_calc(10) = 'Not used 10'
66       text_mode_calc(11) = 'Soft regularization of PDB structure'
67       text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68       text_mode_calc(13) = 'Not used 13'
69       text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
70 !      external ilen
71 !      call memmon_print_usage()
72
73       call init_task
74       if (me.eq.king) &
75         write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'  
76       if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
78       call readrtns
79       call flush(iout)
80 !
81       if (me.eq.king .or. .not. out1file) then
82        write (iout,'(2a/)') &
83        text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
84        ' calculation.' 
85        if (minim) write (iout,'(a)') &
86        'Conformations will be energy-minimized.'
87        write (iout,'(80(1h*)/)') 
88       endif
89       call flush(iout)
90 !
91       if (modecalc.eq.-2) then
92         call test
93         stop
94       else if (modecalc.eq.-1) then
95         write(iout,*) "calling check_bond next"
96         call check_bond
97         stop
98       endif
99 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
100
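! Dispatch on MODECALC (see the text_mode_calc table above).  With MPI,
! fine-grain slave processes branch off to ergastulum instead.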
101 #ifdef MPI
102       if (fg_rank.gt.0) then
103 ! Fine-grain slaves just do energy and gradient components.
104         call ergastulum ! slave workhouse in Latin
105       else
106 #endif
107       if (modecalc.eq.0) then
108         call exec_eeval_or_minim
109       else if (modecalc.eq.1) then
110         call exec_regularize
111       else if (modecalc.eq.2) then
112         call exec_thread
113       else if (modecalc.eq.3 .or. modecalc .eq.6) then
114         call exec_MC
115       else if (modecalc.eq.4) then
116         call exec_mult_eeval_or_minim
117       else if (modecalc.eq.5) then
118         call exec_checkgrad
119       else if (ModeCalc.eq.7) then
120         call exec_map
121       else if (ModeCalc.eq.8) then
122         call exec_CSA
123       else if (modecalc.eq.11) then
124         call exec_softreg
125       else if (modecalc.eq.12) then
126         call exec_MD
127       else if (modecalc.eq.14) then
128         call exec_MREMD
129       else
130         write (iout,'(a,i5)') 'This calculation type is not supported',&
131          ModeCalc
132       endif
133
134 #ifdef MPI
135       endif
136 ! Finish task.
137       if (fg_rank.eq.0) call finish_task
138 !      call memmon_print_usage()
139 #ifdef TIMING
140        call print_detailed_timing
141 #endif
142       call MPI_Finalize(ierr)
143       stop 'Bye Bye...'
144 #else
145       call dajczas(tcpu(),hrtime,mintime,sectime)
146       stop '********** Program terminated normally.'
147 #endif
148
149       end program !UNRES
150 !-----------------------------------------------------------------------------
151 !
152 !-----------------------------------------------------------------------------
153       subroutine exec_MD
154       use MPI_data      !include 'COMMON.SETUP'
155       use control_data  !include 'COMMON.CONTROL'
156       use geometry, only:chainbuild
157       use MDyn
158       use io_units      !include 'COMMON.IOUNITS'
159 !      use io_common
160       implicit none
161 !      include 'DIMENSIONS'
162 #ifdef MPI
163       include "mpif.h"
164 #endif
165       call alloc_MD_arrays
166       if (me.eq.king .or. .not. out1file) &
167          write (iout,*) "Calling chainbuild"
168       call chainbuild
169       call MD
170       return
171       end subroutine exec_MD
172 !---------------------------------------------------------------------------
173       subroutine exec_MREMD
174       use MPI_data      !include 'COMMON.SETUP'
175       use control_data  !include 'COMMON.CONTROL'
176       use io_units      !include 'COMMON.IOUNITS'
177 !      use io_common
178       use REMD_data     !include 'COMMON.REMD'
179       use geometry, only:chainbuild
180       use MREMDyn
181
182       implicit none
183 !      include 'DIMENSIONS'
184 #ifdef MPI
185       include "mpif.h"
186 #endif
187
188       integer :: i
189       call alloc_MD_arrays
190       call alloc_MREMD_arrays
191
192       if (me.eq.king .or. .not. out1file) &
193          write (iout,*) "Calling chainbuild"
194       call chainbuild
195       if (me.eq.king .or. .not. out1file) &
196          write (iout,*) "Calling REMD"
197       if (remd_mlist) then 
198         call MREMD
199       else
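! No replica-multiplicity list was given: set every remd_m entry to 1
! before calling MREMD.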
200         do i=1,nrep
201           remd_m(i)=1
202         enddo
203         call MREMD
204       endif
205       return
206       end subroutine exec_MREMD
207 !-----------------------------------------------------------------------------
208       subroutine exec_eeval_or_minim
209       use MPI_data              !include 'COMMON.SETUP'
210       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
211       use io_units      !include 'COMMON.IOUNITS'
212       use names
213 !      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
214       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
215 !      use REMD         !include 'COMMON.REMD'
216 !      use MD           !include 'COMMON.MD'
217
218       use energy_data
219
220       use io_base
221       use geometry, only:chainbuild
222       use energy
223       use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
224       use minimm, only:minimize,minim_dc,sc_move
225       use compare_data !el
226       use comm_srutu
227       implicit none
228 !      implicit real*8 (a-h,o-z)
229 !      include 'DIMENSIONS'
230 #ifdef MPI
231       include 'mpif.h'
232 #endif
233       integer :: i
234 !el      common /srutu/ icall
235       real(kind=8) :: energy_(0:n_ene)
236       real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
237       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
238       real(kind=8) :: time00, evals, etota, etot, time_ene, time1
239       integer :: nharp,nft_sc,iretcode,nfun
240       integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
241       logical :: fail
242       real(kind=8) :: rms,frac,frac_nn,co
243
244       integer :: j,k
245       call alloc_compare_arrays
246       if (indpdb.eq.0) then
247         write(iout,*) 'Warning: Calling chainbuild'
248         call chainbuild
249       endif
250 #ifdef MPI
251       time00=MPI_Wtime()
252 #endif
253       call chainbuild_cart
254 !      write(iout,*)"in exec_eeval or minimim",split_ene
255 !      do j=1,2*nres+2
256 !        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
257 !        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
258 !      enddo
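! With SPLIT_ENE, evaluate and print the long-range and short-range energy
! contributions separately before summing them into the total.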
259       if (split_ene) then
260 !      write(iout,*)"in exec_eeval or minimim"
261
262        print *,"Processor",myrank," after chainbuild"
263        icall=1
264
265        call etotal_long(energy_long)
266        write (iout,*) "Printing long range energy"
267        call enerprint(energy_long)
268
269        call etotal_short(energy_short)
270        write (iout,*) "Printing short range energy"
271        call enerprint(energy_short)
272        do i=0,n_ene
273          energy_(i)=energy_long(i)+energy_short(i)
274          write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
275        enddo
276        write (iout,*) "Printing long+short range energy"
277        call enerprint(energy_)
278       endif
279
280       call etotal(energy_)
281 #ifdef MPI
282       time_ene=MPI_Wtime()-time00
283 #endif
284       write (iout,*) "Time for energy evaluation",time_ene
285       print *,"after etotal"
286       etota = energy_(0)
287       etot = etota
288       call enerprint(energy_)
289       call hairpin(.true.,nharp,iharp)
290       call secondary2(.true.)
291       if (minim) then
292 !rc overlap test
293         if (overlapsc) then 
294           print *, 'Calling OVERLAP_SC'
295           call overlap_sc(fail)
296         endif 
297
298         if (searchsc) then 
299           call sc_move(2,nres-1,10,1d10,nft_sc,etot)
300           print *,'SC_move',nft_sc,etot
301           write(iout,*) 'SC_move',nft_sc,etot
302         endif 
303
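! Minimize either directly in Cartesian coordinates (minim_dc) or, after
! geom_to_var, in internal variables (minimize), depending on DCCART.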
304         if (dccart) then
305           print *, 'Calling MINIM_DC'
306 #ifdef MPI
307           time1=MPI_WTIME()
308 #endif
309 !    call check_ecartint !el
310           call minim_dc(etot,iretcode,nfun)
311 !    call check_ecartint !el
312         else 
313           if (indpdb.ne.0) then 
314             call bond_regular
315             write(iout,*) 'Warning: Calling chainbuild'
316             call chainbuild
317           endif
318           call geom_to_var(nvar,varia)
319           print *,'Calling MINIMIZE.'
320 #ifdef MPI
321           time1=MPI_WTIME()
322 #endif
323 !    call check_eint
324 !        call exec_checkgrad   !el
325           call minimize(etot,varia,iretcode,nfun)
326 !     call check_eint
327 !        call exec_checkgrad   !el
328         endif
329         print *,'SUMSL return code is',iretcode,' eval ',nfun
330 #ifdef MPI
331         evals=nfun/(MPI_WTIME()-time1)
332 #endif
333         print *,'# eval/s',evals
334         print *,'refstr=',refstr
335         call hairpin(.true.,nharp,iharp)
336         call secondary2(.true.)
337         call etotal(energy_)
338         etot = energy_(0)
339         call enerprint(energy_)
340
341         call intout
342         call briefout(0,etot)
343         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
344         write (iout,'(a,i3)') 'SUMSL return code:',iretcode
345         write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
346         write (iout,'(a,f16.3)') '# of energy evaluations/sec:',evals
347       else
348         print *,'refstr=',refstr
349         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
350         call briefout(0,etot)
351       endif
352       if (outpdb) call pdbout(etot,titel(:32),ipdb)
353       if (outmol2) call mol2out(etot,titel(:32))
354 !elwrite(iout,*) "after exec_eeval_or_minim"
355       return
356       end subroutine exec_eeval_or_minim
357 !-----------------------------------------------------------------------------
358       subroutine exec_regularize
359 !      use MPI_data             !include 'COMMON.SETUP'
360       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
361       use io_units      !include 'COMMON.IOUNITS'
362       use names
363       use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
364       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
365 !     use REMD          !include 'COMMON.REMD'
366 !     use MD            !include 'COMMON.MD'
367       use regularize_
368       use compare
369       implicit none
370 !      implicit real*8 (a-h,o-z)
371 !      include 'DIMENSIONS'
372 #ifdef MPI
373       include 'mpif.h'
374 #endif
375       real(kind=8) :: energy_(0:n_ene)
376       real(kind=8) :: etot
377       real(kind=8) :: rms,frac,frac_nn,co
378       integer :: iretcode
379
380       call alloc_compare_arrays
381       call gen_dist_constr
382       call sc_conf
383       call intout
384       call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
385       call etotal(energy_)
386       energy_(0)=energy_(0)-energy_(14)
387       etot=energy_(0)
388       call enerprint(energy_)
389       call intout
390       call briefout(0,etot)
391       if (outpdb) call pdbout(etot,titel(:32),ipdb)
392       if (outmol2) call mol2out(etot,titel(:32))
393       if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
394       write (iout,'(a,i3)') 'SUMSL return code:',iretcode
395       return
396       end subroutine exec_regularize
397 !-----------------------------------------------------------------------------
398       subroutine exec_thread
399 !      use MPI_data             !include 'COMMON.SETUP'
400       use compare
401       implicit none
402 !      include 'DIMENSIONS'
403 #ifdef MPI
404       include "mpif.h"
405 #endif
406       call alloc_compare_arrays
407       call thread_seq
408       return
409       end subroutine exec_thread
410 !-----------------------------------------------------------------------------
411       subroutine exec_MC
412 !      use MPI_data             !include 'COMMON.SETUP'
413       use control_data  !include 'COMMON.CONTROL'
414       use geometry_data
415       use energy_data
416       use mcm_md
417       implicit none
418 !      implicit real*8 (a-h,o-z)
419 !      include 'DIMENSIONS'
420       character(len=10) :: nodeinfo
421       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
422       integer :: ipar
423 #ifdef MPI
424       include "mpif.h"
425 #endif
426       call alloc_MCM_arrays
427       call mcm_setup
428       if (minim) then
429 #ifdef MPI
430         if (modecalc.eq.3) then
431           call do_mcm(ipar)
432         else
433           call entmcm
434         endif
435 #else
436         if (modecalc.eq.3) then
437           call do_mcm(ipar)
438         else
439           call entmcm
440         endif
441 #endif
442       else
443         call monte_carlo
444       endif
445       return
446       end subroutine exec_MC
447 !-----------------------------------------------------------------------------
448       subroutine exec_mult_eeval_or_minim
449       use MPI_data              !include 'COMMON.SETUP'
450       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
451       use io_units      !include 'COMMON.IOUNITS'
452       use names
453       use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
454       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
455 !      use REMD         !include 'COMMON.REMD'
456 !      use MD           !include 'COMMON.MD'
457       use io_base
458       use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
459       use energy, only:etotal,enerprint
460       use compare, only:rms_nac_nnc
461       use minimm, only:minimize!,minim_mcmf
462 !      implicit real*8 (a-h,o-z)
463 !      include 'DIMENSIONS'
464 #ifdef MPI
465       use minimm, only:minim_mcmf
466       implicit none
467       include 'mpif.h'
468       integer :: ierror,ierr
469       integer :: man
470       integer,dimension(mpi_status_size) :: muster
471 #else
472       implicit none
473 #endif
474       real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
475       integer,dimension(6) :: ind
476       real(kind=8) :: energy_(0:n_ene)
477       logical :: eof
478       real(kind=8) :: etot,ene0
479       integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
480          nf_mcmf,j
481       real(kind=8) :: rms,frac,frac_nn,co,time,ene
482
483       eof=.false.
484 #ifdef MPI
485       if(me.ne.king) then
486         call minim_mcmf
487         return
488       endif
489
490       close (intin)
491       open(intin,file=intinname,status='old')
492       write (istat,'(a5,20a12)')"#    ",&
493         (wname(print_order(i)),i=1,nprint_ene)
494       if (refstr) then
495         write (istat,'(a5,20a12)')"#    ",&
496          (ename(print_order(i)),i=1,nprint_ene),&
497          "ETOT total","RMSD","nat.contact","nnt.contact"        
498       else
499         write (istat,'(a5,20a12)')"#    ",&
500           (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
501       endif
502
503       if (.not.minim) then
504         do while (.not. eof)
505           if (read_cart) then
506             read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
507             call read_x(intin,*11)
508 #ifdef MPI
509 ! Broadcast the order to compute internal coordinates to the slaves.
510             if (nfgtasks.gt.1) &
511               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
512 #endif
513             call int_from_cart1(.false.)
514           else
515             read (intin,'(i5)',end=1100,err=1100) iconf
516             call read_angles(intin,*11)
517             call geom_to_var(nvar,varia)
518             write(iout,*) 'Warning: Calling chainbuild'
519             call chainbuild
520           endif
521           write (iout,'(a,i7)') 'Conformation #',iconf
522           call etotal(energy_)
523           call briefout(iconf,energy_(0))
524           call enerprint(energy_)
525           etot=energy_(0)
526           if (refstr) then 
527             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
528             write (istat,'(i5,20(f12.3))') iconf,&
529             (energy_(print_order(i)),i=1,nprint_ene),etot,&
530              rms,frac,frac_nn,co
531 !jlee end
532           else
533             write (istat,'(i5,16(f12.3))') iconf,&
534            (energy_(print_order(i)),i=1,nprint_ene),etot
535           endif
536         enddo
537 1100    continue
538         goto 1101
539       endif
540
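! Master-worker loop: the master reads successive conformations and sends
! them to workers 1..nodes-1 for minimization; finished conformations are
! received back, re-evaluated with etotal and written to the statistics file.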
541       mm=0
542       imm=0
543       nft=0
544       ene0=0.0d0
545       n=0
546       iconf=0
547 !      do n=1,nzsc
548       do while (.not. eof)
549         mm=mm+1
550         if (mm.lt.nodes) then
551           if (read_cart) then
552             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
553             call read_x(intin,*11)
554 #ifdef MPI
555 ! Broadcast the order to compute internal coordinates to the slaves.
556             if (nfgtasks.gt.1) &
557               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
558 #endif
559             call int_from_cart1(.false.)
560           else
561             read (intin,'(i5)',end=11,err=11) iconf
562             call read_angles(intin,*11)
563             call geom_to_var(nvar,varia)
564             write(iout,*) 'Warning: Calling chainbuild'
565             call chainbuild
566           endif
567           write (iout,'(a,i7)') 'Conformation #',iconf
568           n=n+1
569          imm=imm+1
570          ind(1)=1
571          ind(2)=n
572          ind(3)=0
573          ind(4)=0
574          ind(5)=0
575          ind(6)=0
576          ene0=0.0d0
577          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
578                         ierr)
579          call mpi_send(varia,nvar,mpi_double_precision,mm,&
580                         idreal,CG_COMM,ierr)
581          call mpi_send(ene0,1,mpi_double_precision,mm,&
582                         idreal,CG_COMM,ierr)
583 !         print *,'task ',n,' sent to worker ',mm,nvar
584         else
585          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
586                        CG_COMM,muster,ierr)
587          man=muster(mpi_source)
588 !         print *,'receiving result from worker ',man,' (',iii1,iii,')'
589          call mpi_recv(varia,nvar,mpi_double_precision,&
590                      man,idreal,CG_COMM,muster,ierr)
591          call mpi_recv(ene,1,&
592                      mpi_double_precision,man,idreal,&
593                      CG_COMM,muster,ierr)
594          call mpi_recv(ene0,1,&
595                      mpi_double_precision,man,idreal,&
596                      CG_COMM,muster,ierr)
597 !         print *,'result received from worker ',man,' sending now'
598
599           call var_to_geom(nvar,varia)
600           write(iout,*) 'Warning: Calling chainbuild'
601           call chainbuild
602           call etotal(energy_)
603           iconf=ind(2)
604           write (iout,*)
605           write (iout,*)
606           write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
607
608           etot=energy_(0)
609           call enerprint(energy_)
610           call briefout(iconf,etot)
611 !          if (minim) call briefout(it,etot)
612           if (refstr) then 
613             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
614             write (istat,'(i5,19(f12.3))') iconf,&
615            (energy_(print_order(i)),i=1,nprint_ene),etot,&
616            rms,frac,frac_nn,co
617           else
618             write (istat,'(i5,15(f12.3))') iconf,&
619            (energy_(print_order(i)),i=1,nprint_ene),etot
620           endif
621
622           imm=imm-1
623           if (read_cart) then
624             read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
625             call read_x(intin,*11)
626 #ifdef MPI
627 ! Broadcast the order to compute internal coordinates to the slaves.
628             if (nfgtasks.gt.1) &
629               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
630 #endif
631             call int_from_cart1(.false.)
632           else
633             read (intin,'(i5)',end=1101,err=1101) iconf
634             call read_angles(intin,*11)
635             call geom_to_var(nvar,varia)
636             write(iout,*) 'Warning: Calling chainbuild'
637             call chainbuild
638           endif
639           n=n+1
640           imm=imm+1
641           ind(1)=1
642           ind(2)=n
643           ind(3)=0
644           ind(4)=0
645           ind(5)=0
646           ind(6)=0
647           call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
648                         ierr)
649           call mpi_send(varia,nvar,mpi_double_precision,man,&
650                         idreal,CG_COMM,ierr)
651           call mpi_send(ene0,1,mpi_double_precision,man,&
652                         idreal,CG_COMM,ierr)
653           nf_mcmf=nf_mcmf+ind(4)
654           nmin=nmin+1
655         endif
656       enddo
657 11    continue
658       do j=1,imm
659         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
660                      CG_COMM,muster,ierr)
661         man=muster(mpi_source)
662         call mpi_recv(varia,nvar,mpi_double_precision,&
663                      man,idreal,CG_COMM,muster,ierr)
664         call mpi_recv(ene,1,&
665                      mpi_double_precision,man,idreal,&
666                      CG_COMM,muster,ierr)
667         call mpi_recv(ene0,1,&
668                      mpi_double_precision,man,idreal,&
669                      CG_COMM,muster,ierr)
670
671         call var_to_geom(nvar,varia)
672         write(iout,*) 'Warning: Calling chainbuild'
673         call chainbuild
674         call etotal(energy_)
675         iconf=ind(2)
676         write (iout,*)
677         write (iout,*)
678         write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
679
680         etot=energy_(0)
681         call enerprint(energy_)
682         call briefout(iconf,etot)
683         if (refstr) then 
684           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
685           write (istat,'(i5,19(f12.3))') iconf,&
686          (energy_(print_order(i)),i=1,nprint_ene),etot,&
687          rms,frac,frac_nn,co
688         else
689           write (istat,'(i5,15(f12.3))') iconf,&
690           (energy_(print_order(i)),i=1,nprint_ene),etot
691         endif
692         nmin=nmin+1
693       enddo
694 1101  continue
695       do i=1, nodes-1
696          ind(1)=0
697          ind(2)=0
698          ind(3)=0
699          ind(4)=0
700          ind(5)=0
701          ind(6)=0
702          call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
703                         ierr)
704       enddo
705 #else
706       close (intin)
707       open(intin,file=intinname,status='old')
708       write (istat,'(a5,20a12)')"#    ",&
709          (wname(print_order(i)),i=1,nprint_ene)
710       write (istat,'("#    ",20(1pe12.4))') &
711          (weights(print_order(i)),i=1,nprint_ene)
712       if (refstr) then
713         write (istat,'(a5,20a12)')"#    ",&
714          (ename(print_order(i)),i=1,nprint_ene),&
715          "ETOT total","RMSD","nat.contact","nnt.contact"
716       else
717         write (istat,'(a5,14a12)')"#    ",&
718          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
719       endif
720       do while (.not. eof)
721           if (read_cart) then
722             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
723             call read_x(intin,*11)
724 #ifdef MPI
725 ! Broadcast the order to compute internal coordinates to the slaves.
726             if (nfgtasks.gt.1) &
727               call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
728 #endif
729             call int_from_cart1(.false.)
730           else
731             read (intin,'(i5)',end=11,err=11) iconf
732             call read_angles(intin,*11)
733             call geom_to_var(nvar,varia)
734             write(iout,*) 'Warning: Calling chainbuild'
735             call chainbuild
736           endif
737         write (iout,'(a,i7)') 'Conformation #',iconf
738         if (minim) call minimize(etot,varia,iretcode,nfun)
739         call etotal(energy_)
740
741         etot=energy_(0)
742         call enerprint(energy_)
743         if (minim) call briefout(iconf,etot)
744         if (refstr) then 
745           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
746           write (istat,'(i5,18(f12.3))') iconf,&
747          (energy_(print_order(i)),i=1,nprint_ene),&
748          etot,rms,frac,frac_nn,co
749 !jlee end
750         else
751           write (istat,'(i5,14(f12.3))') iconf,&
752          (energy_(print_order(i)),i=1,nprint_ene),etot
753         endif
754       enddo
755    11 continue
756 #endif
757       return
758       end subroutine exec_mult_eeval_or_minim
759 !-----------------------------------------------------------------------------
760       subroutine exec_checkgrad
761 !      use MPI_data             !include 'COMMON.SETUP'
762       use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
763       use io_units      !include 'COMMON.IOUNITS'
764 !el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
765       use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
766 !      use REMD         !include 'COMMON.REMD'
767       use MD_data               !include 'COMMON.MD'
768       use io_base, only:intout
769       use io_config, only:read_fragments
770       use geometry
771       use energy
772       use comm_srutu
773       implicit none
774 !      implicit real*8 (a-h,o-z)
775 !      include 'DIMENSIONS'
776 #ifdef MPI
777       include 'mpif.h'
778 #endif
779 !el      integer :: icall
780 !el      common /srutu/ icall
781       real(kind=8) :: energy_(0:max_ene)
782       real(kind=8) :: etot
783       integer :: i
784 !      do i=2,nres
785 !        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
786 !        if (itype(i).ne.10) 
787 !     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
788 !      enddo
789       if (indpdb.eq.0) then 
790         write(iout,*) 'Warning: Calling chainbuild'
791         call chainbuild
792       endif
793 !      do i=0,nres
794 !        do j=1,3
795 !          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
796 !        enddo
797 !      enddo
798 !      do i=1,nres-1
799 !        if (itype(i).ne.10) then
800 !          do j=1,3
801 !            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
802 !          enddo
803 !        endif
804 !      enddo
805 !      do j=1,3
806 !        dc(j,0)=ran_number(-0.2d0,0.2d0)
807 !      enddo
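! Switch on the restraint terms (umbrella-sampling setup read by
! read_fragments) so that their gradient contributions are also checked.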
808       usampl=.true.
809       totT=1.d0
810       eq_time=0.0d0
811       call read_fragments
812       call chainbuild_cart
813       call cartprint
814       call intout
815       icall=1
816       call etotal(energy_(0))
817       etot = energy_(0)
818       call enerprint(energy_(0))
819       write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
820       print *,'icheckgrad=',icheckgrad
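! ICHECKGRAD selects the test: 1 - check_ecartint (Cartesian gradient),
! 2 - check_cartgrad, 3 - check_eint (internal-coordinate gradient).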
821       goto (10,20,30) icheckgrad
822   10  call check_ecartint
823       return
824   20  call check_cartgrad
825       return
826   30  call check_eint
827       return
828       end subroutine exec_checkgrad
829 !-----------------------------------------------------------------------------
830       subroutine exec_map
831 !      use map_data
832       use map_
833       use io_config, only:map_read
834       implicit none
835 ! Energy maps
836       call alloc_map_arrays
837       call map_read
838       call map
839       return
840       end subroutine exec_map
841 !-----------------------------------------------------------------------------
842       subroutine exec_CSA
843
844       use io_units      !include 'COMMON.IOUNITS'
845       use CSA
846       implicit none
847 #ifdef MPI
848       include "mpif.h"
849 #endif
850 !      include 'DIMENSIONS'
851 ! Conformational Space Annealing programmed by Jooyoung Lee.
852 ! This method works only with parallel machines!
853 #ifdef MPI
854       call alloc_CSA_arrays
855       call together
856 #else
857       write (iout,*) "CSA works on parallel machines only"
858 #endif
859       return
860       end subroutine exec_CSA
861 !-----------------------------------------------------------------------------
862       subroutine exec_softreg
863       use io_units      !include 'COMMON.IOUNITS'
864       use control_data  !include 'COMMON.CONTROL'
865       use energy_data
866       use io_base, only:intout,briefout
867       use geometry, only:chainbuild
868       use energy
869       use compare
870       implicit none
871 !      include 'DIMENSIONS'
872       real(kind=8) :: energy_(0:n_ene)
873 !el local variables
874       real(kind=8) :: rms,frac,frac_nn,co,etot
875       logical :: debug
876
877       call alloc_compare_arrays
878       write(iout,*) 'Warning: Calling chainbuild'
879       call chainbuild
880       call etotal(energy_)
881       call enerprint(energy_)
882       if (.not.lsecondary) then
883         write(iout,*) 'Calling secondary structure recognition'
884         call secondary2(debug)
885       else
886         write(iout,*) 'Using secondary structure supplied in pdb'
887       endif
888
889       call softreg
890
891       call etotal(energy_)
892       etot=energy_(0)
893       call enerprint(energy_)
894       call intout
895       call briefout(0,etot)
896       call secondary2(.true.)
897       if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
898       return
899       end subroutine exec_softreg
900 !-----------------------------------------------------------------------------
901 ! minimize_p.F
902 !-----------------------------------------------------------------------------
903 !el#ifdef MPI
904       subroutine ergastulum
905
906 !      implicit real*8 (a-h,o-z)
907 !      include 'DIMENSIONS'
908       use MD_data
909       use energy
910       use MDyn, only:setup_fricmat
911       use REMD, only:fricmat_mult,ginv_mult
912 #ifdef MPI
913       include "mpif.h"
914 #endif
915 !      include 'COMMON.SETUP'
916 !      include 'COMMON.DERIV'
917 !      include 'COMMON.VAR'
918 !      include 'COMMON.IOUNITS'
919 !      include 'COMMON.FFIELD'
920 !      include 'COMMON.INTERACT'
921 !      include 'COMMON.MD'
922 !      include 'COMMON.TIME1'
923       real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
924       real(kind=8) :: edum(0:n_ene),time_order(0:10)
925 !el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
926 !el      common /przechowalnia/ Gcopy
927       integer :: icall = 0
928
929 !el  local variables
930       real(kind=8) :: time00
931       integer :: iorder,i,j,nres2,ierr,ierror
932       nres2=2*nres
933 #ifndef FIVEDIAG
934       if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
935 ! common.MD
936       if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
937 #endif
938 !      common /mdpmpi/
939       if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
940       if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
941       if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
942       if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
943 #ifndef FIVEDIAG
944       if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
945 #endif
946 ! Slave (fine-grain) processes wait in this loop for orders broadcast by the master.
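! Order codes: 0 - etotal, 1 - sum_gradient, 2 - etotal_short,
! 3 - etotal_long, 4 - ginv_mult, 5 - MD setup (matrix distribution),
! 6 - int_from_cart1, 7 - chainbuild_cart, 8 - intcartderiv,
! 9 - fricmat_mult, 10 - setup_fricmat; a negative order ends the loop.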
947       iorder=0
948       do while (iorder.ge.0)
949 !      write (*,*) 'Processor',fg_rank,' CG group',kolor,
950 !     & ' receives order from Master'
951         time00=MPI_Wtime()
952         call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
953         time_Bcast=time_Bcast+MPI_Wtime()-time00
954         if (icall.gt.4 .and. iorder.ge.0) &
955          time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
956        icall=icall+1
957 !      write (*,*)
958 !     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
959         if (iorder.eq.0) then
960           call zerograd
961           call etotal(edum)
962 !          write (2,*) "After etotal"
963 !          write (2,*) "dimen",dimen," dimen3",dimen3
964 !          call flush(2)
965         else if (iorder.eq.2) then
966           call zerograd
967           call etotal_short(edum)
968 !          write (2,*) "After etotal_short"
969 !          write (2,*) "dimen",dimen," dimen3",dimen3
970 !          call flush(2)
971         else if (iorder.eq.3) then
972           call zerograd
973           call etotal_long(edum)
974 !          write (2,*) "After etotal_long"
975 !          write (2,*) "dimen",dimen," dimen3",dimen3
976 !          call flush(2)
977         else if (iorder.eq.1) then
978           call sum_gradient
979 !          write (2,*) "After sum_gradient"
980 !          write (2,*) "dimen",dimen," dimen3",dimen3
981 !          call flush(2)
982         else if (iorder.eq.4) then
983           call ginv_mult(z,d_a_tmp)
984         else if (iorder.eq.5) then
985 ! Setup MD things for a slave
986           dimen=(nct-nnt+1)+nside
987           dimen1=(nct-nnt)+(nct-nnt+1)
988           dimen3=dimen*3
989 !          write (2,*) "dimen",dimen," dimen3",dimen3
990 !          call flush(2)
991           call int_bounds(dimen,igmult_start,igmult_end)
992           igmult_start=igmult_start-1
993           call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
994            ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
995            my_ng_count=igmult_end-igmult_start
996           call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
997            MPI_INTEGER,FG_COMM,IERROR)
998           write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
999 !          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
1000           myginv_ng_count=nres2*my_ng_count !el maxres2
1001 !          write (2,*) "igmult_start",igmult_start," igmult_end",
1002 !     &     igmult_end," my_ng_count",my_ng_count
1003 !          call flush(2)
1004           call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1005            nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1006           call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1007            nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1008 !          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1009 !          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1010 !          call flush(2)
1011 !          call MPI_Barrier(FG_COMM,IERROR)
1012           time00=MPI_Wtime()
1013           call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1014            nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1015            myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1016 #ifdef TIMING
1017           time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
1018 #endif
1019           do i=1,dimen
1020             do j=1,2*my_ng_count
1021               ginv(j,i)=gcopy(i,j)
1022             enddo
1023           enddo
1024 !          write (2,*) "dimen",dimen," dimen3",dimen3
1025 !          write (2,*) "End MD setup"
1026 !          call flush(2)
1027 !           write (iout,*) "My chunk of ginv_block"
1028 !           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
1029         else if (iorder.eq.6) then
1030           call int_from_cart1(.false.)
1031         else if (iorder.eq.7) then
1032           call chainbuild_cart
1033         else if (iorder.eq.8) then
1034           call intcartderiv
1035         else if (iorder.eq.9) then
1036           call fricmat_mult(z,d_a_tmp)
1037         else if (iorder.eq.10) then
1038           call setup_fricmat
1039         endif
1040       enddo
1041       write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1042         ' absolute rank',myrank,' leaves ERGASTULUM.'
1043       write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1044        (' order[',i,']',time_order(i),i=0,10)
1045       return
1046       end subroutine ergastulum