!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) ::  text_mode_calc(-2:14)
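! Human-readable labels for the calculation modes selected by modecalc; used in the banner printed below.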
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
!
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(2a/)') &
       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
       ' calculation.'
       if (minim) write (iout,'(a)') &
       'Conformations will be energy-minimized.'
       write (iout,'(80(1h*)/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
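! Coarse-grain master: dispatch to the driver subroutine for the requested calculation mode (modecalc).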
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (ModeCalc.eq.7) then
        call exec_map
      else if (ModeCalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported',&
         ModeCalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
       call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
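! Driver for a mesoscopic molecular dynamics run (modecalc=12): allocates the MD arrays, builds the chain and calls MD.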
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MD_arrays
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      call chainbuild
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
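! Driver for replica-exchange MD (modecalc=14); if no replica-multiplicity list was read (remd_mlist), each of the nrep replicas gets multiplicity 1.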
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays

      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD"
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
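! Single-conformation energy evaluation and optional minimization (modecalc=0); with split_ene the long- and short-range contributions are also printed separately.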
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data

      use io_base
      use geometry, only:chainbuild
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp      !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      call alloc_compare_arrays
      if (indpdb.eq.0) call chainbuild
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      if (minim) then
!rc overlap test
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
!elwrite(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
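! Regularization of a PDB structure (modecalc=1): generates distance constraints and regularizes the chain before the final energy evaluation.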
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
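! Threading of the sequence on PDB structures (modecalc=2).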
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
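! Monte Carlo with minimization (modecalc=3) or entropic-sampling MC (modecalc=6); plain Monte Carlo when minim is off.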
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
#ifdef MPI
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#else
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
#endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
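! Energy evaluation or minimization of multiple conformations read from unit intin (modecalc=4); in the MPI version the workers run minim_mcmf while the master distributes the conformations.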
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

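! Task farming (master): the first nodes-1 conformations are sent directly to the workers; afterwards each new conformation is dispatched only after a finished result has been received.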
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                      man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(it,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
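! Collect the results that are still outstanding on the workers.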
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
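! Shut the workers down by sending each of them an all-zero task header.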
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            call chainbuild
          endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(it,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
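! Driver for checking the energy gradient (modecalc=5).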
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) call chainbuild
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
      usampl=.true.
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
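! Computed GOTO on icheckgrad: 1 -> check_ecartint, 2 -> check_cartgrad, 3 -> check_eint.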
      goto (10,20,30) icheckgrad
  10  call check_ecartint
      return
  20  call check_cartgrad
      return
  30  call check_eint
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
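! Soft regularization of a PDB structure (modecalc=11).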
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug

      call alloc_compare_arrays
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
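! Fine-grain worker loop: repeatedly receives an order broadcast by the master and executes the corresponding energy, gradient or matrix task; a negative order terminates the loop.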
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables, NF, and NFL from the boss
      iorder=0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum