1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
5 ! Program to carry out conformational search of proteins in an united-residue !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! NOTE(review): this file is an incomplete extract of the UNRES main driver.
! Each line carries its original source-line number as a leading token and many
! statements are missing between consecutive lines (the `program unres` header
! itself is absent).  Comments below describe only what the visible lines show.
!
! Main driver: reads job setup, then dispatches on `modecalc` to one of the
! exec_* subroutines defined later in this file.
13 use control, only:tcpu
14 use io_base, only:ilen
15 use geometry, only:chainbuild
16 use control, only:dajczas
19 use compare, only: test
25 ! implicit real*8 (a-h,o-z)
26 ! include 'DIMENSIONS'
28 use MPI_data ! include 'COMMON.SETUP'
33 use MPI_data, only: me,king
36 ! include 'COMMON.TIME1'
37 ! include 'COMMON.INTERACT'
38 ! include 'COMMON.NAMES'
39 ! include 'COMMON.GEO'
40 ! include 'COMMON.HEADER'
41 ! include 'COMMON.CONTROL'
42 ! include 'COMMON.CONTACTS'
43 ! include 'COMMON.CHAIN'
44 ! include 'COMMON.VAR'
45 ! include 'COMMON.IOUNITS'
46 ! include 'COMMON.FFIELD'
47 ! include 'COMMON.REMD'
49 ! include 'COMMON.SBRIDGE'
51 real(kind=8) :: hrtime,mintime,sectime
! Human-readable labels for each calculation mode; indexed directly by the
! run-control variable `modecalc` (range -2..14, see dispatch below).
52 character(len=64) :: text_mode_calc(-2:14)
53 text_mode_calc(-2) = 'test'
54 text_mode_calc(-1) = 'cos'
55 text_mode_calc(0) = 'Energy evaluation or minimization'
56 text_mode_calc(1) = 'Regularization of PDB structure'
57 text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58 text_mode_calc(3) = 'Monte Carlo (with minimization) '
59 text_mode_calc(4) = 'Energy minimization of multiple conformations'
60 text_mode_calc(5) = 'Checking energy gradient'
61 text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62 text_mode_calc(7) = 'Energy map'
63 text_mode_calc(8) = 'CSA calculations'
64 text_mode_calc(9) = 'Not used 9'
65 text_mode_calc(10) = 'Not used 10'
66 text_mode_calc(11) = 'Soft regularization of PDB structure'
67 text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68 text_mode_calc(13) = 'Not used 13'
69 text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
71 ! call memmon_print_usage()
75 write(iout,*)'### LAST MODIFIED 09/03/15 15:32PM by EL'
! Only the master rank ("king") prints compile/configuration info.
76 if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
80 write (iout,*) "After readrtns"
! Banner: announce the selected calculation mode (master only, unless per-rank
! output files are requested via .not.out1file).
83 if (me.eq.king .or. .not. out1file) then
84 write (iout,'(2a/)') &
85 text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
87 if (minim) write (iout,'(a)') &
88 'Conformations will be energy-minimized.'
89 write (iout,'(80(1h*)/)')
93 if (modecalc.eq.-2) then
96 else if (modecalc.eq.-1) then
97 write(iout,*) "call check_sc_map next"
101 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
! Fine-grained worker ranks never reach the dispatch below; they loop inside
! ergastulum serving energy/gradient requests from the master.
104 if (fg_rank.gt.0) then
105 ! Fine-grain slaves just do energy and gradient components.
106 call ergastulum ! slave workhouse in Latin
! Mode dispatch (visible branches only; several `call` lines are missing from
! this extract).
109 if (modecalc.eq.0) then
110 call exec_eeval_or_minim
111 else if (modecalc.eq.1) then
113 else if (modecalc.eq.2) then
115 else if (modecalc.eq.3 .or. modecalc .eq.6) then
117 else if (modecalc.eq.4) then
118 call exec_mult_eeval_or_minim
119 else if (modecalc.eq.5) then
121 else if (ModeCalc.eq.7) then
123 else if (ModeCalc.eq.8) then
125 else if (modecalc.eq.11) then
127 else if (modecalc.eq.12) then
129 else if (modecalc.eq.14) then
132 write (iout,'(a)') 'This calculation type is not supported',&
! Shutdown: coarse-grain master finishes the task, timing is reported, and
! MPI is finalized before normal termination.
139 if (fg_rank.eq.0) call finish_task
140 ! call memmon_print_usage()
142 call print_detailed_timing
144 call MPI_Finalize(ierr)
! dajczas converts the elapsed CPU time (tcpu()) into hours/minutes/seconds.
147 call dajczas(tcpu(),hrtime,mintime,sectime)
148 stop '********** Program terminated normally.'
152 !-----------------------------------------------------------------------------
154 !-----------------------------------------------------------------------------
! Tail fragment of exec_MD (its `subroutine` header line is missing from this
! extract).  Visible behavior: announce chainbuild (master rank, or every rank
! when per-rank output is enabled) before the MD run proper.
156 use MPI_data !include 'COMMON.SETUP'
157 use control_data !include 'COMMON.CONTROL'
158 use geometry, only:chainbuild
160 use io_units !include 'COMMON.IOUNITS'
163 ! include 'DIMENSIONS'
169 print *,'After MD alloc'
170 if (me.eq.king .or. .not. out1file) &
171 write (iout,*) "Calling chainbuild"
175 end subroutine exec_MD
176 !---------------------------------------------------------------------------
! Driver for replica-exchange MD (modecalc 14).  Interior statements are
! missing from this extract; visibly it allocates MREMD arrays and announces
! the REMD run with the replica list and replica count.
177 subroutine exec_MREMD
178 use MPI_data !include 'COMMON.SETUP'
179 use control_data !include 'COMMON.CONTROL'
180 use io_units !include 'COMMON.IOUNITS'
182 use REMD_data !include 'COMMON.REMD'
183 use geometry, only:chainbuild
187 ! include 'DIMENSIONS'
194 call alloc_MREMD_arrays
196 ! if (me.eq.king .or. .not. out1file) &
197 ! write (iout,*) "Calling chainbuild"
199 if (me.eq.king .or. .not. out1file) &
200 write (iout,*) "Calling REMD",remd_mlist,nrep
210 end subroutine exec_MREMD
211 !-----------------------------------------------------------------------------
! Single-conformation energy evaluation and/or minimization (modecalc 0).
! Interior control statements (the if/else skeleton around the minimization
! paths, MPI_Wtime calls, loop headers) are missing from this extract; the
! comments below describe only the visible calls.
212 subroutine exec_eeval_or_minim
213 use MPI_data !include 'COMMON.SETUP'
214 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
215 use io_units !include 'COMMON.IOUNITS'
217 ! use energy !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
218 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
219 ! use REMD !include 'COMMON.REMD'
220 ! use MD !include 'COMMON.MD'
225 use geometry, only:chainbuild
227 use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
228 use minimm, only:minimize,minim_dc,sc_move
232 ! implicit real*8 (a-h,o-z)
233 ! include 'DIMENSIONS'
238 !el common /srutu/ icall
! Energy component arrays, indexed 0:n_ene (0 = total energy by convention
! elsewhere in this file -- see exec_regularize's energy_(0) usage).
239 real(kind=8) :: energy_(0:n_ene)
240 real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
241 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
242 real(kind=8) :: time00, evals, etota, etot, time_ene, time1
243 integer :: nharp,nft_sc,iretcode,nfun
244 integer,dimension(4,nres/3) :: iharp !(4,nres/3)(4,maxres/3)
246 real(kind=8) :: rms,frac,frac_nn,co
249 call alloc_compare_arrays
! Build the chain from internal coordinates unless Cartesian coordinates were
! read directly (read_cart) or taken from a PDB file (indpdb.ne.0).
250 if ((indpdb.eq.0).and.(.not.read_cart)) then
252 write(iout,*) 'Warning: Calling chainbuild'
258 ! write(iout,*)"in exec_eeval or minimim",split_ene
260 ! write(iout,*)"cccccc",j,(c(i,j),i=1,3)
261 ! write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
264 ! write(iout,*)"in exec_eeval or minimim"
266 print *,"Processor",myrank," after chainbuild"
! Split-energy path: evaluate long- and short-range components separately,
! then print their sum term by term.
269 call etotal_long(energy_long)
270 write (iout,*) "Printing long range energy"
271 call enerprint(energy_long)
273 call etotal_short(energy_short)
274 write (iout,*) "Printing short range energy"
275 call enerprint(energy_short)
277 energy_(i)=energy_long(i)+energy_short(i)
278 write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
280 write (iout,*) "Printing long+short range energy"
281 call enerprint(energy_)
286 time_ene=MPI_Wtime()-time00
288 write (iout,*) "Time for energy evaluation",time_ene
289 print *,"after etotal"
292 call enerprint(energy_)
! Structure analysis of the evaluated conformation.
293 call hairpin(.true.,nharp,iharp)
294 call secondary2(.true.)
! Side-chain relaxation: remove overlaps, then optimize side chains only.
298 print *, 'Calling OVERLAP_SC'
299 call overlap_sc(fail)
303 call sc_move(2,nres-1,10,1d10,nft_sc,etot)
304 print *,'SC_move',nft_sc,etot
305 write(iout,*) 'SC_move',nft_sc,etot
! Minimization in Cartesian (virtual-bond vector) space.
309 print *, 'Calling MINIM_DC'
313 ! call check_ecartint !el
314 call minim_dc(etot,iretcode,nfun)
315 ! call check_ecartint !el
! Minimization in internal-variable space: pack angles into varia first.
317 if (indpdb.ne.0) then
319 write(iout,*) 'Warning: Calling chainbuild'
322 call geom_to_var(nvar,varia)
323 print *,'Calling MINIMIZE.'
328 ! call exec_checkgrad !el
329 call minimize(etot,varia,iretcode,nfun)
331 ! call exec_checkgrad !el
333 print *,'SUMSL return code is',iretcode,' eval ',nfun
335 evals=nfun/(MPI_WTIME()-time1)
337 print *,'# eval/s',evals
338 print *,'refstr=',refstr
339 ! call hairpin(.true.,nharp,iharp)
340 ! call secondary2(.true.)
343 call enerprint(energy_)
346 call briefout(0,etot)
! Compare against the reference structure when one was supplied (refstr).
347 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
348 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
349 write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
350 write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
352 print *,'refstr=',refstr,frac,frac_nn,co
353 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
354 print *,"after rms_nac_ncc"
355 call briefout(0,etot)
! Optional structure output in PDB and/or MOL2 format.
357 if (outpdb) call pdbout(etot,titel(:32),ipdb)
358 if (outmol2) call mol2out(etot,titel(:32))
359 write(iout,*) "after exec_eeval_or_minim"
361 end subroutine exec_eeval_or_minim
362 !-----------------------------------------------------------------------------
! Regularization of a PDB structure (modecalc 1): fit the model chain to the
! reference Cartesian coordinates cref over residues nnt..nct, then report
! energy, RMS, and optionally write PDB/MOL2 output.  Interior statements are
! missing from this extract.
363 subroutine exec_regularize
364 ! use MPI_data !include 'COMMON.SETUP'
365 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
366 use io_units !include 'COMMON.IOUNITS'
368 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
369 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
370 ! use REMD !include 'COMMON.REMD'
371 ! use MD !include 'COMMON.MD'
375 ! implicit real*8 (a-h,o-z)
376 ! include 'DIMENSIONS'
380 real(kind=8) :: energy_(0:n_ene)
382 real(kind=8) :: rms,frac,frac_nn,co
385 call alloc_compare_arrays
389 call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
! Subtract energy term 14 from the total (term 14's meaning is defined by the
! force field's print_order/ename tables, not visible here -- presumably the
! restraint term added for regularization; verify against energy module).
391 energy_(0)=energy_(0)-energy_(14)
393 call enerprint(energy_)
395 call briefout(0,etot)
397 if (outmol2) call mol2out(etot,titel(:32))
396 if (outpdb) call pdbout(etot,titel(:32),ipdb)
398 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
399 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
401 end subroutine exec_regularize
402 !-----------------------------------------------------------------------------
! Threading of a sequence onto PDB structures (modecalc 2).  Only the
! allocation call survives in this extract; the threading driver call itself
! is among the missing lines.
403 subroutine exec_thread
404 ! use MPI_data !include 'COMMON.SETUP'
407 ! include 'DIMENSIONS'
411 call alloc_compare_arrays
414 end subroutine exec_thread
415 !-----------------------------------------------------------------------------
! Fragment of exec_MC (its `subroutine` header line is missing from this
! extract).  Handles Monte Carlo modes; the two visible `modecalc.eq.3` tests
! presumably select plain MCM vs. entropic-sampling setup (modecalc 6) --
! the branch bodies are among the missing lines.
417 ! use MPI_data !include 'COMMON.SETUP'
418 use control_data !include 'COMMON.CONTROL'
423 ! implicit real*8 (a-h,o-z)
424 ! include 'DIMENSIONS'
425 character(len=10) :: nodeinfo
426 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
431 call alloc_MCM_arrays
435 if (modecalc.eq.3) then
441 if (modecalc.eq.3) then
451 end subroutine exec_MC
452 !-----------------------------------------------------------------------------
! Energy evaluation / minimization of MULTIPLE conformations (modecalc 4).
! Visible structure: read conformations from `intin` (either Cartesian via
! read_x + int_from_cart1, or angles via read_angles + geom_to_var), process
! each, and write per-conformation energies to `istat`.  The parallel path is
! a master/worker task farm: the master mpi_send's task index, variables, and
! reference energy to workers and mpi_recv's minimized results back (tags
! idint for integers, idreal for reals).  Much of the control skeleton (loops,
! if/else, labels 11/1100/1101 targets) is missing from this extract.
453 subroutine exec_mult_eeval_or_minim
454 use MPI_data !include 'COMMON.SETUP'
455 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
456 use io_units !include 'COMMON.IOUNITS'
458 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
459 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
460 ! use REMD !include 'COMMON.REMD'
461 ! use MD !include 'COMMON.MD'
463 use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
464 use energy, only:etotal,enerprint
465 use compare, only:rms_nac_nnc
466 use minimm, only:minimize!,minim_mcmf
467 ! implicit real*8 (a-h,o-z)
468 ! include 'DIMENSIONS'
470 use minimm, only:minim_mcmf
473 integer :: ierror,ierr
475 real(kind=8),dimension(mpi_status_size) :: muster
479 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
480 integer,dimension(6) :: ind
481 real(kind=8) :: energy_(0:n_ene)
483 real(kind=8) :: etot,ene0
484 integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
486 real(kind=8) :: rms,frac,frac_nn,co,time,ene
! --- Serial path: open input and write the statistics-file header.  The
! header gains RMSD/contact columns only when a reference structure exists.
496 open(intin,file=intinname,status='old')
497 write (istat,'(a5,20a12)')"# ",&
498 (wname(print_order(i)),i=1,nprint_ene)
500 write (istat,'(a5,20a12)')"# ",&
501 (ename(print_order(i)),i=1,nprint_ene),&
502 "ETOT total","RMSD","nat.contact","nnt.contact"
504 write (istat,'(a5,20a12)')"# ",&
505 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
! Read one conformation: Cartesian branch...
511 read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
512 call read_x(intin,*11)
514 ! Broadcast the order to compute internal coordinates to the slaves.
516 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
518 call int_from_cart1(.false.)
! ...or internal-coordinate (angles) branch.
520 read (intin,'(i5)',end=1100,err=1100) iconf
521 call read_angles(intin,*11)
522 call geom_to_var(nvar,varia)
523 write(iout,*) 'Warning: Calling chainbuild1'
526 write (iout,'(a,i7)') 'Conformation #',iconf
528 call briefout(iconf,energy_(0))
529 call enerprint(energy_)
532 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
533 write (istat,'(i5,20(f12.3))') iconf,&
534 (energy_(print_order(i)),i=1,nprint_ene),etot,&
538 write (istat,'(i5,16(f12.3))') iconf,&
539 (energy_(print_order(i)),i=1,nprint_ene),etot
! --- Parallel path, phase 1: seed every worker (mm counts busy workers) with
! an initial task while input remains.
555 if (mm.lt.nodes) then
557 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
558 call read_x(intin,*11)
560 ! Broadcast the order to compute internal coordinates to the slaves.
562 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
564 call int_from_cart1(.false.)
566 read (intin,'(i5)',end=11,err=11) iconf
567 call read_angles(intin,*11)
568 call geom_to_var(nvar,varia)
569 write(iout,*) 'Warning: Calling chainbuild2'
572 write (iout,'(a,i7)') 'Conformation #',iconf
! Ship task descriptor (ind), variables, and starting energy to worker mm.
582 call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
584 call mpi_send(varia,nvar,mpi_double_precision,mm,&
586 call mpi_send(ene0,1,mpi_double_precision,mm,&
588 ! print *,'task ',n,' sent to worker ',mm,nvar
! All workers busy: wait for any result, identify the sender via the status
! array (muster), and collect its variables and energies.
590 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
592 man=muster(mpi_source)
593 ! print *,'receiving result from worker ',man,' (',iii1,iii,')'
594 call mpi_recv(varia,nvar,mpi_double_precision,&
595 man,idreal,CG_COMM,muster,ierr)
596 call mpi_recv(ene,1,&
597 mpi_double_precision,man,idreal,&
599 call mpi_recv(ene0,1,&
600 mpi_double_precision,man,idreal,&
602 ! print *,'result received from worker ',man,' sending now'
604 call var_to_geom(nvar,varia)
605 write(iout,*) 'Warning: Calling chainbuild3'
611 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
614 call enerprint(energy_)
615 call briefout(it,etot)
616 ! if (minim) call briefout(it,etot)
618 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
619 write (istat,'(i5,19(f12.3))') iconf,&
620 (energy_(print_order(i)),i=1,nprint_ene),etot,&
623 write (istat,'(i5,15(f12.3))') iconf,&
624 (energy_(print_order(i)),i=1,nprint_ene),etot
! --- Phase 2: refill the freed worker (man) with the next conformation.
629 read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
630 call read_x(intin,*11)
632 ! Broadcast the order to compute internal coordinates to the slaves.
634 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
636 call int_from_cart1(.false.)
638 read (intin,'(i5)',end=1101,err=1101) iconf
639 call read_angles(intin,*11)
640 call geom_to_var(nvar,varia)
641 write(iout,*) 'Warning: Calling chainbuild4'
652 call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
654 call mpi_send(varia,nvar,mpi_double_precision,man,&
656 call mpi_send(ene0,1,mpi_double_precision,man,&
658 nf_mcmf=nf_mcmf+ind(4)
! --- Phase 3 (drain): no input left; collect remaining outstanding results.
664 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
666 man=muster(mpi_source)
667 call mpi_recv(varia,nvar,mpi_double_precision,&
668 man,idreal,CG_COMM,muster,ierr)
669 call mpi_recv(ene,1,&
670 mpi_double_precision,man,idreal,&
672 call mpi_recv(ene0,1,&
673 mpi_double_precision,man,idreal,&
676 call var_to_geom(nvar,varia)
677 write(iout,*) 'Warning: Calling chainbuild5'
683 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
686 call enerprint(energy_)
687 call briefout(it,etot)
689 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
690 write (istat,'(i5,19(f12.3))') iconf,&
691 (energy_(print_order(i)),i=1,nprint_ene),etot,&
694 write (istat,'(i5,15(f12.3))') iconf,&
695 (energy_(print_order(i)),i=1,nprint_ene),etot
! Tell all workers to stop (termination message, tag idint).
707 call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
! --- Serial fallback path: same read/minimize/report loop without MPI.
712 open(intin,file=intinname,status='old')
713 write (istat,'(a5,20a12)')"# ",&
714 (wname(print_order(i)),i=1,nprint_ene)
715 write (istat,'("# ",20(1pe12.4))') &
716 (weights(print_order(i)),i=1,nprint_ene)
718 write (istat,'(a5,20a12)')"# ",&
719 (ename(print_order(i)),i=1,nprint_ene),&
720 "ETOT total","RMSD","nat.contact","nnt.contact"
722 write (istat,'(a5,14a12)')"# ",&
723 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
727 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
728 call read_x(intin,*11)
730 ! Broadcast the order to compute internal coordinates to the slaves.
732 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
734 call int_from_cart1(.false.)
736 read (intin,'(i5)',end=11,err=11) iconf
737 call read_angles(intin,*11)
738 call geom_to_var(nvar,varia)
739 write(iout,*) 'Warning: Calling chainbuild5'
742 write (iout,'(a,i7)') 'Conformation #',iconf
743 if (minim) call minimize(etot,varia,iretcode,nfun)
747 call enerprint(energy_)
748 if (minim) call briefout(it,etot)
750 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
751 write (istat,'(i5,18(f12.3))') iconf,&
752 (energy_(print_order(i)),i=1,nprint_ene),&
753 etot,rms,frac,frac_nn,co
756 write (istat,'(i5,14(f12.3))') iconf,&
757 (energy_(print_order(i)),i=1,nprint_ene),etot
763 end subroutine exec_mult_eeval_or_minim
764 !-----------------------------------------------------------------------------
! Numerical check of the energy gradient (modecalc 5).  Selects the check
! variant with an obsolescent computed GOTO on icheckgrad:
!   1 -> check_ecartint, 2 -> check_cartgrad, 3 -> (target label 30, missing
! from this extract).  Interior statements are missing.
765 subroutine exec_checkgrad
766 ! use MPI_data !include 'COMMON.SETUP'
767 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
768 use io_units !include 'COMMON.IOUNITS'
769 !el use energy_data, only:icall !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
770 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
771 ! use REMD !include 'COMMON.REMD'
772 use MD_data !include 'COMMON.MD'
773 use io_base, only:intout
774 use io_config, only:read_fragments
779 ! implicit real*8 (a-h,o-z)
780 ! include 'DIMENSIONS'
785 !el common /srutu/ icall
786 real(kind=8) :: energy_(0:max_ene)
! Commented-out perturbation experiments (random bond-length/vector tweaks)
! kept by the original authors for debugging.
790 ! vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
791 ! if (itype(i).ne.10)
792 ! & vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
794 if (indpdb.eq.0) then
795 write(iout,*) 'Warning: Calling chainbuild'
800 ! dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
804 ! if (itype(i).ne.10) then
806 ! dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
811 ! dc(j,0)=ran_number(-0.2d0,0.2d0)
821 write (iout,*) "before etotal"
822 call etotal(energy_(0))
824 call enerprint(energy_(0))
825 write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
826 print *,'icheckgrad=',icheckgrad
827 goto (10,20,30) icheckgrad
828 10 call check_ecartint
831 20 call check_cartgrad
836 end subroutine exec_checkgrad
837 !-----------------------------------------------------------------------------
! Fragment of exec_map (energy map, modecalc 7); the subroutine header and the
! map driver call are missing from this extract.
841 use io_config, only:map_read
844 call alloc_map_arrays
848 end subroutine exec_map
849 !-----------------------------------------------------------------------------
! Fragment of exec_CSA (Conformational Space Annealing, modecalc 8); the
! subroutine header is missing from this extract.  CSA is available only in
! parallel builds -- the visible message is the serial-build refusal.
852 use io_units !include 'COMMON.IOUNITS'
858 ! include 'DIMENSIONS'
859 ! Conformational Space Annealling programmed by Jooyoung Lee.
860 ! This method works only with parallel machines!
862 call alloc_CSA_arrays
865 write (iout,*) "CSA works on parallel machines only"
868 end subroutine exec_CSA
869 !-----------------------------------------------------------------------------
! Soft regularization of a PDB structure (modecalc 11): evaluate the energy,
! obtain secondary structure (recognized from geometry, or taken from the PDB
! when lsecondary is set), and report energies and reference-structure
! comparison.  Interior statements are missing from this extract.
870 subroutine exec_softreg
871 use io_units !include 'COMMON.IOUNITS'
872 use control_data !include 'COMMON.CONTROL'
874 use io_base, only:intout,briefout
875 use geometry, only:chainbuild
879 ! include 'DIMENSIONS'
880 real(kind=8) :: energy_(0:n_ene)
882 real(kind=8) :: rms,frac,frac_nn,co,etot
885 call alloc_compare_arrays
886 write(iout,*) 'Warning: Calling chainbuild'
889 call enerprint(energy_)
890 if (.not.lsecondary) then
891 write(iout,*) 'Calling secondary structure recognition'
892 call secondary2(debug)
894 write(iout,*) 'Using secondary structure supplied in pdb'
901 call enerprint(energy_)
903 call briefout(0,etot)
904 call secondary2(.true.)
905 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
907 end subroutine exec_softreg
908 !-----------------------------------------------------------------------------
910 !-----------------------------------------------------------------------------
! Fine-grain worker loop ("slave workhouse") executed by ranks with
! fg_rank > 0.  Repeatedly receives an integer order code broadcast by the
! master over FG_COMM and performs the corresponding service:
!   0 -> (full energy; call among missing lines)   1 -> sum_gradient
!   2 -> etotal_short      3 -> etotal_long        4 -> ginv_mult
!   5 -> one-time MD setup (dimensions, Allgather of G-matrix partitioning,
!        Scatterv of the inverse inertia matrix Ginv)
!   6 -> int_from_cart1    7 -> chainbuild_cart    9 -> fricmat_mult
! Loops while iorder >= 0; a negative order terminates the worker.
! NOTE(review): this extract is incomplete -- the original line numbers are
! embedded at the start of each line and several statements are missing.
! Only change vs. original: fixed typo in the exit message ("leves" ->
! "leaves").
912 subroutine ergastulum
914 ! implicit real*8 (a-h,o-z)
915 ! include 'DIMENSIONS'
918 use MDyn, only:setup_fricmat
919 use REMD, only:fricmat_mult,ginv_mult
923 ! include 'COMMON.SETUP'
924 ! include 'COMMON.DERIV'
925 ! include 'COMMON.VAR'
926 ! include 'COMMON.IOUNITS'
927 ! include 'COMMON.FFIELD'
928 ! include 'COMMON.INTERACT'
929 ! include 'COMMON.MD'
930 ! include 'COMMON.TIME1'
931 real(kind=8),dimension(6*nres) :: z,d_a_tmp !(maxres6) maxres6=6*maxres
932 real(kind=8) :: edum(0:n_ene),time_order(0:10)
933 !el real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
934 !el common /przechowalnia/ Gcopy
938 real(kind=8) :: time00
939 integer :: iorder,i,j,nres2,ierr,ierror
! Lazy allocation of the matrices and partition-bookkeeping arrays used by
! orders 4, 5 and 9 (sized per fine-grain task count nfgtasks).
942 if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
944 if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
947 if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
948 if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
949 if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
950 if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
952 if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
954 ! Workers wait for variables and NF, and NFL from the boss
956 do while (iorder.ge.0)
957 ! write (*,*) 'Processor',fg_rank,' CG group',kolor,
958 ! & ' receives order from Master'
! Blocking receive of the next order code; per-order wait time is accumulated
! in time_order for the timing report below.
960 call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
961 time_Bcast=time_Bcast+MPI_Wtime()-time00
962 if (icall.gt.4 .and. iorder.ge.0) &
963 time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
966 ! & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
967 if (iorder.eq.0) then
970 ! write (2,*) "After etotal"
971 ! write (2,*) "dimen",dimen," dimen3",dimen3
973 else if (iorder.eq.2) then
975 call etotal_short(edum)
976 ! write (2,*) "After etotal_short"
977 ! write (2,*) "dimen",dimen," dimen3",dimen3
979 else if (iorder.eq.3) then
981 call etotal_long(edum)
982 ! write (2,*) "After etotal_long"
983 ! write (2,*) "dimen",dimen," dimen3",dimen3
985 else if (iorder.eq.1) then
987 ! write (2,*) "After sum_gradient"
988 ! write (2,*) "dimen",dimen," dimen3",dimen3
990 else if (iorder.eq.4) then
991 call ginv_mult(z,d_a_tmp)
992 else if (iorder.eq.5) then
993 ! Setup MD things for a slave
994 dimen=(nct-nnt+1)+nside
995 dimen1=(nct-nnt)+(nct-nnt+1)
997 ! write (2,*) "dimen",dimen," dimen3",dimen3
! Partition the G-matrix rows among fine-grain tasks; every rank learns every
! rank's start offset and count via Allgather (counts scaled by 3 for the
! three Cartesian components).
999 call int_bounds(dimen,igmult_start,igmult_end)
1000 igmult_start=igmult_start-1
1001 call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
1002 ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1003 my_ng_count=igmult_end-igmult_start
1004 call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
1005 MPI_INTEGER,FG_COMM,IERROR)
1006 write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
1007 ! write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
1008 myginv_ng_count=nres2*my_ng_count !el maxres2
1009 ! write (2,*) "igmult_start",igmult_start," igmult_end",
1010 ! & igmult_end," my_ng_count",my_ng_count
1012 call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1013 nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1014 call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1015 nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1016 ! write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1017 ! write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1019 ! call MPI_Barrier(FG_COMM,IERROR)
! Receive this rank's slice of the inverse inertia matrix from the master.
1021 call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1022 nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1023 myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1025 time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
! Transpose the received slice from gcopy into ginv.
1028 do j=1,2*my_ng_count
1029 ginv(j,i)=gcopy(i,j)
1032 ! write (2,*) "dimen",dimen," dimen3",dimen3
1033 ! write (2,*) "End MD setup"
1035 ! write (iout,*) "My chunk of ginv_block"
1036 ! call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
1037 else if (iorder.eq.6) then
1038 call int_from_cart1(.false.)
1039 else if (iorder.eq.7) then
1040 call chainbuild_cart
1041 else if (iorder.eq.8) then
1043 else if (iorder.eq.9) then
1044 call fricmat_mult(z,d_a_tmp)
1045 else if (iorder.eq.10) then
! Exit report: identify this worker and dump its accumulated per-order waits.
1049 write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1050 ' absolute rank',myrank,' leaves ERGASTULUM.'
1051 write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1052 (' order[',i,']',time_order(i),i=0,10)
1054 end subroutine ergastulum