!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Program to carry out conformational search of proteins in a united-residue  !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use compare, only: test
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
      use MPI_data ! include 'COMMON.SETUP'
      use MPI_data, only: me,king
! include 'COMMON.TIME1'
! include 'COMMON.INTERACT'
! include 'COMMON.NAMES'
! include 'COMMON.GEO'
! include 'COMMON.HEADER'
! include 'COMMON.CONTROL'
! include 'COMMON.CONTACTS'
! include 'COMMON.CHAIN'
! include 'COMMON.VAR'
! include 'COMMON.IOUNITS'
! include 'COMMON.FFIELD'
! include 'COMMON.REMD'
! include 'COMMON.SBRIDGE'
      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
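! The table above maps each allowed modecalc value to a human-readable job
! description; it is used when the job header is written to the output file
! below, and the same modecalc value selects the exec_* routine in the
! dispatch chain that follows.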
! call memmon_print_usage()
      write(iout,*)'### LAST MODIFIED 09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      if (me.eq.king .or. .not. out1file) then
        write (iout,'(2a/)') &
        text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
        ' calculation.'
        if (minim) write (iout,'(a)') &
        'Conformations will be energy-minimized.'
        write (iout,'(80(1h*)/)')
      if (modecalc.eq.-2) then
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_sc_map next"
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
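! Fine-grain slave processors (fg_rank > 0) remain in ergastulum, servicing
! energy/gradient orders broadcast by the coarse-grain master, until a
! negative order terminates their loop; see subroutine ergastulum at the end
! of this file.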
      if (modecalc.eq.0) then
!write(iout,*)"!!!!!!!!!!!!!!!!! in unres"
        call exec_eeval_or_minim
!write(iout,*)"!!!!!!!!!!!!!!!!! in unres"
      else if (modecalc.eq.1) then
      else if (modecalc.eq.2) then
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
!write(iout,*) "check grad twice"
!el call exec_checkgrad
      else if (ModeCalc.eq.7) then
      else if (ModeCalc.eq.8) then
      else if (modecalc.eq.11) then
      else if (modecalc.eq.12) then
      else if (modecalc.eq.14) then
      write (iout,'(a)') 'This calculation type is not supported',&
       ModeCalc
!elwrite(iout,*)"!!!!!!!!!!!!!!!!!"
      if (fg_rank.eq.0) call finish_task
! call memmon_print_usage()
      call print_detailed_timing
      call MPI_Finalize(ierr)
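! dajczas is assumed here to convert the total CPU time returned by tcpu()
! into hours, minutes and seconds (hrtime, mintime, sectime) for the final
! timing message written before the program stops.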
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
!-----------------------------------------------------------------------------
!-----------------------------------------------------------------------------
      subroutine exec_MD
      use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL'
      use geometry, only:chainbuild
      use io_units !include 'COMMON.IOUNITS'
! include 'DIMENSIONS'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      end subroutine exec_MD
!-----------------------------------------------------------------------------
      subroutine exec_MREMD
      use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL'
      use io_units !include 'COMMON.IOUNITS'
      use REMD_data !include 'COMMON.REMD'
      use geometry, only:chainbuild
! include 'DIMENSIONS'
      call alloc_MREMD_arrays
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD"
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
      subroutine exec_eeval_or_minim
      use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units !include 'COMMON.IOUNITS'
! use energy !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
! use REMD !include 'COMMON.REMD'
! use MD !include 'COMMON.MD'
      use geometry, only:chainbuild
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
!el common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres/3) :: iharp !(4,nres/3)(4,maxres/3)
      real(kind=8) :: rms,frac,frac_nn,co
      call alloc_compare_arrays
      if (indpdb.eq.0) call chainbuild
      print *,"Processor",myrank," after chainbuild"
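! When the energy is split into long- and short-range parts, the two
! contributions are evaluated separately (etotal_long/etotal_short) and then
! summed term by term in the loop below; the combined table should match a
! full etotal call.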
      call etotal_long(energy_long)
      write (iout,*) "Printing long range energy"
      call enerprint(energy_long)
      call etotal_short(energy_short)
      write (iout,*) "Printing short range energy"
      call enerprint(energy_short)
      do i=0,n_ene
        energy_(i)=energy_long(i)+energy_short(i)
        write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
      enddo
      write (iout,*) "Printing long+short range energy"
      call enerprint(energy_)
      time_ene=MPI_Wtime()-time00
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      call enerprint(energy_)
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      print *, 'Calling OVERLAP_SC'
      call overlap_sc(fail)
      call sc_move(2,nres-1,10,1d10,nft_sc,etot)
      print *,'SC_move',nft_sc,etot
      write(iout,*) 'SC_move',nft_sc,etot
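! Two minimization paths are used below: minim_dc apparently minimizes
! directly in the Cartesian virtual-bond (dC) representation, whereas the
! SUMSL-based minimize works on the internal variables packed into varia by
! geom_to_var.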
      print *, 'Calling MINIM_DC'
      call minim_dc(etot,iretcode,nfun)
      if (indpdb.ne.0) then
      call geom_to_var(nvar,varia)
      print *,'Calling MINIMIZE.'
      call minimize(etot,varia,iretcode,nfun)
      print *,'SUMSL return code is',iretcode,' eval ',nfun
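! Minimizer throughput: nfun SUMSL function evaluations divided by the wall
! time elapsed since time1 (assumed to have been set with MPI_WTIME just
! before the minimize call).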
      evals=nfun/(MPI_WTIME()-time1)
      print *,'# eval/s',evals
      print *,'refstr=',refstr
      call hairpin(.true.,nharp,iharp)
      call secondary2(.true.)
      call enerprint(energy_)
      call briefout(0,etot)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
      write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
      print *,'refstr=',refstr
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_regularize
! use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units !include 'COMMON.IOUNITS'
      use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
! use REMD !include 'COMMON.REMD'
! use MD !include 'COMMON.MD'
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: rms,frac,frac_nn,co
      call alloc_compare_arrays
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      energy_(0)=energy_(0)-energy_(14)
      call enerprint(energy_)
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
      subroutine exec_thread
! use MPI_data !include 'COMMON.SETUP'
! include 'DIMENSIONS'
      call alloc_compare_arrays
      end subroutine exec_thread
!-----------------------------------------------------------------------------
      subroutine exec_MC
! use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL'
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
      call alloc_MCM_arrays
      if (modecalc.eq.3) then
      if (modecalc.eq.3) then
      end subroutine exec_MC
!-----------------------------------------------------------------------------
      subroutine exec_mult_eeval_or_minim
      use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units !include 'COMMON.IOUNITS'
      use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
! use REMD !include 'COMMON.REMD'
! use MD !include 'COMMON.MD'
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
      use minimm, only:minim_mcmf
      integer :: ierror,ierr
      real(kind=8),dimension(mpi_status_size) :: muster
      real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
         nf_mcmf
      real(kind=8) :: rms,frac,frac_nn,co,time,ene
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"# ",&
        (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'(a5,20a12)')"# ",&
        (ename(print_order(i)),i=1,nprint_ene),&
        "ETOT total","RMSD","nat.contact","nnt.contact"
      write (istat,'(a5,20a12)')"# ",&
        (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
      call read_x(intin,*11)
! Broadcast the order to compute internal coordinates to the slaves.
      call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
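! (The literal order code 6 matches the iorder.eq.6 branch of ergastulum
! below, so the fine-grain slaves call int_from_cart1 together with the
! master.)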
      call int_from_cart1(.false.)
      read (intin,'(i5)',end=1100,err=1100) iconf
      call read_angles(intin,*11)
      call geom_to_var(nvar,varia)
      write (iout,'(a,i7)') 'Conformation #',iconf
      call briefout(iconf,energy_(0))
      call enerprint(energy_)
      call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (istat,'(i5,20(f12.3))') iconf,&
        (energy_(print_order(i)),i=1,nprint_ene),etot,&
        rms,frac,frac_nn,co
      write (istat,'(i5,16(f12.3))') iconf,&
        (energy_(print_order(i)),i=1,nprint_ene),etot
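! In the parallel branch the master farms conformations out to worker nodes:
! each task is described by the integer array ind together with the variables
! varia and the reference energy ene0, sent with mpi_send; the worker that
! answers is identified from the mpi_recv status array (muster), and its
! minimized variables and energies are collected and reported below.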
      if (mm.lt.nodes) then
        read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
        call read_x(intin,*11)
! Broadcast the order to compute internal coordinates to the slaves.
        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
        call int_from_cart1(.false.)
        read (intin,'(i5)',end=11,err=11) iconf
        call read_angles(intin,*11)
        call geom_to_var(nvar,varia)
        write (iout,'(a,i7)') 'Conformation #',iconf
        call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                      ierr)
        call mpi_send(varia,nvar,mpi_double_precision,mm,&
                      idreal,CG_COMM,ierr)
        call mpi_send(ene0,1,mpi_double_precision,mm,&
                      idreal,CG_COMM,ierr)
! print *,'task ',n,' sent to worker ',mm,nvar
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                      CG_COMM,muster,ierr)
        man=muster(mpi_source)
! print *,'receiving result from worker ',man,' (',iii1,iii,')'
        call mpi_recv(varia,nvar,mpi_double_precision,&
          man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
          mpi_double_precision,man,idreal,&
          CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
          mpi_double_precision,man,idreal,&
          CG_COMM,muster,ierr)
! print *,'result received from worker ',man,' sending now'
        call var_to_geom(nvar,varia)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
        call enerprint(energy_)
        call briefout(it,etot)
! if (minim) call briefout(it,etot)
        call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (istat,'(i5,19(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot,&
          rms,frac,frac_nn,co
        write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
        call read_x(intin,*11)
! Broadcast the order to compute internal coordinates to the slaves.
        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
        call int_from_cart1(.false.)
        read (intin,'(i5)',end=1101,err=1101) iconf
        call read_angles(intin,*11)
        call geom_to_var(nvar,varia)
        call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                      ierr)
        call mpi_send(varia,nvar,mpi_double_precision,man,&
                      idreal,CG_COMM,ierr)
        call mpi_send(ene0,1,mpi_double_precision,man,&
                      idreal,CG_COMM,ierr)
        nf_mcmf=nf_mcmf+ind(4)
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                      CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
          man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
          mpi_double_precision,man,idreal,&
          CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
          mpi_double_precision,man,idreal,&
          CG_COMM,muster,ierr)
        call var_to_geom(nvar,varia)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
        call enerprint(energy_)
        call briefout(it,etot)
        call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (istat,'(i5,19(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot,&
          rms,frac,frac_nn,co
        write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                      ierr)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"# ",&
        (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("# ",20(1pe12.4))') &
        (weights(print_order(i)),i=1,nprint_ene)
      write (istat,'(a5,20a12)')"# ",&
        (ename(print_order(i)),i=1,nprint_ene),&
        "ETOT total","RMSD","nat.contact","nnt.contact"
      write (istat,'(a5,14a12)')"# ",&
        (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
      call read_x(intin,*11)
! Broadcast the order to compute internal coordinates to the slaves.
      call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
      call int_from_cart1(.false.)
      read (intin,'(i5)',end=11,err=11) iconf
      call read_angles(intin,*11)
      call geom_to_var(nvar,varia)
      write (iout,'(a,i7)') 'Conformation #',iconf
      if (minim) call minimize(etot,varia,iretcode,nfun)
      call enerprint(energy_)
      if (minim) call briefout(it,etot)
      call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (istat,'(i5,18(f12.3))') iconf,&
        (energy_(print_order(i)),i=1,nprint_ene),&
        etot,rms,frac,frac_nn,co
      write (istat,'(i5,14(f12.3))') iconf,&
        (energy_(print_order(i)),i=1,nprint_ene),etot
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
      subroutine exec_checkgrad
! use MPI_data !include 'COMMON.SETUP'
      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units !include 'COMMON.IOUNITS'
!el use energy_data, only:icall !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
! use REMD !include 'COMMON.REMD'
      use MD_data !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
!el common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
! vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
! if (itype(i).ne.10)
! & vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
      if (indpdb.eq.0) call chainbuild
! dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
! if (itype(i).ne.10) then
! dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
! dc(j,0)=ran_number(-0.2d0,0.2d0)
      call etotal(energy_(0))
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
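! Legacy computed GOTO: control transfers to statement label 10, 20, or 30
! for icheckgrad = 1, 2, or 3, respectively (labels 10 and 20 invoke
! check_ecartint and check_cartgrad below).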
      goto (10,20,30) icheckgrad
  10  call check_ecartint
  20  call check_cartgrad
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
      subroutine exec_map
      use io_config, only:map_read
      call alloc_map_arrays
      end subroutine exec_map
!-----------------------------------------------------------------------------
      subroutine exec_CSA
      use io_units !include 'COMMON.IOUNITS'
! include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
      call alloc_CSA_arrays
      write (iout,*) "CSA works on parallel machines only"
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
      subroutine exec_softreg
      use io_units !include 'COMMON.IOUNITS'
      use control_data !include 'COMMON.CONTROL'
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
! include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: rms,frac,frac_nn,co,etot
      call alloc_compare_arrays
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
        write(iout,*) 'Using secondary structure supplied in pdb'
      call enerprint(energy_)
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
!-----------------------------------------------------------------------------
      subroutine ergastulum
! implicit real*8 (a-h,o-z)
! include 'DIMENSIONS'
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
! include 'COMMON.SETUP'
! include 'COMMON.DERIV'
! include 'COMMON.VAR'
! include 'COMMON.IOUNITS'
! include 'COMMON.FFIELD'
! include 'COMMON.INTERACT'
! include 'COMMON.MD'
! include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el common /przechowalnia/ Gcopy
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
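! Sizing convention assumed from the declarations above: nres2 = 2*nres covers
! the backbone plus side-chain degrees of freedom; ng_start/ng_counts and
! nginv_start/nginv_counts hold the per-task offsets and counts used by the
! MPI_Allgather and MPI_Scatterv calls that distribute Ginv among the
! fine-grain tasks in the order-5 branch below.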
! Workers wait for variables and for NF and NFL from the boss
      do while (iorder.ge.0)
! write (*,*) 'Processor',fg_rank,' CG group',kolor,
! & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
          time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
! & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call etotal(edum)
! write (2,*) "After etotal"
! write (2,*) "dimen",dimen," dimen3",dimen3
        else if (iorder.eq.2) then
          call etotal_short(edum)
! write (2,*) "After etotal_short"
! write (2,*) "dimen",dimen," dimen3",dimen3
        else if (iorder.eq.3) then
          call etotal_long(edum)
! write (2,*) "After etotal_long"
! write (2,*) "dimen",dimen," dimen3",dimen3
        else if (iorder.eq.1) then
          call sum_gradient
! write (2,*) "After sum_gradient"
! write (2,*) "dimen",dimen," dimen3",dimen3
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
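! Order 5: MD setup on a slave. dimen appears to count the interaction sites
! (backbone units from nnt to nct plus the nside side chains) and dimen1 the
! virtual bonds; int_bounds then partitions the site range among the
! fine-grain tasks, and the gathered offsets and counts feed the scatter of
! Ginv below.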
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
! write (2,*) "dimen",dimen," dimen3",dimen3
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
            ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
            MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
! write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
! write (2,*) "igmult_start",igmult_start," igmult_end",
! & igmult_end," my_ng_count",my_ng_count
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
            nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
            nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
! write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
! write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
! call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
            nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
            myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
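! Each fine-grain task receives its stripe of Ginv (presumably the inverse of
! the G metric/mass matrix used in MD) into gcopy, myginv_ng_count elements
! per task, using the offsets and counts gathered into nginv_start and
! nginv_counts above.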
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
! write (2,*) "dimen",dimen," dimen3",dimen3
! write (2,*) "End MD setup"
! write (iout,*) "My chunk of ginv_block"
! call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
        else if (iorder.eq.9) then
          write(iout,*) "before fricmat_mult"
          call fricmat_mult(z,d_a_tmp)
          write(iout,*) "after fricmat_mult"
        else if (iorder.eq.10) then
          write(iout,*) "before setup_fricmat"
          call setup_fricmat
          write(iout,*) "after setup_fricmat"
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
        (' order[',i,']',time_order(i),i=0,10)
      end subroutine ergastulum