1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
5 ! Program to carry out conformational search of proteins in an united-residue !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! NOTE(review): this listing is a sparse excerpt — the enclosing "program"
! statement and many body lines (gaps in the embedded line numbers) are not
! visible here. Comments below describe only what the visible code shows.
13 use control, only:tcpu
14 use io_base, only:ilen
15 use geometry, only:chainbuild
16 use control, only:dajczas
19 use compare, only: test
25 ! implicit real*8 (a-h,o-z)
26 ! include 'DIMENSIONS'
28 use MPI_data ! include 'COMMON.SETUP'
33 use MPI_data, only: me,king
36 ! include 'COMMON.TIME1'
37 ! include 'COMMON.INTERACT'
38 ! include 'COMMON.NAMES'
39 ! include 'COMMON.GEO'
40 ! include 'COMMON.HEADER'
41 ! include 'COMMON.CONTROL'
42 ! include 'COMMON.CONTACTS'
43 ! include 'COMMON.CHAIN'
44 ! include 'COMMON.VAR'
45 ! include 'COMMON.IOUNITS'
46 ! include 'COMMON.FFIELD'
47 ! include 'COMMON.REMD'
49 ! include 'COMMON.SBRIDGE'
! Local timing variables filled by dajczas() at the end of the run.
51 real(kind=8) :: hrtime,mintime,sectime
! Human-readable label for each supported run mode, indexed by modecalc
! (read from the job input; range -2..14 as assigned below).
52 character(len=64) :: text_mode_calc(-2:14)
53 text_mode_calc(-2) = 'test'
54 text_mode_calc(-1) = 'cos'
55 text_mode_calc(0) = 'Energy evaluation or minimization'
56 text_mode_calc(1) = 'Regularization of PDB structure'
57 text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58 text_mode_calc(3) = 'Monte Carlo (with minimization) '
59 text_mode_calc(4) = 'Energy minimization of multiple conformations'
60 text_mode_calc(5) = 'Checking energy gradient'
61 text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62 text_mode_calc(7) = 'Energy map'
63 text_mode_calc(8) = 'CSA calculations'
64 text_mode_calc(9) = 'Not used 9'
65 text_mode_calc(10) = 'Not used 10'
66 text_mode_calc(11) = 'Soft regularization of PDB structure'
67 text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68 text_mode_calc(13) = 'Not used 13'
69 text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
71 ! call memmon_print_usage()
75 write(iout,*)'### LAST MODIFIED 09/03/15 15:32PM by EL'
! Only the master rank ("king") prints compile/configuration info.
76 if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
80 write (iout,*) "After readrtns"
! Announce the selected calculation mode (master only unless out1file
! forces every rank to keep its own output file).
83 if (me.eq.king .or. .not. out1file) then
84 write (iout,'(2a/)') &
! NOTE(review): the continuation of this write statement is elided here;
! the visible part prints the trimmed mode label via ilen().
85 text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
87 if (minim) write (iout,'(a)') &
88 'Conformations will be energy-minimized.'
89 write (iout,'(80(1h*)/)')
! --- Dispatch on modecalc: each branch delegates to one exec_* driver ---
93 if (modecalc.eq.-2) then
96 else if (modecalc.eq.-1) then
97 write(iout,*) "call check_sc_map next"
101 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
! Fine-grain (energy-parallel) slave ranks never run a driver; they loop
! in ergastulum() serving energy/gradient requests until told to stop.
104 if (fg_rank.gt.0) then
105 ! Fine-grain slaves just do energy and gradient components.
106 call ergastulum ! slave workhouse in Latin
109 if (modecalc.eq.0) then
110 call exec_eeval_or_minim
111 else if (modecalc.eq.1) then
113 else if (modecalc.eq.2) then
115 else if (modecalc.eq.3 .or. modecalc .eq.6) then
117 else if (modecalc.eq.4) then
118 call exec_mult_eeval_or_minim
119 else if (modecalc.eq.5) then
121 else if (ModeCalc.eq.7) then
123 else if (ModeCalc.eq.8) then
125 else if (modecalc.eq.11) then
127 else if (modecalc.eq.12) then
130 else if (modecalc.eq.14) then
! Fallback branch for unsupported modecalc values (continuation elided).
133 write (iout,'(a)') 'This calculation type is not supported',&
! --- Orderly shutdown: coarse-grain master finishes, timing is printed,
! MPI is finalized, wall time is reported, and the program stops. ---
140 if (fg_rank.eq.0) call finish_task
141 ! call memmon_print_usage()
143 call print_detailed_timing
145 call MPI_Finalize(ierr)
! dajczas converts total CPU time (tcpu) into hours/minutes/seconds.
148 call dajczas(tcpu(),hrtime,mintime,sectime)
149 stop '********** Program terminated normally.'
153 !-----------------------------------------------------------------------------
155 !-----------------------------------------------------------------------------
! Fragment of exec_MD: driver for a single mesoscopic MD run (modecalc 12).
! NOTE(review): the "subroutine exec_MD" statement and most of the body are
! elided from this excerpt; only setup and one progress message are visible.
157 use MPI_data !include 'COMMON.SETUP'
158 use control_data !include 'COMMON.CONTROL'
159 use geometry, only:chainbuild,chainbuild_cart
161 use io_units !include 'COMMON.IOUNITS'
162 use compare, only:alloc_compare_arrays
165 ! include 'DIMENSIONS'
! Allocate comparison arrays before building the chain geometry.
171 call alloc_compare_arrays
172 print *,'After MD alloc'
! Progress message printed by master only, unless out1file is off.
173 if (me.eq.king .or. .not. out1file) &
174 write (iout,*) "Calling chainbuild"
182 end subroutine exec_MD
183 !---------------------------------------------------------------------------
! Driver for replica-exchange MD (modecalc 14). Allocates MREMD and
! comparison work arrays, then (in elided code) builds the chain and runs
! the REMD protocol. NOTE(review): most of the body is elided here.
184 subroutine exec_MREMD
185 use MPI_data !include 'COMMON.SETUP'
186 use control_data !include 'COMMON.CONTROL'
187 use io_units !include 'COMMON.IOUNITS'
189 use REMD_data !include 'COMMON.REMD'
190 use geometry, only:chainbuild
192 use compare, only:alloc_compare_arrays
195 ! include 'DIMENSIONS'
202 call alloc_MREMD_arrays
203 call alloc_compare_arrays
204 ! if (me.eq.king .or. .not. out1file) &
205 ! write (iout,*) "Calling chainbuild"
! Report the replica list and replica count before starting REMD.
207 if (me.eq.king .or. .not. out1file) &
208 write (iout,*) "Calling REMD",remd_mlist,nrep
218 end subroutine exec_MREMD
219 !-----------------------------------------------------------------------------
! Driver for modecalc 0: single-point energy evaluation or energy
! minimization of one conformation, with optional long/short-range energy
! split (split_ene), secondary-structure analysis, RMSD vs. reference
! structure, and PDB/MOL2 output. NOTE(review): many body lines (loops,
! endif's, timing calls) are elided from this excerpt.
220 subroutine exec_eeval_or_minim
221 use MPI_data !include 'COMMON.SETUP'
222 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
223 use io_units !include 'COMMON.IOUNITS'
225 ! use energy !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
226 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
227 ! use REMD !include 'COMMON.REMD'
228 ! use MD !include 'COMMON.MD'
233 use geometry, only:chainbuild
235 use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
236 use minimm, only:minimize,minim_dc,sc_move
240 ! implicit real*8 (a-h,o-z)
241 ! include 'DIMENSIONS'
246 !el common /srutu/ icall
! Per-term energy arrays: index 0 holds the total, 1..n_ene the components.
247 real(kind=8) :: energy_(0:n_ene)
248 real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
! Internal-coordinate variable vector for the minimizer (6 per residue).
249 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
250 real(kind=8) :: time00, evals, etota, etot, time_ene, time1
251 integer :: nharp,nft_sc,iretcode,nfun
252 integer,dimension(4,nres/3) :: iharp !(4,nres/3)(4,maxres/3)
254 real(kind=8) :: rms,frac,frac_nn,co
257 call alloc_compare_arrays
! Build Cartesian geometry from internal coordinates unless coordinates
! were already read from a PDB file or in Cartesian form.
258 if ((indpdb.eq.0).and.(.not.read_cart)) then
260 write(iout,*) 'Warning: Calling chainbuild'
266 ! write(iout,*)"in exec_eeval or minimim",split_ene
268 ! write(iout,*)"cccccc",j,(c(i,j),i=1,3)
269 ! write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
272 ! write(iout,*)"in exec_eeval or minimim"
274 print *,"Processor",myrank," after chainbuild"
! --- split_ene branch (enclosing if elided): evaluate and print the
! long-range and short-range energy contributions separately, then sum.
277 call etotal_long(energy_long)
278 write (iout,*) "Printing long range energy"
279 call enerprint(energy_long)
281 call etotal_short(energy_short)
282 write (iout,*) "Printing short range energy"
283 call enerprint(energy_short)
285 energy_(i)=energy_long(i)+energy_short(i)
286 write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
288 write (iout,*) "Printing long+short range energy"
289 call enerprint(energy_)
! Wall-clock time spent in the (elided) full etotal call.
294 time_ene=MPI_Wtime()-time00
296 write (iout,*) "Time for energy evaluation",time_ene
297 print *,"after etotal"
300 call enerprint(energy_)
! Detect beta-hairpins and assign secondary structure for reporting.
301 call hairpin(.true.,nharp,iharp)
302 call secondary2(.true.)
! --- Minimization path (enclosing conditionals elided) ---
306 print *, 'Calling OVERLAP_SC'
307 call overlap_sc(fail)
! Pre-optimize side chains 2..nres-1 before full minimization.
311 call sc_move(2,nres-1,10,1d10,nft_sc,etot)
312 print *,'SC_move',nft_sc,etot
313 write(iout,*) 'SC_move',nft_sc,etot
! Cartesian-space (dc) minimization branch.
317 print *, 'Calling MINIM_DC'
321 ! call check_ecartint !el
322 call minim_dc(etot,iretcode,nfun)
323 ! call check_ecartint !el
! Internal-coordinate minimization branch: rebuild geometry from a PDB
! start if needed, pack angles into varia, then run SUMSL via minimize().
325 if (indpdb.ne.0) then
327 write(iout,*) 'Warning: Calling chainbuild'
330 call geom_to_var(nvar,varia)
331 print *,'Calling MINIMIZE.'
336 ! call exec_checkgrad !el
337 call minimize(etot,varia,iretcode,nfun)
339 ! call exec_checkgrad !el
341 print *,'SUMSL return code is',iretcode,' eval ',nfun
! Energy evaluations per second over the minimization wall time.
343 evals=nfun/(MPI_WTIME()-time1)
345 print *,'# eval/s',evals
346 print *,'refstr=',refstr
347 ! call hairpin(.true.,nharp,iharp)
348 ! call secondary2(.true.)
351 call enerprint(energy_)
354 call briefout(0,etot)
! Compare against the reference structure when one was supplied.
355 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
356 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
357 write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
358 write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
360 print *,'refstr=',refstr,frac,frac_nn,co
361 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
362 print *,"after rms_nac_ncc"
363 call briefout(0,etot)
! Optional structure output in PDB and/or MOL2 format.
365 if (outpdb) call pdbout(etot,titel(:32),ipdb)
366 if (outmol2) call mol2out(etot,titel(:32))
367 write(iout,*) "after exec_eeval_or_minim"
369 end subroutine exec_eeval_or_minim
370 !-----------------------------------------------------------------------------
! Driver for modecalc 1: regularization of a PDB-derived structure — fit
! the united-residue chain to the reference coordinates (cref), then report
! energy, RMSD, and write the regularized structure.
! NOTE(review): several body lines are elided from this excerpt.
371 subroutine exec_regularize
372 ! use MPI_data !include 'COMMON.SETUP'
373 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
374 use io_units !include 'COMMON.IOUNITS'
376 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
377 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
378 ! use REMD !include 'COMMON.REMD'
379 ! use MD !include 'COMMON.MD'
383 ! implicit real*8 (a-h,o-z)
384 ! include 'DIMENSIONS'
388 real(kind=8) :: energy_(0:n_ene)
390 real(kind=8) :: rms,frac,frac_nn,co
393 call alloc_compare_arrays
! Regularize residues nnt..nct against reference coordinates cref.
397 call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
! Exclude energy term 14 from the reported total.
! NOTE(review): term 14's meaning is defined elsewhere (energy module).
399 energy_(0)=energy_(0)-energy_(14)
401 call enerprint(energy_)
403 call briefout(0,etot)
404 if (outpdb) call pdbout(etot,titel(:32),ipdb)
405 if (outmol2) call mol2out(etot,titel(:32))
406 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
407 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
409 end subroutine exec_regularize
410 !-----------------------------------------------------------------------------
! Driver for modecalc 2: threading of a sequence onto PDB structures.
! NOTE(review): all but the allocation call is elided from this excerpt.
411 subroutine exec_thread
412 ! use MPI_data !include 'COMMON.SETUP'
415 ! include 'DIMENSIONS'
419 call alloc_compare_arrays
422 end subroutine exec_thread
423 !-----------------------------------------------------------------------------
! Fragment of exec_MC: driver for Monte Carlo with minimization
! (modecalc 3) and entropic-sampling MC (modecalc 6).
! NOTE(review): the "subroutine exec_MC" statement and most of the body
! (including the branches of both modecalc tests) are elided here.
425 ! use MPI_data !include 'COMMON.SETUP'
426 use control_data !include 'COMMON.CONTROL'
431 ! implicit real*8 (a-h,o-z)
432 ! include 'DIMENSIONS'
433 character(len=10) :: nodeinfo
! Internal-coordinate variable vector (6 per residue).
434 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
439 call alloc_MCM_arrays
443 if (modecalc.eq.3) then
449 if (modecalc.eq.3) then
459 end subroutine exec_MC
460 !-----------------------------------------------------------------------------
! Driver for modecalc 4: energy evaluation or minimization of MANY
! conformations read from the intin file (Cartesian x-records or angle
! records, selected by elided logic). With multiple coarse-grain nodes it
! runs master/worker farming over MPI: the master hands conformations to
! workers (mpi_send of ind/varia/ene0), collects minimized results
! (mpi_recv), and writes per-conformation statistics to istat.
! NOTE(review): large parts of the body — loop constructs, the serial/
! parallel branch structure, and several continuation lines — are elided
! from this excerpt; comments describe only the visible statements.
461 subroutine exec_mult_eeval_or_minim
462 use MPI_data !include 'COMMON.SETUP'
463 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
464 use io_units !include 'COMMON.IOUNITS'
466 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
467 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
468 ! use REMD !include 'COMMON.REMD'
469 ! use MD !include 'COMMON.MD'
471 use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
472 use energy, only:etotal,enerprint
473 use compare, only:rms_nac_nnc
474 use minimm, only:minimize!,minim_mcmf
475 ! implicit real*8 (a-h,o-z)
476 ! include 'DIMENSIONS'
478 use minimm, only:minim_mcmf
481 integer :: ierror,ierr
! MPI status array for the receives below ("muster" = pattern/status).
483 real(kind=8),dimension(mpi_status_size) :: muster
487 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
! ind(1:6): task descriptor exchanged with workers (elided code packs it).
488 integer,dimension(6) :: ind
489 real(kind=8) :: energy_(0:n_ene)
491 real(kind=8) :: etot,ene0
492 integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
494 real(kind=8) :: rms,frac,frac_nn,co,time,ene
! --- Serial (single-node) path: open input and write the istat header ---
504 open(intin,file=intinname,status='old')
505 write (istat,'(a5,20a12)')"# ",&
506 (wname(print_order(i)),i=1,nprint_ene)
! Header variant with reference-structure comparison columns.
508 write (istat,'(a5,20a12)')"# ",&
509 (ename(print_order(i)),i=1,nprint_ene),&
510 "ETOT total","RMSD","nat.contact","nnt.contact"
512 write (istat,'(a5,20a12)')"# ",&
513 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
! Read one Cartesian conformation record (time, energy, then coordinates).
519 read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
520 call read_x(intin,*11)
522 ! Broadcast the order to compute internal coordinates to the slaves.
524 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
526 call int_from_cart1(.false.)
! Alternative input form: conformation number plus internal angles.
528 read (intin,'(i5)',end=1100,err=1100) iconf
529 call read_angles(intin,*11)
530 call geom_to_var(nvar,varia)
531 write(iout,*) 'Warning: Calling chainbuild1'
534 write (iout,'(a,i7)') 'Conformation #',iconf
536 call briefout(iconf,energy_(0))
537 call enerprint(energy_)
! Write one istat line, with or without reference comparison columns.
540 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
541 write (istat,'(i5,20(f12.3))') iconf,&
542 (energy_(print_order(i)),i=1,nprint_ene),etot,&
546 write (istat,'(i5,16(f12.3))') iconf,&
547 (energy_(print_order(i)),i=1,nprint_ene),etot
! --- Parallel path, phase 1: seed every worker (mm counts busy workers) ---
563 if (mm.lt.nodes) then
565 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
566 call read_x(intin,*11)
568 ! Broadcast the order to compute internal coordinates to the slaves.
570 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
572 call int_from_cart1(.false.)
574 read (intin,'(i5)',end=11,err=11) iconf
575 call read_angles(intin,*11)
576 call geom_to_var(nvar,varia)
577 write(iout,*) 'Warning: Calling chainbuild2'
580 write (iout,'(a,i7)') 'Conformation #',iconf
! Dispatch task descriptor, variables, and starting energy to worker mm.
590 call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
592 call mpi_send(varia,nvar,mpi_double_precision,mm,&
594 call mpi_send(ene0,1,mpi_double_precision,mm,&
596 ! print *,'task ',n,' sent to worker ',mm,nvar
! Phase 2: receive a finished result from any worker ...
598 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
! ... identify which worker answered from the MPI status array.
600 man=muster(mpi_source)
601 ! print *,'receiving result from worker ',man,' (',iii1,iii,')'
602 call mpi_recv(varia,nvar,mpi_double_precision,&
603 man,idreal,CG_COMM,muster,ierr)
604 call mpi_recv(ene,1,&
605 mpi_double_precision,man,idreal,&
607 call mpi_recv(ene0,1,&
608 mpi_double_precision,man,idreal,&
610 ! print *,'result received from worker ',man,' sending now'
! Rebuild geometry from the returned variables and log the result.
612 call var_to_geom(nvar,varia)
613 write(iout,*) 'Warning: Calling chainbuild3'
619 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
622 call enerprint(energy_)
623 call briefout(it,etot)
624 ! if (minim) call briefout(it,etot)
626 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
627 write (istat,'(i5,19(f12.3))') iconf,&
628 (energy_(print_order(i)),i=1,nprint_ene),etot,&
631 write (istat,'(i5,15(f12.3))') iconf,&
632 (energy_(print_order(i)),i=1,nprint_ene),etot
! Refill the now-idle worker with the next conformation from intin.
637 read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
638 call read_x(intin,*11)
640 ! Broadcast the order to compute internal coordinates to the slaves.
642 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
644 call int_from_cart1(.false.)
646 read (intin,'(i5)',end=1101,err=1101) iconf
647 call read_angles(intin,*11)
648 call geom_to_var(nvar,varia)
649 write(iout,*) 'Warning: Calling chainbuild4'
660 call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
662 call mpi_send(varia,nvar,mpi_double_precision,man,&
664 call mpi_send(ene0,1,mpi_double_precision,man,&
! Accumulate the total function-evaluation count reported by workers.
666 nf_mcmf=nf_mcmf+ind(4)
! Phase 3: drain — collect the remaining outstanding results.
672 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
674 man=muster(mpi_source)
675 call mpi_recv(varia,nvar,mpi_double_precision,&
676 man,idreal,CG_COMM,muster,ierr)
677 call mpi_recv(ene,1,&
678 mpi_double_precision,man,idreal,&
680 call mpi_recv(ene0,1,&
681 mpi_double_precision,man,idreal,&
684 call var_to_geom(nvar,varia)
685 write(iout,*) 'Warning: Calling chainbuild5'
691 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
694 call enerprint(energy_)
695 call briefout(it,etot)
697 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
698 write (istat,'(i5,19(f12.3))') iconf,&
699 (energy_(print_order(i)),i=1,nprint_ene),etot,&
702 write (istat,'(i5,15(f12.3))') iconf,&
703 (energy_(print_order(i)),i=1,nprint_ene),etot
! Tell every worker to shut down (termination message; tag/continuation
! elided).
715 call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
! --- Alternate serial path (elided branch): re-open input, write headers,
! then evaluate/minimize each conformation in-process. ---
720 open(intin,file=intinname,status='old')
721 write (istat,'(a5,20a12)')"# ",&
722 (wname(print_order(i)),i=1,nprint_ene)
723 write (istat,'("# ",20(1pe12.4))') &
724 (weights(print_order(i)),i=1,nprint_ene)
726 write (istat,'(a5,20a12)')"# ",&
727 (ename(print_order(i)),i=1,nprint_ene),&
728 "ETOT total","RMSD","nat.contact","nnt.contact"
730 write (istat,'(a5,14a12)')"# ",&
731 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
735 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
736 call read_x(intin,*11)
738 ! Broadcast the order to compute internal coordinates to the slaves.
740 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
742 call int_from_cart1(.false.)
744 read (intin,'(i5)',end=11,err=11) iconf
745 call read_angles(intin,*11)
746 call geom_to_var(nvar,varia)
747 write(iout,*) 'Warning: Calling chainbuild5'
750 write (iout,'(a,i7)') 'Conformation #',iconf
751 if (minim) call minimize(etot,varia,iretcode,nfun)
755 call enerprint(energy_)
756 if (minim) call briefout(it,etot)
758 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
759 write (istat,'(i5,18(f12.3))') iconf,&
760 (energy_(print_order(i)),i=1,nprint_ene),&
761 etot,rms,frac,frac_nn,co
764 write (istat,'(i5,14(f12.3))') iconf,&
765 (energy_(print_order(i)),i=1,nprint_ene),etot
771 end subroutine exec_mult_eeval_or_minim
772 !-----------------------------------------------------------------------------
! Driver for modecalc 5: numerical check of the analytical energy gradient.
! Evaluates the energy once, then dispatches on icheckgrad (1..3) to one of
! the gradient-check routines. NOTE(review): parts of the body and the
! third branch target are elided from this excerpt.
773 subroutine exec_checkgrad
774 ! use MPI_data !include 'COMMON.SETUP'
775 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
776 use io_units !include 'COMMON.IOUNITS'
777 !el use energy_data, only:icall !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
778 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
779 ! use REMD !include 'COMMON.REMD'
780 use MD_data !include 'COMMON.MD'
781 use io_base, only:intout
782 use io_config, only:read_fragments
787 ! implicit real*8 (a-h,o-z)
788 ! include 'DIMENSIONS'
793 !el common /srutu/ icall
794 real(kind=8) :: energy_(0:max_ene)
! Commented-out perturbations used when debugging the gradient check:
798 ! vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
799 ! if (itype(i).ne.10)
800 ! & vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
802 if (indpdb.eq.0) then
803 write(iout,*) 'Warning: Calling chainbuild'
808 ! dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
812 ! if (itype(i).ne.10) then
814 ! dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
819 ! dc(j,0)=ran_number(-0.2d0,0.2d0)
829 write (iout,*) "before etotal"
830 call etotal(energy_(0))
832 call enerprint(energy_(0))
833 write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
834 print *,'icheckgrad=',icheckgrad
! Computed GOTO (obsolescent Fortran): icheckgrad=1 -> label 10
! (Cartesian internal check), 2 -> label 20 (Cartesian gradient check),
! 3 -> label 30 (elided). A select case would be the modern equivalent,
! but the targets/returns are not fully visible here, so left as is.
835 goto (10,20,30) icheckgrad
836 10 call check_ecartint
839 20 call check_cartgrad
844 end subroutine exec_checkgrad
845 !-----------------------------------------------------------------------------
! Fragment of exec_map: driver for modecalc 7 (energy map over selected
! coordinates). NOTE(review): the subroutine statement and most of the
! body are elided from this excerpt.
849 use io_config, only:map_read
852 call alloc_map_arrays
856 end subroutine exec_map
857 !-----------------------------------------------------------------------------
! Fragment of exec_CSA: driver for modecalc 8 (Conformational Space
! Annealing). In this (visible) serial build it only prints that CSA
! requires a parallel machine. NOTE(review): the subroutine statement and
! surrounding preprocessor branches are elided from this excerpt.
860 use io_units !include 'COMMON.IOUNITS'
866 ! include 'DIMENSIONS'
867 ! Conformational Space Annealling programmed by Jooyoung Lee.
868 ! This method works only with parallel machines!
870 call alloc_CSA_arrays
873 write (iout,*) "CSA works on parallel machines only"
876 end subroutine exec_CSA
877 !-----------------------------------------------------------------------------
! Driver for modecalc 11: "soft" regularization of a PDB structure —
! evaluate the energy, determine or reuse secondary structure, and report
! the regularized structure and its RMSD vs. the reference.
! NOTE(review): several body lines (chainbuild/etotal calls, endif's) are
! elided from this excerpt.
878 subroutine exec_softreg
879 use io_units !include 'COMMON.IOUNITS'
880 use control_data !include 'COMMON.CONTROL'
882 use io_base, only:intout,briefout
883 use geometry, only:chainbuild
887 ! include 'DIMENSIONS'
888 real(kind=8) :: energy_(0:n_ene)
890 real(kind=8) :: rms,frac,frac_nn,co,etot
893 call alloc_compare_arrays
894 write(iout,*) 'Warning: Calling chainbuild'
897 call enerprint(energy_)
! Use the secondary structure from the PDB if supplied; otherwise run the
! recognition algorithm.
898 if (.not.lsecondary) then
899 write(iout,*) 'Calling secondary structure recognition'
900 call secondary2(debug)
902 write(iout,*) 'Using secondary structure supplied in pdb'
909 call enerprint(energy_)
911 call briefout(0,etot)
912 call secondary2(.true.)
913 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
915 end subroutine exec_softreg
916 !-----------------------------------------------------------------------------
918 !-----------------------------------------------------------------------------
! Work loop for fine-grain slave ranks (fg_rank > 0). Each iteration
! receives an integer order code from the master via MPI_Bcast on FG_COMM
! and dispatches: 0=etotal, 1=sum_gradient, 2=etotal_short, 3=etotal_long,
! 4=ginv_mult, 5=MD setup (dimension bookkeeping + Ginv scatter),
! 6=int_from_cart1, 7=chainbuild_cart, 8/9=friction-matrix work,
! 10=(elided). A negative order ends the loop. NOTE(review): several body
! lines (the loop over iorder, some branch bodies, endif's) are elided
! from this excerpt; comments describe only visible statements.
920 subroutine ergastulum
922 ! implicit real*8 (a-h,o-z)
923 ! include 'DIMENSIONS'
926 use MDyn, only:setup_fricmat
927 use REMD, only:fricmat_mult,ginv_mult
931 ! include 'COMMON.SETUP'
932 ! include 'COMMON.DERIV'
933 ! include 'COMMON.VAR'
934 ! include 'COMMON.IOUNITS'
935 ! include 'COMMON.FFIELD'
936 ! include 'COMMON.INTERACT'
937 ! include 'COMMON.MD'
938 ! include 'COMMON.TIME1'
! Scratch vectors for matrix-vector products requested by the master.
939 real(kind=8),dimension(6*nres) :: z,d_a_tmp !(maxres6) maxres6=6*maxres
! edum: throwaway energy array; time_order: per-order wait-time totals.
940 real(kind=8) :: edum(0:n_ene),time_order(0:10)
941 !el real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
942 !el common /przechowalnia/ Gcopy
946 real(kind=8) :: time00
947 integer :: iorder,i,j,nres2,ierr,ierror
! One-time allocation of the inverse-mass-matrix work arrays and the
! per-task counts/offsets used by the scatter/gather calls below.
950 if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
952 if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
955 if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
956 if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
957 if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
958 if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
960 if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
962 ! Workers wait for variables and NF, and NFL from the boss
! Main service loop: runs until the master broadcasts a negative order.
964 do while (iorder.ge.0)
965 ! write (*,*) 'Processor',fg_rank,' CG group',kolor,
966 ! & ' receives order from Master'
968 call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
! Accumulate time spent blocked in the broadcast, and (after warm-up,
! icall > 4) attribute it to the specific order code.
969 time_Bcast=time_Bcast+MPI_Wtime()-time00
970 if (icall.gt.4 .and. iorder.ge.0) &
971 time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
974 ! & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
975 if (iorder.eq.0) then
978 ! write (2,*) "After etotal"
979 ! write (2,*) "dimen",dimen," dimen3",dimen3
981 else if (iorder.eq.2) then
983 call etotal_short(edum)
984 ! write (2,*) "After etotal_short"
985 ! write (2,*) "dimen",dimen," dimen3",dimen3
987 else if (iorder.eq.3) then
989 call etotal_long(edum)
990 ! write (2,*) "After etotal_long"
991 ! write (2,*) "dimen",dimen," dimen3",dimen3
993 else if (iorder.eq.1) then
995 ! write (2,*) "After sum_gradient"
996 ! write (2,*) "dimen",dimen," dimen3",dimen3
998 else if (iorder.eq.4) then
999 call ginv_mult(z,d_a_tmp)
1000 else if (iorder.eq.5) then
1001 ! Setup MD things for a slave
! System dimensions: backbone sites plus side-chain sites.
1002 dimen=(nct-nnt+1)+nside
1003 dimen1=(nct-nnt)+(nct-nnt+1)
1005 ! write (2,*) "dimen",dimen," dimen3",dimen3
! Partition the dimen rows among fine-grain tasks and gather every
! task's (start, count) so all ranks know the full layout.
1007 call int_bounds(dimen,igmult_start,igmult_end)
1008 igmult_start=igmult_start-1
1009 call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
1010 ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1011 my_ng_count=igmult_end-igmult_start
1012 call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
1013 MPI_INTEGER,FG_COMM,IERROR)
1014 write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
1015 ! write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
! Element counts for scattering Ginv: nres2 columns per owned row.
1016 myginv_ng_count=nres2*my_ng_count !el maxres2
1017 ! write (2,*) "igmult_start",igmult_start," igmult_end",
1018 ! & igmult_end," my_ng_count",my_ng_count
1020 call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1021 nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1022 call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1023 nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1024 ! write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1025 ! write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1027 ! call MPI_Barrier(FG_COMM,IERROR)
! Scatter this rank's slice of the Ginv matrix from the master into
! gcopy, then transpose it back into ginv (loop header elided).
1029 call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1030 nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1031 myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1033 time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
1036 do j=1,2*my_ng_count
1037 ginv(j,i)=gcopy(i,j)
1040 ! write (2,*) "dimen",dimen," dimen3",dimen3
1041 ! write (2,*) "End MD setup"
1043 ! write (iout,*) "My chunk of ginv_block"
1044 ! call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
1045 else if (iorder.eq.6) then
1046 call int_from_cart1(.false.)
1047 else if (iorder.eq.7) then
1048 call chainbuild_cart
1049 else if (iorder.eq.8) then
1051 else if (iorder.eq.9) then
1052 call fricmat_mult(z,d_a_tmp)
1053 else if (iorder.eq.10) then
! Loop exited (negative order received): report wait-time statistics.
1057 write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1058 ' absolute rank',myrank,' leves ERGASTULUM.'
1059 write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1060 (' order[',i,']',time_order(i),i=0,10)
1062 end subroutine ergastulum