1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
5 ! Program to carry out conformational search of proteins in an united-residue !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Main driver: reads the force field and job setup, prints a banner describing
! the requested calculation, then dispatches on modecalc to one of the exec_*
! routines defined below.
! NOTE(review): this is an excerpted listing -- the program statement and a
! number of interior lines are not visible here.
13 use control, only:tcpu
14 use io_base, only:ilen
15 use geometry, only:chainbuild
16 use control, only:dajczas
19 use compare, only: test
25 ! implicit real*8 (a-h,o-z)
26 ! include 'DIMENSIONS'
28 use MPI_data ! include 'COMMON.SETUP'
33 use MPI_data, only: me,king
36 ! include 'COMMON.TIME1'
37 ! include 'COMMON.INTERACT'
38 ! include 'COMMON.NAMES'
39 ! include 'COMMON.GEO'
40 ! include 'COMMON.HEADER'
41 ! include 'COMMON.CONTROL'
42 ! include 'COMMON.CONTACTS'
43 ! include 'COMMON.CHAIN'
44 ! include 'COMMON.VAR'
45 ! include 'COMMON.IOUNITS'
46 ! include 'COMMON.FFIELD'
47 ! include 'COMMON.REMD'
49 ! include 'COMMON.SBRIDGE'
51 real(kind=8) :: hrtime,mintime,sectime
52 character(len=64) :: text_mode_calc(-2:14)
! Human-readable labels for every supported modecalc value (-2..14); used only
! for the job banner printed below.
53 text_mode_calc(-2) = 'test'
54 text_mode_calc(-1) = 'cos'
55 text_mode_calc(0) = 'Energy evaluation or minimization'
56 text_mode_calc(1) = 'Regularization of PDB structure'
57 text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58 text_mode_calc(3) = 'Monte Carlo (with minimization) '
59 text_mode_calc(4) = 'Energy minimization of multiple conformations'
60 text_mode_calc(5) = 'Checking energy gradient'
61 text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62 text_mode_calc(7) = 'Energy map'
63 text_mode_calc(8) = 'CSA calculations'
64 text_mode_calc(9) = 'Not used 9'
65 text_mode_calc(10) = 'Not used 10'
66 text_mode_calc(11) = 'Soft regularization of PDB structure'
67 text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68 text_mode_calc(13) = 'Not used 13'
69 text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
71 ! call memmon_print_usage()
75 write(iout,*)'### LAST MODIFIED 09/03/15 15:32PM by EL'
! Only the master rank ("king") prints the compilation/configuration info.
76 if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
80 write (iout,*) "After readrtns"
! Banner: announce the calculation type; suppressed on non-master ranks when
! single-file output (out1file) is in effect.
83 if (me.eq.king .or. .not. out1file) then
84 write (iout,'(2a/)') &
85 text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
87 if (minim) write (iout,'(a)') &
88 'Conformations will be energy-minimized.'
89 write (iout,'(80(1h*)/)')
! Dispatch on modecalc; each branch delegates to a dedicated exec_* routine.
93 if (modecalc.eq.-2) then
96 else if (modecalc.eq.-1) then
97 write(iout,*) "call check_sc_map next"
101 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
! Fine-grain slaves do not take part in the dispatch below: they enter
! ergastulum and serve energy/gradient requests from the master until told
! to finish.
104 if (fg_rank.gt.0) then
105 ! Fine-grain slaves just do energy and gradient components.
106 call ergastulum ! slave workhouse in Latin
109 if (modecalc.eq.0) then
110 call exec_eeval_or_minim
111 else if (modecalc.eq.1) then
113 else if (modecalc.eq.2) then
115 else if (modecalc.eq.3 .or. modecalc .eq.6) then
117 else if (modecalc.eq.4) then
118 call exec_mult_eeval_or_minim
119 else if (modecalc.eq.5) then
121 else if (ModeCalc.eq.7) then
123 else if (ModeCalc.eq.8) then
125 else if (modecalc.eq.11) then
127 else if (modecalc.eq.12) then
130 else if (modecalc.eq.14) then
133 write (iout,'(a)') 'This calculation type is not supported',&
! Normal termination: wrap up parallel tasks, report timing, finalize MPI.
140 if (fg_rank.eq.0) call finish_task
141 ! call memmon_print_usage()
143 call print_detailed_timing
145 call MPI_Finalize(ierr)
! Convert the total CPU time into hours/minutes/seconds for the final report.
148 call dajczas(tcpu(),hrtime,mintime,sectime)
149 stop '********** Program terminated normally.'
153 !-----------------------------------------------------------------------------
155 !-----------------------------------------------------------------------------
! Driver for a single mesoscopic molecular-dynamics run (modecalc 12).
! NOTE(review): excerpted listing -- the subroutine header and parts of the
! body (including the MD call itself) are not visible here.
157 use MPI_data !include 'COMMON.SETUP'
158 use control_data !include 'COMMON.CONTROL'
159 use geometry, only:chainbuild,chainbuild_cart
161 use io_units !include 'COMMON.IOUNITS'
162 use compare, only:alloc_compare_arrays
165 ! include 'DIMENSIONS'
171 call alloc_compare_arrays
172 print *,'After MD alloc'
! Progress message only from the master (or from every rank when separate
! per-rank output files are used).
173 if (me.eq.king .or. .not. out1file) &
174 write (iout,*) "Calling chainbuild"
182 end subroutine exec_MD
183 !---------------------------------------------------------------------------
184 subroutine exec_MREMD
! Driver for replica-exchange / multiplexed replica-exchange MD
! (modecalc 14).  Allocates the MREMD and comparison work arrays and
! launches the REMD run.
! NOTE(review): excerpted listing -- some interior lines are not visible.
185 use MPI_data !include 'COMMON.SETUP'
186 use control_data !include 'COMMON.CONTROL'
187 use io_units !include 'COMMON.IOUNITS'
189 use REMD_data !include 'COMMON.REMD'
190 use geometry, only:chainbuild
192 use compare, only:alloc_compare_arrays
195 ! include 'DIMENSIONS'
202 call alloc_MREMD_arrays
203 call alloc_compare_arrays
204 ! if (me.eq.king .or. .not. out1file) &
205 ! write (iout,*) "Calling chainbuild"
207 if (me.eq.king .or. .not. out1file) &
208 write (iout,*) "Calling REMD",remd_mlist,nrep
218 end subroutine exec_MREMD
219 !-----------------------------------------------------------------------------
220 subroutine exec_eeval_or_minim
! Single-conformation energy evaluation and/or minimization (modecalc 0).
! Builds the chain if needed, optionally evaluates split (long/short range)
! energies, optionally minimizes (in Cartesian dc variables via minim_dc or
! in internal variables via SUMSL/minimize), and writes energies, comparison
! statistics and structure files.
! NOTE(review): excerpted listing -- several interior lines (loop headers,
! end-ifs and some branches) are not visible here.
221 use MPI_data !include 'COMMON.SETUP'
222 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
223 use io_units !include 'COMMON.IOUNITS'
225 ! use energy !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
226 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
227 ! use REMD !include 'COMMON.REMD'
228 ! use MD !include 'COMMON.MD'
231 use MD_data, only: iset
233 use geometry, only:chainbuild
235 use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
236 use minimm, only:minimize,minim_dc,sc_move
240 ! implicit real*8 (a-h,o-z)
241 ! include 'DIMENSIONS'
246 !el common /srutu/ icall
247 real(kind=8) :: energy_(0:n_ene)
248 real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
249 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
250 real(kind=8) :: time00, evals, etota, etot, time_ene, time1
251 integer :: nharp,nft_sc,iretcode,nfun
252 integer,dimension(4,nres) :: iharp !(4,nres/3)(4,maxres/3)
254 real(kind=8) :: rms,frac,frac_nn,co
! Make sure a valid parameter-set index is selected before any energy call.
257 if (iset.eq.0) iset=1
258 call alloc_compare_arrays
! Rebuild Cartesian coordinates from internal coordinates unless they were
! read in directly (from a PDB file or Cartesian input).
259 if ((indpdb.eq.0).and.(.not.read_cart)) then
261 write(iout,*) 'Warning: Calling chainbuild'
267 ! write(iout,*)"in exec_eeval or minimim",split_ene
269 ! write(iout,*)"cccccc",j,(c(i,j),i=1,3)
270 ! write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
273 ! write(iout,*)"in exec_eeval or minimim"
275 print *,"Processor",myrank," after chainbuild"
! Split-energy path: long- and short-range contributions are evaluated and
! printed separately, then summed term by term into energy_.
278 call etotal_long(energy_long)
279 write (iout,*) "Printing long range energy"
280 call enerprint(energy_long)
282 call etotal_short(energy_short)
283 write (iout,*) "Printing short range energy"
284 call enerprint(energy_short)
286 energy_(i)=energy_long(i)+energy_short(i)
287 write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
289 write (iout,*) "Printing long+short range energy"
290 call enerprint(energy_)
295 time_ene=MPI_Wtime()-time00
297 write (iout,*) "Time for energy evaluation",time_ene
298 print *,"after etotal"
301 call enerprint(energy_)
302 call hairpin(.true.,nharp,iharp)
303 call secondary2(.true.)
! Remove side-chain overlaps and pre-relax side chains before minimization.
307 print *, 'Calling OVERLAP_SC'
308 call overlap_sc(fail)
312 call sc_move(2,nres-1,10,1d10,nft_sc,etot)
313 print *,'SC_move',nft_sc,etot
314 write(iout,*) 'SC_move',nft_sc,etot
! Minimization in Cartesian (dc) variables.
318 print *, 'Calling MINIM_DC'
322 ! call check_ecartint !el
323 call minim_dc(etot,iretcode,nfun)
324 ! call check_ecartint !el
326 if (indpdb.ne.0) then
328 write(iout,*) 'Warning: Calling chainbuild'
! Minimization in internal variables: pack the angles into varia, then run
! the SUMSL-based minimizer.
331 call geom_to_var(nvar,varia)
332 print *,'Calling MINIMIZE.'
337 ! call exec_checkgrad !el
338 call minimize(etot,varia,iretcode,nfun)
340 ! call exec_checkgrad !el
342 print *,'SUMSL return code is',iretcode,' eval ',nfun
344 evals=nfun/(MPI_WTIME()-time1)
346 print *,'# eval/s',evals
347 print *,'refstr=',refstr
348 ! call hairpin(.true.,nharp,iharp)
349 ! call secondary2(.true.)
352 call enerprint(energy_)
355 call briefout(0,etot)
! Compare against the reference structure when one was supplied (refstr).
356 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
357 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
358 write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
359 write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
361 print *,'refstr=',refstr,frac,frac_nn,co
362 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
363 print *,"after rms_nac_ncc"
364 call briefout(0,etot)
! Optional structure output in PDB and/or MOL2 format.
366 if (outpdb) call pdbout(etot,titel(:32),ipdb)
367 if (outmol2) call mol2out(etot,titel(:32))
368 write(iout,*) "after exec_eeval_or_minim"
370 end subroutine exec_eeval_or_minim
371 !-----------------------------------------------------------------------------
372 subroutine exec_regularize
! Regularization of a PDB structure (modecalc 1): fit the model chain to the
! reference coordinates (cref) with REGULARIZE, then print the energy,
! optional structure files and comparison statistics.
! NOTE(review): excerpted listing -- some interior lines are not visible.
373 ! use MPI_data !include 'COMMON.SETUP'
374 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
375 use io_units !include 'COMMON.IOUNITS'
377 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
378 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
379 ! use REMD !include 'COMMON.REMD'
380 ! use MD !include 'COMMON.MD'
384 ! implicit real*8 (a-h,o-z)
385 ! include 'DIMENSIONS'
389 real(kind=8) :: energy_(0:n_ene)
391 real(kind=8) :: rms,frac,frac_nn,co
394 call alloc_compare_arrays
! Regularize residues nnt..nct against the reference coordinates.
398 call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
! Subtract energy term 14 from the reported total.
! NOTE(review): the meaning of term 14 is not visible in this excerpt --
! confirm against the energy-term ordering elsewhere in the code.
400 energy_(0)=energy_(0)-energy_(14)
402 call enerprint(energy_)
404 call briefout(0,etot)
405 if (outpdb) call pdbout(etot,titel(:32),ipdb)
406 if (outmol2) call mol2out(etot,titel(:32))
407 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
408 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
410 end subroutine exec_regularize
411 !-----------------------------------------------------------------------------
412 subroutine exec_thread
! Threading of a sequence on PDB structures (modecalc 2).
! NOTE(review): excerpted listing -- the actual threading call is not
! visible here; only the array allocation remains.
413 ! use MPI_data !include 'COMMON.SETUP'
416 ! include 'DIMENSIONS'
420 call alloc_compare_arrays
423 end subroutine exec_thread
424 !-----------------------------------------------------------------------------
! Monte Carlo driver (modecalc 3: MC with minimization, modecalc 6: entropic
! sampling MC), branching on modecalc below.
! NOTE(review): excerpted listing -- the subroutine header and the actual
! Monte Carlo calls are not visible here.
426 ! use MPI_data !include 'COMMON.SETUP'
427 use control_data !include 'COMMON.CONTROL'
432 ! implicit real*8 (a-h,o-z)
433 ! include 'DIMENSIONS'
434 character(len=10) :: nodeinfo
435 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
440 call alloc_MCM_arrays
444 if (modecalc.eq.3) then
450 if (modecalc.eq.3) then
460 end subroutine exec_MC
461 !-----------------------------------------------------------------------------
462 subroutine exec_mult_eeval_or_minim
! Energy evaluation / minimization of multiple conformations (modecalc 4).
! Conformations are read sequentially from unit intin (either Cartesian
! coordinates via read_x, or internal angles via read_angles); per-
! conformation results are appended to the statistics file (istat).  In the
! parallel version the master farms tasks out to workers with
! mpi_send/mpi_recv; a serial path processes conformations in-process.
! NOTE(review): excerpted listing -- branch/loop delimiters and several
! interior lines are not visible here.
463 use MPI_data !include 'COMMON.SETUP'
464 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
465 use io_units !include 'COMMON.IOUNITS'
467 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
468 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
469 ! use REMD !include 'COMMON.REMD'
470 ! use MD !include 'COMMON.MD'
472 use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
473 use energy, only:etotal,enerprint
474 use compare, only:rms_nac_nnc
475 use minimm, only:minimize!,minim_mcmf
476 ! implicit real*8 (a-h,o-z)
477 ! include 'DIMENSIONS'
479 use minimm, only:minim_mcmf
482 integer :: ierror,ierr
484 real(kind=8),dimension(mpi_status_size) :: muster
488 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
489 integer,dimension(6) :: ind
490 real(kind=8) :: energy_(0:n_ene)
492 real(kind=8) :: etot,ene0
493 integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
495 real(kind=8) :: rms,frac,frac_nn,co,time,ene
! Open the conformation input file and write the .stat column headers
! (RMSD/contact columns are included only when a reference structure is
! available).
505 open(intin,file=intinname,status='old')
506 write (istat,'(a5,20a12)')"# ",&
507 (wname(print_order(i)),i=1,nprint_ene)
509 write (istat,'(a5,20a12)')"# ",&
510 (ename(print_order(i)),i=1,nprint_ene),&
511 "ETOT total","RMSD","nat.contact","nnt.contact"
513 write (istat,'(a5,20a12)')"# ",&
514 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
! Read one conformation: Cartesian coordinates (read_x, then conversion to
! internal coordinates) or internal angles (read_angles).
520 read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
521 call read_x(intin,*11)
523 ! Broadcast the order to compute internal coordinates to the slaves.
525 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
527 call int_from_cart1(.false.)
529 read (intin,'(i5)',end=1100,err=1100) iconf
530 call read_angles(intin,*11)
531 call geom_to_var(nvar,varia)
532 write(iout,*) 'Warning: Calling chainbuild1'
535 write (iout,'(a,i7)') 'Conformation #',iconf
537 call briefout(iconf,energy_(0))
538 call enerprint(energy_)
541 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
542 write (istat,'(i5,20(f12.3))') iconf,&
543 (energy_(print_order(i)),i=1,nprint_ene),etot,&
547 write (istat,'(i5,16(f12.3))') iconf,&
548 (energy_(print_order(i)),i=1,nprint_ene),etot
! Parallel farm, fill phase: hand one task to each worker while workers are
! still idle and input remains.
564 if (mm.lt.nodes) then
566 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
567 call read_x(intin,*11)
569 ! Broadcast the order to compute internal coordinates to the slaves.
571 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
573 call int_from_cart1(.false.)
575 read (intin,'(i5)',end=11,err=11) iconf
576 call read_angles(intin,*11)
577 call geom_to_var(nvar,varia)
578 write(iout,*) 'Warning: Calling chainbuild2'
581 write (iout,'(a,i7)') 'Conformation #',iconf
! Send task metadata (ind), variables and starting energy to worker mm.
591 call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
593 call mpi_send(varia,nvar,mpi_double_precision,mm,&
595 call mpi_send(ene0,1,mpi_double_precision,mm,&
597 ! print *,'task ',n,' sent to worker ',mm,nvar
! Collect a finished result from whichever worker answers first, then
! record and print it.
599 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
601 man=muster(mpi_source)
602 ! print *,'receiving result from worker ',man,' (',iii1,iii,')'
603 call mpi_recv(varia,nvar,mpi_double_precision,&
604 man,idreal,CG_COMM,muster,ierr)
605 call mpi_recv(ene,1,&
606 mpi_double_precision,man,idreal,&
608 call mpi_recv(ene0,1,&
609 mpi_double_precision,man,idreal,&
611 ! print *,'result received from worker ',man,' sending now'
613 call var_to_geom(nvar,varia)
614 write(iout,*) 'Warning: Calling chainbuild3'
620 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
623 call enerprint(energy_)
624 call briefout(it,etot)
625 ! if (minim) call briefout(it,etot)
627 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
628 write (istat,'(i5,19(f12.3))') iconf,&
629 (energy_(print_order(i)),i=1,nprint_ene),etot,&
632 write (istat,'(i5,15(f12.3))') iconf,&
633 (energy_(print_order(i)),i=1,nprint_ene),etot
! Refill the freed worker with the next conformation from the input file.
638 read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
639 call read_x(intin,*11)
641 ! Broadcast the order to compute internal coordinates to the slaves.
643 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
645 call int_from_cart1(.false.)
647 read (intin,'(i5)',end=1101,err=1101) iconf
648 call read_angles(intin,*11)
649 call geom_to_var(nvar,varia)
650 write(iout,*) 'Warning: Calling chainbuild4'
661 call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
663 call mpi_send(varia,nvar,mpi_double_precision,man,&
665 call mpi_send(ene0,1,mpi_double_precision,man,&
667 nf_mcmf=nf_mcmf+ind(4)
! Drain phase: input is exhausted; wait for the remaining outstanding
! results from the still-busy workers.
673 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
675 man=muster(mpi_source)
676 call mpi_recv(varia,nvar,mpi_double_precision,&
677 man,idreal,CG_COMM,muster,ierr)
678 call mpi_recv(ene,1,&
679 mpi_double_precision,man,idreal,&
681 call mpi_recv(ene0,1,&
682 mpi_double_precision,man,idreal,&
685 call var_to_geom(nvar,varia)
686 write(iout,*) 'Warning: Calling chainbuild5'
692 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
695 call enerprint(energy_)
696 call briefout(it,etot)
698 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
699 write (istat,'(i5,19(f12.3))') iconf,&
700 (energy_(print_order(i)),i=1,nprint_ene),etot,&
703 write (istat,'(i5,15(f12.3))') iconf,&
704 (energy_(print_order(i)),i=1,nprint_ene),etot
! Tell every worker to shut down.
716 call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
721 open(intin,file=intinname,status='old')
! Serial path: re-open the input, write headers (including the energy-term
! weights), and process each conformation in-process.
722 write (istat,'(a5,20a12)')"# ",&
723 (wname(print_order(i)),i=1,nprint_ene)
724 write (istat,'("# ",20(1pe12.4))') &
725 (weights(print_order(i)),i=1,nprint_ene)
727 write (istat,'(a5,20a12)')"# ",&
728 (ename(print_order(i)),i=1,nprint_ene),&
729 "ETOT total","RMSD","nat.contact","nnt.contact"
731 write (istat,'(a5,14a12)')"# ",&
732 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
736 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
737 call read_x(intin,*11)
739 ! Broadcast the order to compute internal coordinates to the slaves.
741 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
743 call int_from_cart1(.false.)
745 read (intin,'(i5)',end=11,err=11) iconf
746 call read_angles(intin,*11)
747 call geom_to_var(nvar,varia)
748 write(iout,*) 'Warning: Calling chainbuild5'
751 write (iout,'(a,i7)') 'Conformation #',iconf
752 if (minim) call minimize(etot,varia,iretcode,nfun)
756 call enerprint(energy_)
757 if (minim) call briefout(it,etot)
759 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
760 write (istat,'(i5,18(f12.3))') iconf,&
761 (energy_(print_order(i)),i=1,nprint_ene),&
762 etot,rms,frac,frac_nn,co
765 write (istat,'(i5,14(f12.3))') iconf,&
766 (energy_(print_order(i)),i=1,nprint_ene),etot
772 end subroutine exec_mult_eeval_or_minim
773 !-----------------------------------------------------------------------------
774 subroutine exec_checkgrad
! Numerical check of the analytical energy gradients (modecalc 5).  The kind
! of check is selected by icheckgrad via the computed GOTO below.
! NOTE(review): excerpted listing -- label 30 of the computed GOTO and other
! interior lines are not visible here.
775 ! use MPI_data !include 'COMMON.SETUP'
776 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
777 use io_units !include 'COMMON.IOUNITS'
778 !el use energy_data, only:icall !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
779 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
780 ! use REMD !include 'COMMON.REMD'
781 use MD_data !include 'COMMON.MD'
782 use io_base, only:intout
783 use io_config, only:read_fragments
788 ! implicit real*8 (a-h,o-z)
789 ! include 'DIMENSIONS'
794 !el common /srutu/ icall
795 real(kind=8) :: energy_(0:max_ene)
! Commented-out perturbation code (random changes to bond lengths and dc
! vectors) kept for manual gradient-testing sessions.
799 ! vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
800 ! if (itype(i).ne.10)
801 ! & vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
803 if (indpdb.eq.0) then
804 write(iout,*) 'Warning: Calling chainbuild'
809 ! dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
813 ! if (itype(i).ne.10) then
815 ! dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
820 ! dc(j,0)=ran_number(-0.2d0,0.2d0)
! Reference energy evaluation before the gradient check.
832 write (iout,*) "before etotal"
833 call etotal(energy_(0))
835 call enerprint(energy_(0))
836 write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
837 print *,'icheckgrad=',icheckgrad
! Computed GOTO: icheckgrad=1 -> check_ecartint, 2 -> check_cartgrad,
! 3 -> label 30 (not visible in this excerpt).
838 goto (10,20,30) icheckgrad
839 10 call check_ecartint
842 20 call check_cartgrad
847 end subroutine exec_checkgrad
848 !-----------------------------------------------------------------------------
! Energy-map calculation (modecalc 7).
! NOTE(review): excerpted listing -- the subroutine header and the map
! computation itself are not visible here.
852 use io_config, only:map_read
855 call alloc_map_arrays
859 end subroutine exec_map
860 !-----------------------------------------------------------------------------
! Conformational Space Annealing driver (modecalc 8).  Allocates the CSA
! arrays; in serial builds it only reports that CSA requires a parallel run.
! NOTE(review): excerpted listing -- the subroutine header and the CSA call
! itself are not visible here.
863 use io_units !include 'COMMON.IOUNITS'
869 ! include 'DIMENSIONS'
870 ! Conformational Space Annealing programmed by Jooyoung Lee.
871 ! This method works only with parallel machines!
873 call alloc_CSA_arrays
876 write (iout,*) "CSA works on parallel machines only"
879 end subroutine exec_CSA
880 !-----------------------------------------------------------------------------
881 subroutine exec_softreg
! Soft regularization of a PDB structure (modecalc 11): evaluate the energy,
! recognize (or reuse) the secondary structure, and write out the results.
! NOTE(review): excerpted listing -- some interior lines are not visible.
882 use io_units !include 'COMMON.IOUNITS'
883 use control_data !include 'COMMON.CONTROL'
885 use io_base, only:intout,briefout
886 use geometry, only:chainbuild
890 ! include 'DIMENSIONS'
891 real(kind=8) :: energy_(0:n_ene)
893 real(kind=8) :: rms,frac,frac_nn,co,etot
896 call alloc_compare_arrays
897 write(iout,*) 'Warning: Calling chainbuild'
900 call enerprint(energy_)
! Run secondary-structure recognition unless it was supplied with the PDB.
901 if (.not.lsecondary) then
902 write(iout,*) 'Calling secondary structure recognition'
903 call secondary2(debug)
905 write(iout,*) 'Using secondary structure supplied in pdb'
912 call enerprint(energy_)
914 call briefout(0,etot)
915 call secondary2(.true.)
916 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
918 end subroutine exec_softreg
919 !-----------------------------------------------------------------------------
921 !-----------------------------------------------------------------------------
923 subroutine ergastulum
! Event loop for fine-grain slave processes ("ergastulum" = slave workhouse
! in Latin).  Each iteration receives an opcode (iorder) broadcast by the
! master (king) over FG_COMM and services it; opcodes visible here:
!   0 -> etotal, 1 -> sum_gradient, 2 -> etotal_short, 3 -> etotal_long,
!   4 -> ginv_mult, 5 -> one-time MD setup (dimensions, Ginv scatter),
!   6 -> int_from_cart1, 7 -> chainbuild_cart, 9 -> fricmat_mult.
! The loop ends when a negative iorder is received.
! NOTE(review): excerpted listing -- several interior lines (loop bodies and
! some branches, e.g. opcodes 8 and 10) are not visible here.
925 ! implicit real*8 (a-h,o-z)
926 ! include 'DIMENSIONS'
929 use MDyn, only:setup_fricmat
930 use REMD, only:fricmat_mult,ginv_mult
934 ! include 'COMMON.SETUP'
935 ! include 'COMMON.DERIV'
936 ! include 'COMMON.VAR'
937 ! include 'COMMON.IOUNITS'
938 ! include 'COMMON.FFIELD'
939 ! include 'COMMON.INTERACT'
940 ! include 'COMMON.MD'
941 ! include 'COMMON.TIME1'
942 real(kind=8),dimension(6*nres) :: z,d_a_tmp !(maxres6) maxres6=6*maxres
943 real(kind=8) :: edum(0:n_ene),time_order(0:10)
944 !el real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
945 !el common /przechowalnia/ Gcopy
949 real(kind=8) :: time00
950 integer :: iorder,i,j,nres2,ierr,ierror
! Allocate the work matrices on first entry (sizes scale with 2*nres).
953 if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
955 if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
958 if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
959 if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
960 if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
961 if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
963 if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
965 ! Workers wait for variables and NF, and NFL from the boss
967 do while (iorder.ge.0)
968 ! write (*,*) 'Processor',fg_rank,' CG group',kolor,
969 ! & ' receives order from Master'
! Receive the next opcode broadcast by the master; accumulate the time spent
! waiting for it.
971 call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
972 time_Bcast=time_Bcast+MPI_Wtime()-time00
973 if (icall.gt.4 .and. iorder.ge.0) &
974 time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
977 ! & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
978 if (iorder.eq.0) then
981 ! write (2,*) "After etotal"
982 ! write (2,*) "dimen",dimen," dimen3",dimen3
984 else if (iorder.eq.2) then
986 call etotal_short(edum)
987 ! write (2,*) "After etotal_short"
988 ! write (2,*) "dimen",dimen," dimen3",dimen3
990 else if (iorder.eq.3) then
992 call etotal_long(edum)
993 ! write (2,*) "After etotal_long"
994 ! write (2,*) "dimen",dimen," dimen3",dimen3
996 else if (iorder.eq.1) then
998 ! write (2,*) "After sum_gradient"
999 ! write (2,*) "dimen",dimen," dimen3",dimen3
1001 else if (iorder.eq.4) then
1002 call ginv_mult(z,d_a_tmp)
1003 else if (iorder.eq.5) then
1004 ! Setup MD things for a slave
! Problem dimensions for the coarse-grained chain (backbone + side chains).
1005 dimen=(nct-nnt+1)+nside
1006 dimen1=(nct-nnt)+(nct-nnt+1)
1008 ! write (2,*) "dimen",dimen," dimen3",dimen3
! Partition the G-matrix work across the fine-grain group and exchange the
! per-task offsets/counts with MPI_Allgather.
1010 call int_bounds(dimen,igmult_start,igmult_end)
1011 igmult_start=igmult_start-1
1012 call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
1013 ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1014 my_ng_count=igmult_end-igmult_start
1015 call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
1016 MPI_INTEGER,FG_COMM,IERROR)
1017 write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
1018 ! write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
1019 myginv_ng_count=nres2*my_ng_count !el maxres2
1020 ! write (2,*) "igmult_start",igmult_start," igmult_end",
1021 ! & igmult_end," my_ng_count",my_ng_count
1023 call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1024 nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1025 call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1026 nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1027 ! write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1028 ! write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1030 ! call MPI_Barrier(FG_COMM,IERROR)
! Receive this task's slice of the inverse G matrix from the master.
1032 call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1033 nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1034 myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1036 time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
! Transpose the received slice from gcopy into ginv.
1039 do j=1,2*my_ng_count
1040 ginv(j,i)=gcopy(i,j)
1043 ! write (2,*) "dimen",dimen," dimen3",dimen3
1044 ! write (2,*) "End MD setup"
1046 ! write (iout,*) "My chunk of ginv_block"
1047 ! call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
1048 else if (iorder.eq.6) then
1049 call int_from_cart1(.false.)
1050 else if (iorder.eq.7) then
1051 call chainbuild_cart
1052 else if (iorder.eq.8) then
1054 else if (iorder.eq.9) then
1055 call fricmat_mult(z,d_a_tmp)
1056 else if (iorder.eq.10) then
! Final report on exit from the event loop.
! NOTE(review): "leves" in the message below is a typo for "leaves"; it is a
! runtime string and therefore left unchanged in this documentation pass.
1060 write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1061 ' absolute rank',myrank,' leves ERGASTULUM.'
1062 write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1063 (' order[',i,']',time_order(i),i=0,10)
1065 end subroutine ergastulum