1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
5 ! Program to carry out conformational search of proteins in an united-residue !
8 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! NOTE(review): this is a fragmented excerpt of the UNRES main driver — the
! embedded source line numbers jump (1, 5, 8, 13, ...), so the `program`
! statement and many interior lines (endif/enddo, several branch bodies) are
! not visible here.  Do not treat this listing as compilable; comments below
! describe only what the visible lines show.
13 use control, only:tcpu
14 use io_base, only:ilen
15 use geometry, only:chainbuild
16 use control, only:dajczas
19 use compare, only: test
25 ! implicit real*8 (a-h,o-z)
26 ! include 'DIMENSIONS'
28 use MPI_data ! include 'COMMON.SETUP'
33 use MPI_data, only: me,king
36 ! include 'COMMON.TIME1'
37 ! include 'COMMON.INTERACT'
38 ! include 'COMMON.NAMES'
39 ! include 'COMMON.GEO'
40 ! include 'COMMON.HEADER'
41 ! include 'COMMON.CONTROL'
42 ! include 'COMMON.CONTACTS'
43 ! include 'COMMON.CHAIN'
44 ! include 'COMMON.VAR'
45 ! include 'COMMON.IOUNITS'
46 ! include 'COMMON.FFIELD'
47 ! include 'COMMON.REMD'
49 ! include 'COMMON.SBRIDGE'
51 real(kind=8) :: hrtime,mintime,sectime
! Human-readable labels for each calculation mode, indexed by modecalc
! (-2 = test, -1 = sc-map check, 0..14 = production modes; several unused).
52 character(len=64) :: text_mode_calc(-2:14)
53 text_mode_calc(-2) = 'test'
54 text_mode_calc(-1) = 'cos'
55 text_mode_calc(0) = 'Energy evaluation or minimization'
56 text_mode_calc(1) = 'Regularization of PDB structure'
57 text_mode_calc(2) = 'Threading of a sequence on PDB structures'
58 text_mode_calc(3) = 'Monte Carlo (with minimization) '
59 text_mode_calc(4) = 'Energy minimization of multiple conformations'
60 text_mode_calc(5) = 'Checking energy gradient'
61 text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
62 text_mode_calc(7) = 'Energy map'
63 text_mode_calc(8) = 'CSA calculations'
64 text_mode_calc(9) = 'Not used 9'
65 text_mode_calc(10) = 'Not used 10'
66 text_mode_calc(11) = 'Soft regularization of PDB structure'
67 text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
68 text_mode_calc(13) = 'Not used 13'
69 text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
71 ! call memmon_print_usage()
75 write(iout,*)'### LAST MODIFIED 09/03/15 15:32PM by EL'
! Only the master rank ("king") prints build/configuration info.
76 if (me.eq.king) call cinfo
77 ! Read force field parameters and job setup data
80 write (iout,*) "After readrtns"
! Banner: announce the selected calculation mode (suppressed on non-master
! ranks when out1file is set).  NOTE(review): the matching endif is in the
! elided lines.
83 if (me.eq.king .or. .not. out1file) then
84 write (iout,'(2a/)') &
85 text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
87 if (minim) write (iout,'(a)') &
88 'Conformations will be energy-minimized.'
89 write (iout,'(80(1h*)/)')
! Dispatch on modecalc.  Branch bodies for several modes (1,2,3/6,5,7,8,
! 11,12,14) are in the elided lines; only the calls visible here are known.
93 if (modecalc.eq.-2) then
96 else if (modecalc.eq.-1) then
97 write(iout,*) "call check_sc_map next"
101 !elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
! Fine-grain (FG) slave ranks never run the dispatch below; they enter the
! ergastulum service loop and only compute energies/gradients on request.
104 if (fg_rank.gt.0) then
105 ! Fine-grain slaves just do energy and gradient components.
106 call ergastulum ! slave workhouse in Latin
109 if (modecalc.eq.0) then
110 call exec_eeval_or_minim
111 else if (modecalc.eq.1) then
113 else if (modecalc.eq.2) then
115 else if (modecalc.eq.3 .or. modecalc .eq.6) then
117 else if (modecalc.eq.4) then
118 call exec_mult_eeval_or_minim
119 else if (modecalc.eq.5) then
121 else if (ModeCalc.eq.7) then
123 else if (ModeCalc.eq.8) then
125 else if (modecalc.eq.11) then
127 else if (modecalc.eq.12) then
130 else if (modecalc.eq.14) then
! Fall-through: unrecognized modecalc value.
133 write (iout,'(a)') 'This calculation type is not supported',&
! Orderly shutdown: master of each CG group tells helpers to finish, then
! MPI is finalized and wall-clock time is reported.
140 if (fg_rank.eq.0) call finish_task
141 ! call memmon_print_usage()
143 call print_detailed_timing
145 call MPI_Finalize(ierr)
! dajczas converts total CPU seconds into h/min/s for the final report.
148 call dajczas(tcpu(),hrtime,mintime,sectime)
149 stop '********** Program terminated normally.'
153 !-----------------------------------------------------------------------------
155 !-----------------------------------------------------------------------------
! NOTE(review): fragment of subroutine exec_MD — the `subroutine exec_MD`
! header and most of the body (including the MD driver call, presumably
! between lines 174 and 182 of the full source) are elided here.
! Visible part: module imports, compare-array allocation, and a pre-
! chainbuild diagnostic message.
157 use MPI_data !include 'COMMON.SETUP'
158 use control_data !include 'COMMON.CONTROL'
159 use geometry, only:chainbuild,chainbuild_cart
161 use io_units !include 'COMMON.IOUNITS'
162 use compare, only:alloc_compare_arrays
165 ! include 'DIMENSIONS'
171 call alloc_compare_arrays
172 print *,'After MD alloc'
173 if (me.eq.king .or. .not. out1file) &
174 write (iout,*) "Calling chainbuild"
182 end subroutine exec_MD
183 !---------------------------------------------------------------------------
! Driver for replica-exchange MD (modecalc 14).  NOTE(review): interior
! lines (including the actual MREMD call, between source lines 208 and 218)
! are elided from this listing; only allocation and the announcement
! message are visible.
184 subroutine exec_MREMD
185 use MPI_data !include 'COMMON.SETUP'
186 use control_data !include 'COMMON.CONTROL'
187 use io_units !include 'COMMON.IOUNITS'
189 use REMD_data !include 'COMMON.REMD'
190 use geometry, only:chainbuild
192 use compare, only:alloc_compare_arrays
195 ! include 'DIMENSIONS'
! Allocate REMD work arrays before any replica setup.
202 call alloc_MREMD_arrays
203 call alloc_compare_arrays
204 ! if (me.eq.king .or. .not. out1file) &
205 ! write (iout,*) "Calling chainbuild"
! remd_mlist/nrep come from REMD_data (replica list flag and replica count —
! TODO confirm against io of the REMD setup).
207 if (me.eq.king .or. .not. out1file) &
208 write (iout,*) "Calling REMD",remd_mlist,nrep
218 end subroutine exec_MREMD
219 !-----------------------------------------------------------------------------
! Single-conformation energy evaluation and/or minimization (modecalc 0).
! NOTE(review): many interior lines are elided (loop/if bodies, the
! split_ene branches' enclosing conditionals, timing starts); the comments
! below only describe what the visible lines establish.
220 subroutine exec_eeval_or_minim
221 use MPI_data !include 'COMMON.SETUP'
222 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
223 use io_units !include 'COMMON.IOUNITS'
225 ! use energy !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
226 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
227 ! use REMD !include 'COMMON.REMD'
228 ! use MD !include 'COMMON.MD'
231 use MD_data, only: iset
233 use geometry, only:chainbuild
235 use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
236 use minimm, only:minimize,minim_dc,sc_move
240 ! implicit real*8 (a-h,o-z)
241 ! include 'DIMENSIONS'
246 !el common /srutu/ icall
! energy_ holds the total energy (index 0) plus individual terms 1..n_ene.
247 real(kind=8) :: energy_(0:n_ene)
248 real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
! varia: internal (angular) variables; historical bound maxvar=6*maxres.
249 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
250 real(kind=8) :: time00, evals, etota, etot, time_ene, time1
251 integer :: nharp,nft_sc,iretcode,nfun
252 integer,dimension(4,nres) :: iharp !(4,nres/3)(4,maxres/3)
254 real(kind=8) :: rms,frac,frac_nn,co
! Guard: iset (restraint set index from MD_data) defaults to 1 if unset.
257 if (iset.eq.0) iset=1
258 call alloc_compare_arrays
! No PDB input and no Cartesian restart -> geometry must be built from
! internal coordinates (endif is in the elided lines).
259 if ((indpdb.eq.0).and.(.not.read_cart)) then
261 write(iout,*) 'Warning: Calling chainbuild'
267 ! write(iout,*)"in exec_eeval or minimim",split_ene
269 ! write(iout,*)"cccccc",j,(c(i,j),i=1,3)
270 ! write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
273 ! write(iout,*)"in exec_eeval or minimim"
275 print *,"Processor",myrank," after chainbuild"
! Split-energy path (presumably under a split_ene conditional elided here):
! evaluate long- and short-range contributions separately, then sum.
278 call etotal_long(energy_long)
279 write (iout,*) "Printing long range energy"
280 call enerprint(energy_long)
282 call etotal_short(energy_short)
283 write (iout,*) "Printing short range energy"
284 call enerprint(energy_short)
286 energy_(i)=energy_long(i)+energy_short(i)
287 write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
289 write (iout,*) "Printing long+short range energy"
290 call enerprint(energy_)
! Timing of the (single) energy evaluation; time00 is set in elided code.
295 time_ene=MPI_Wtime()-time00
297 write (iout,*) "Time for energy evaluation",time_ene
298 print *,"after etotal"
301 call enerprint(energy_)
! Structural analysis of the evaluated conformation.
303 call hairpin(.true.,nharp,iharp)
304 call secondary2(.true.)
! Optional side-chain overlap removal (controlling if is elided).
308 print *,"overlap",searchsc,overlapsc
310 print *, 'Calling OVERLAP_SC'
311 call overlap_sc(fail)
! Side-chain-only pre-minimization over residues 2..nres-1.
315 call sc_move(2,nres-1,10,1d10,nft_sc,etot)
316 print *,'SC_move',nft_sc,etot
317 write(iout,*) 'SC_move',nft_sc,etot
! Cartesian-space minimization branch.
321 print *, 'Calling MINIM_DC'
325 ! call check_ecartint !el
326 call minim_dc(etot,iretcode,nfun)
327 ! call check_ecartint !el
! Internal-variable minimization branch: for PDB input the chain is
! (re)built first, then angles are packed into varia for SUMSL.
329 if (indpdb.ne.0) then
331 write(iout,*) 'Warning: Calling chainbuild'
334 call geom_to_var(nvar,varia)
335 print *,'Calling MINIMIZE.'
340 ! call exec_checkgrad !el
341 call minimize(etot,varia,iretcode,nfun)
343 ! call exec_checkgrad !el
345 print *,'SUMSL return code is',iretcode,' eval ',nfun
! Evaluations per second; time1 is started in elided code.
347 evals=nfun/(MPI_WTIME()-time1)
349 print *,'# eval/s',evals
350 print *,'refstr=',refstr
351 ! call hairpin(.true.,nharp,iharp)
352 ! call secondary2(.true.)
355 call enerprint(energy_)
358 call briefout(0,etot)
! Compare against reference structure when one was supplied (refstr).
359 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
360 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
361 write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
362 write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
364 print *,'refstr=',refstr,frac,frac_nn,co
365 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
366 print *,"after rms_nac_ncc"
367 call briefout(0,etot)
! Optional structure output in PDB and/or MOL2 format.
369 if (outpdb) call pdbout(etot,titel(:32),ipdb)
370 if (outmol2) call mol2out(etot,titel(:32))
371 write(iout,*) "after exec_eeval_or_minim"
373 end subroutine exec_eeval_or_minim
374 !-----------------------------------------------------------------------------
! Regularization of a PDB structure against the reference coordinates
! cref (modecalc 1).  NOTE(review): interior lines (energy evaluation,
! declarations of etot/iretcode) are elided from this listing.
375 subroutine exec_regularize
376 ! use MPI_data !include 'COMMON.SETUP'
377 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
378 use io_units !include 'COMMON.IOUNITS'
380 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
381 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
382 ! use REMD !include 'COMMON.REMD'
383 ! use MD !include 'COMMON.MD'
387 ! implicit real*8 (a-h,o-z)
388 ! include 'DIMENSIONS'
392 real(kind=8) :: energy_(0:n_ene)
394 real(kind=8) :: rms,frac,frac_nn,co
397 call alloc_compare_arrays
! Regularize residues nnt..nct against reference frame 1 of cref.
401 call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
! Subtract energy term 14 from the total — presumably the restraint/
! distance-constraint term added during regularization; TODO confirm
! against the energy-term ordering in the energy module.
403 energy_(0)=energy_(0)-energy_(14)
405 call enerprint(energy_)
407 call briefout(0,etot)
408 if (outpdb) call pdbout(etot,titel(:32),ipdb)
409 if (outmol2) call mol2out(etot,titel(:32))
410 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
411 write (iout,'(a,i3)') 'SUMSL return code:',iretcode
413 end subroutine exec_regularize
414 !-----------------------------------------------------------------------------
! Threading of a sequence onto PDB templates (modecalc 2).
! NOTE(review): the actual threading call is among the elided lines
! (between source lines 423 and 426); only allocation is visible here.
415 subroutine exec_thread
416 ! use MPI_data !include 'COMMON.SETUP'
419 ! include 'DIMENSIONS'
423 call alloc_compare_arrays
426 end subroutine exec_thread
427 !-----------------------------------------------------------------------------
! NOTE(review): fragment of subroutine exec_MC (Monte Carlo with
! minimization, modecalc 3/6) — the `subroutine exec_MC` header and the
! branch bodies of both modecalc tests are elided from this listing.
429 ! use MPI_data !include 'COMMON.SETUP'
430 use control_data !include 'COMMON.CONTROL'
435 ! implicit real*8 (a-h,o-z)
436 ! include 'DIMENSIONS'
437 character(len=10) :: nodeinfo
438 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
443 call alloc_MCM_arrays
! Two successive modecalc.eq.3 tests — presumably one selects the MCM
! variant and the other the plain-MC variant in the elided else-branches;
! TODO confirm against the full source.
447 if (modecalc.eq.3) then
453 if (modecalc.eq.3) then
463 end subroutine exec_MC
464 !-----------------------------------------------------------------------------
! Energy evaluation / minimization of multiple conformations read from
! intin (modecalc 4).  Runs as an MPI master/worker farm: the master
! reads conformations, farms them to workers (mpi_send of ind/varia/ene0),
! collects minimized results (mpi_recv), and writes statistics to istat.
! NOTE(review): heavily elided listing — loop headers, branch conditions,
! the worker-side code, and the serial (single-node) path's enclosing
! control flow are mostly in the missing lines.  Comments describe the
! visible skeleton only.
465 subroutine exec_mult_eeval_or_minim
466 use MPI_data !include 'COMMON.SETUP'
467 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
468 use io_units !include 'COMMON.IOUNITS'
470 use energy_data !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
471 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
472 ! use REMD !include 'COMMON.REMD'
473 ! use MD !include 'COMMON.MD'
475 use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
476 use energy, only:etotal,enerprint
477 use compare, only:rms_nac_nnc
478 use minimm, only:minimize!,minim_mcmf
479 ! implicit real*8 (a-h,o-z)
480 ! include 'DIMENSIONS'
482 use minimm, only:minim_mcmf
485 integer :: ierror,ierr
! muster: MPI status buffer.  NOTE(review): declared real(kind=8) but
! indexed with mpi_source and passed as an MPI status — an integer array
! of size mpi_status_size is the conventional type; verify against the
! full source before relying on this.
487 real(kind=8),dimension(mpi_status_size) :: muster
491 real(kind=8) :: varia(6*nres) !(maxvar) (maxvar=6*maxres)
! ind: 6-slot task descriptor exchanged between master and workers.
492 integer,dimension(6) :: ind
493 real(kind=8) :: energy_(0:n_ene)
495 real(kind=8) :: etot,ene0
496 integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
498 real(kind=8) :: rms,frac,frac_nn,co,time,ene
! --- Serial path: open the conformation file and write the stats header.
508 open(intin,file=intinname,status='old')
509 write (istat,'(a5,20a12)')"# ",&
510 (wname(print_order(i)),i=1,nprint_ene)
! Header variant with reference-structure comparison columns.
512 write (istat,'(a5,20a12)')"# ",&
513 (ename(print_order(i)),i=1,nprint_ene),&
514 "ETOT total","RMSD","nat.contact","nnt.contact"
516 write (istat,'(a5,20a12)')"# ",&
517 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
! Read one conformation: either Cartesian (read_x + int_from_cart1, after
! broadcasting order 6 to FG slaves) or angular (read_angles + geom_to_var).
523 read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
524 call read_x(intin,*11)
526 ! Broadcast the order to compute internal coordinates to the slaves.
528 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
530 call int_from_cart1(.false.)
532 read (intin,'(i5)',end=1100,err=1100) iconf
533 call read_angles(intin,*11)
534 call geom_to_var(nvar,varia)
535 write(iout,*) 'Warning: Calling chainbuild1'
538 write (iout,'(a,i7)') 'Conformation #',iconf
540 call briefout(iconf,energy_(0))
541 call enerprint(energy_)
544 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
545 write (istat,'(i5,20(f12.3))') iconf,&
546 (energy_(print_order(i)),i=1,nprint_ene),etot,&
550 write (istat,'(i5,16(f12.3))') iconf,&
551 (energy_(print_order(i)),i=1,nprint_ene),etot
! --- Parallel path, phase 1: seed each worker (mm counts busy workers).
567 if (mm.lt.nodes) then
569 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
570 call read_x(intin,*11)
572 ! Broadcast the order to compute internal coordinates to the slaves.
574 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
576 call int_from_cart1(.false.)
578 read (intin,'(i5)',end=11,err=11) iconf
579 call read_angles(intin,*11)
580 call geom_to_var(nvar,varia)
581 write(iout,*) 'Warning: Calling chainbuild2'
584 write (iout,'(a,i7)') 'Conformation #',iconf
! Ship task to worker mm: descriptor, variables, reference energy.
594 call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
596 call mpi_send(varia,nvar,mpi_double_precision,mm,&
598 call mpi_send(ene0,1,mpi_double_precision,mm,&
600 ! print *,'task ',n,' sent to worker ',mm,nvar
! --- Phase 2: receive a finished result from any worker ...
602 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
604 man=muster(mpi_source)
605 ! print *,'receiving result from worker ',man,' (',iii1,iii,')'
606 call mpi_recv(varia,nvar,mpi_double_precision,&
607 man,idreal,CG_COMM,muster,ierr)
608 call mpi_recv(ene,1,&
609 mpi_double_precision,man,idreal,&
611 call mpi_recv(ene0,1,&
612 mpi_double_precision,man,idreal,&
614 ! print *,'result received from worker ',man,' sending now'
! Rebuild geometry from the returned variables and log the result.
616 call var_to_geom(nvar,varia)
617 write(iout,*) 'Warning: Calling chainbuild3'
623 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
626 call enerprint(energy_)
627 call briefout(it,etot)
628 ! if (minim) call briefout(it,etot)
630 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
631 write (istat,'(i5,19(f12.3))') iconf,&
632 (energy_(print_order(i)),i=1,nprint_ene),etot,&
635 write (istat,'(i5,15(f12.3))') iconf,&
636 (energy_(print_order(i)),i=1,nprint_ene),etot
! ... then immediately refill the freed worker (man) with the next task.
641 read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
642 call read_x(intin,*11)
644 ! Broadcast the order to compute internal coordinates to the slaves.
646 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
648 call int_from_cart1(.false.)
650 read (intin,'(i5)',end=1101,err=1101) iconf
651 call read_angles(intin,*11)
652 call geom_to_var(nvar,varia)
653 write(iout,*) 'Warning: Calling chainbuild4'
664 call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
666 call mpi_send(varia,nvar,mpi_double_precision,man,&
668 call mpi_send(ene0,1,mpi_double_precision,man,&
! Accumulate total function evaluations reported by workers in ind(4).
670 nf_mcmf=nf_mcmf+ind(4)
! --- Phase 3 (drain): collect remaining outstanding results.
676 call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
678 man=muster(mpi_source)
679 call mpi_recv(varia,nvar,mpi_double_precision,&
680 man,idreal,CG_COMM,muster,ierr)
681 call mpi_recv(ene,1,&
682 mpi_double_precision,man,idreal,&
684 call mpi_recv(ene0,1,&
685 mpi_double_precision,man,idreal,&
688 call var_to_geom(nvar,varia)
689 write(iout,*) 'Warning: Calling chainbuild5'
695 write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
698 call enerprint(energy_)
699 call briefout(it,etot)
701 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
702 write (istat,'(i5,19(f12.3))') iconf,&
703 (energy_(print_order(i)),i=1,nprint_ene),etot,&
706 write (istat,'(i5,15(f12.3))') iconf,&
707 (energy_(print_order(i)),i=1,nprint_ene),etot
! Termination: send the stop descriptor to each worker i.
719 call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
! --- Single-processor fallback path: same read/minimize/report loop,
! executed entirely on this rank.
724 open(intin,file=intinname,status='old')
725 write (istat,'(a5,20a12)')"# ",&
726 (wname(print_order(i)),i=1,nprint_ene)
727 write (istat,'("# ",20(1pe12.4))') &
728 (weights(print_order(i)),i=1,nprint_ene)
730 write (istat,'(a5,20a12)')"# ",&
731 (ename(print_order(i)),i=1,nprint_ene),&
732 "ETOT total","RMSD","nat.contact","nnt.contact"
734 write (istat,'(a5,14a12)')"# ",&
735 (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
739 read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
740 call read_x(intin,*11)
742 ! Broadcast the order to compute internal coordinates to the slaves.
744 call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
746 call int_from_cart1(.false.)
748 read (intin,'(i5)',end=11,err=11) iconf
749 call read_angles(intin,*11)
750 call geom_to_var(nvar,varia)
751 write(iout,*) 'Warning: Calling chainbuild5'
754 write (iout,'(a,i7)') 'Conformation #',iconf
755 if (minim) call minimize(etot,varia,iretcode,nfun)
759 call enerprint(energy_)
760 if (minim) call briefout(it,etot)
762 call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
763 write (istat,'(i5,18(f12.3))') iconf,&
764 (energy_(print_order(i)),i=1,nprint_ene),&
765 etot,rms,frac,frac_nn,co
768 write (istat,'(i5,14(f12.3))') iconf,&
769 (energy_(print_order(i)),i=1,nprint_ene),etot
775 end subroutine exec_mult_eeval_or_minim
776 !-----------------------------------------------------------------------------
! Numerical check of the energy gradient (modecalc 5).  Evaluates the
! energy once, then dispatches on icheckgrad (1..3) to one of the gradient
! checkers.  NOTE(review): elided lines include the perturbation loops
! (kept here only as comments), the icheckgrad=3 target (label 30), and the
! returns between the labeled branches.
777 subroutine exec_checkgrad
778 ! use MPI_data !include 'COMMON.SETUP'
779 use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
780 use io_units !include 'COMMON.IOUNITS'
781 !el use energy_data, only:icall !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
782 use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
783 ! use REMD !include 'COMMON.REMD'
784 use MD_data !include 'COMMON.MD'
785 use io_base, only:intout
786 use io_config, only:read_fragments
791 ! implicit real*8 (a-h,o-z)
792 ! include 'DIMENSIONS'
797 !el common /srutu/ icall
798 real(kind=8) :: energy_(0:max_ene)
802 ! vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
803 ! if (itype(i).ne.10)
804 ! & vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
806 if (indpdb.eq.0) then
807 write(iout,*) 'Warning: Calling chainbuild'
812 ! dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
816 ! if (itype(i).ne.10) then
818 ! dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
823 ! dc(j,0)=ran_number(-0.2d0,0.2d0)
835 write (iout,*) "before etotal"
836 call etotal(energy_(0))
838 call enerprint(energy_(0))
839 write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
840 print *,'icheckgrad=',icheckgrad
! Legacy computed GOTO: icheckgrad=1 -> Cartesian-internal check,
! 2 -> Cartesian check, 3 -> (elided) internal-coordinate check.
841 goto (10,20,30) icheckgrad
842 10 call check_ecartint
845 20 call check_cartgrad
850 end subroutine exec_checkgrad
851 !-----------------------------------------------------------------------------
! NOTE(review): fragment of subroutine exec_map (energy map, modecalc 7) —
! the subroutine header and the map-driver call are in the elided lines;
! only input parsing setup and allocation are visible.
855 use io_config, only:map_read
858 call alloc_map_arrays
862 end subroutine exec_map
863 !-----------------------------------------------------------------------------
! NOTE(review): fragment of subroutine exec_CSA (Conformational Space
! Annealing, modecalc 8) — the subroutine header and the MPI-guarded CSA
! driver call are in the elided lines.  The visible message indicates CSA
! is refused on serial runs.
866 use io_units !include 'COMMON.IOUNITS'
872 ! include 'DIMENSIONS'
873 ! Conformational Space Annealling programmed by Jooyoung Lee.
874 ! This method works only with parallel machines!
876 call alloc_CSA_arrays
879 write (iout,*) "CSA works on parallel machines only"
882 end subroutine exec_CSA
883 !-----------------------------------------------------------------------------
! Soft regularization of a PDB structure (modecalc 11): energy evaluation
! plus secondary-structure-restrained refinement.  NOTE(review): elided
! lines include the energy calls preceding each enerprint and the
! else/endif of the lsecondary branch.
884 subroutine exec_softreg
885 use io_units !include 'COMMON.IOUNITS'
886 use control_data !include 'COMMON.CONTROL'
888 use io_base, only:intout,briefout
889 use geometry, only:chainbuild
893 ! include 'DIMENSIONS'
894 real(kind=8) :: energy_(0:n_ene)
896 real(kind=8) :: rms,frac,frac_nn,co,etot
899 call alloc_compare_arrays
900 write(iout,*) 'Warning: Calling chainbuild'
903 call enerprint(energy_)
! Secondary structure: recognize it ourselves unless it was already
! supplied with the PDB input (lsecondary).
904 if (.not.lsecondary) then
905 write(iout,*) 'Calling secondary structure recognition'
906 call secondary2(debug)
908 write(iout,*) 'Using secondary structure supplied in pdb'
915 call enerprint(energy_)
917 call briefout(0,etot)
918 call secondary2(.true.)
919 if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
921 end subroutine exec_softreg
922 !-----------------------------------------------------------------------------
924 !-----------------------------------------------------------------------------
! Service loop for fine-grain (FG) slave ranks ("ergastulum" = slave
! workhouse).  Each iteration receives an integer order broadcast from the
! FG master and executes the corresponding task (energy evaluation,
! gradient, matrix multiply, MD setup, ...).  A negative order terminates
! the loop.  NOTE(review): elided lines include the initial receive of
! iorder, several order branches' bodies (0, 1, 8, 10), nres2 assignment,
! and loop/if terminators; comments describe only the visible lines.
926 subroutine ergastulum
928 ! implicit real*8 (a-h,o-z)
929 ! include 'DIMENSIONS'
932 use MDyn, only:setup_fricmat
933 use REMD, only:fricmat_mult,ginv_mult
937 ! include 'COMMON.SETUP'
938 ! include 'COMMON.DERIV'
939 ! include 'COMMON.VAR'
940 ! include 'COMMON.IOUNITS'
941 ! include 'COMMON.FFIELD'
942 ! include 'COMMON.INTERACT'
943 ! include 'COMMON.MD'
944 ! include 'COMMON.TIME1'
! z/d_a_tmp: work vectors for the matrix-vector services (orders 4 and 9).
945 real(kind=8),dimension(6*nres) :: z,d_a_tmp !(maxres6) maxres6=6*maxres
! edum: scratch energy array — slaves discard the energy values; only the
! side effects (gradient components) matter.
946 real(kind=8) :: edum(0:n_ene),time_order(0:10)
947 !el real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
948 !el common /przechowalnia/ Gcopy
952 real(kind=8) :: time00
953 integer :: iorder,i,j,nres2,ierr,ierror
! One-time allocation of the inertia/friction work arrays (nres2 is
! presumably 2*nres, set in an elided line — confirm against full source).
956 if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
958 if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
961 if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
962 if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
963 if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
964 if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
966 if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
968 ! Workers wait for variables and NF, and NFL from the boss
970 do while (iorder.ge.0)
971 ! write (*,*) 'Processor',fg_rank,' CG group',kolor,
972 ! & ' receives order from Master'
! Blocking receive of the next order; accumulate wait time per order
! (skipping the first few calls so startup noise is excluded).
974 call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
975 time_Bcast=time_Bcast+MPI_Wtime()-time00
976 if (icall.gt.4 .and. iorder.ge.0) &
977 time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
980 ! & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
! Order dispatch.  0: full energy (body elided); 2: short-range energy;
! 3: long-range energy; 1: gradient sum (body elided); 4: Ginv*v product.
981 if (iorder.eq.0) then
984 ! write (2,*) "After etotal"
985 ! write (2,*) "dimen",dimen," dimen3",dimen3
987 else if (iorder.eq.2) then
989 call etotal_short(edum)
990 ! write (2,*) "After etotal_short"
991 ! write (2,*) "dimen",dimen," dimen3",dimen3
993 else if (iorder.eq.3) then
995 call etotal_long(edum)
996 ! write (2,*) "After etotal_long"
997 ! write (2,*) "dimen",dimen," dimen3",dimen3
999 else if (iorder.eq.1) then
1001 ! write (2,*) "After sum_gradient"
1002 ! write (2,*) "dimen",dimen," dimen3",dimen3
1004 else if (iorder.eq.4) then
1005 call ginv_mult(z,d_a_tmp)
1006 else if (iorder.eq.5) then
1007 ! Setup MD things for a slave
! Problem dimensions: dimen counts CA sites plus side chains; dimen1 the
! virtual bonds.
1008 dimen=(nct-nnt+1)+nside
1009 dimen1=(nct-nnt)+(nct-nnt+1)
1011 ! write (2,*) "dimen",dimen," dimen3",dimen3
! Partition rows of the G matrices across FG tasks and share the
! start/count tables with everyone (counts in units of 3 coordinates).
1013 call int_bounds(dimen,igmult_start,igmult_end)
1014 igmult_start=igmult_start-1
1015 call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
1016 ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1017 my_ng_count=igmult_end-igmult_start
1018 call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
1019 MPI_INTEGER,FG_COMM,IERROR)
1020 write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
1021 ! write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
1022 myginv_ng_count=nres2*my_ng_count !el maxres2
1023 ! write (2,*) "igmult_start",igmult_start," igmult_end",
1024 ! & igmult_end," my_ng_count",my_ng_count
1026 call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
1027 nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
1028 call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
1029 nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
1030 ! write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
1031 ! write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
1033 ! call MPI_Barrier(FG_COMM,IERROR)
! Scatter this task's slice of Ginv from the master into gcopy, then
! transpose it back into the local ginv storage.
1035 call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
1036 nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
1037 myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
1039 time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
1042 do j=1,2*my_ng_count
1043 ginv(j,i)=gcopy(i,j)
1046 ! write (2,*) "dimen",dimen," dimen3",dimen3
1047 ! write (2,*) "End MD setup"
1049 ! write (iout,*) "My chunk of ginv_block"
1050 ! call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
! 6: internal coords from Cartesians; 7: rebuild chain from Cartesians;
! 8: (body elided — presumably setup_fricmat); 9: fricmat*v product;
! 10: (body elided).
1051 else if (iorder.eq.6) then
1052 call int_from_cart1(.false.)
1053 else if (iorder.eq.7) then
1054 call chainbuild_cart
1055 else if (iorder.eq.8) then
1057 else if (iorder.eq.9) then
1058 call fricmat_mult(z,d_a_tmp)
1059 else if (iorder.eq.10) then
! Loop exit (iorder < 0): report per-order wait-time statistics.
! (sic) "leves" in the message is a typo for "leaves" in the original.
1063 write (*,*) 'Processor',fg_rank,' CG group',kolor,&
1064 ' absolute rank',myrank,' leves ERGASTULUM.'
1065 write(*,*)'Processor',fg_rank,' wait times for respective orders',&
1066 (' order[',i,']',time_order(i),i=0,10)
1068 end subroutine ergastulum