X-Git-Url: http://mmka.chem.univ.gda.pl/gitweb/?a=blobdiff_plain;f=source%2Funres%2Fsrc_CSA%2Funres_csa.F;h=51d6bca1f6a0fc3f77684523e15233305efdd161;hb=b857abb01fa60639bf244ffdd5082987dae11529;hp=ac19d24ee7e1650407b3429cb8a1c5065d34aae0;hpb=5e649e8ed333841acd808e370bd9dd017d79bcae;p=unres.git diff --git a/source/unres/src_CSA/unres_csa.F b/source/unres/src_CSA/unres_csa.F index ac19d24..51d6bca 100644 --- a/source/unres/src_CSA/unres_csa.F +++ b/source/unres/src_CSA/unres_csa.F @@ -90,8 +90,8 @@ c else if (modecalc.eq.2) then c call exec_thread c else if (modecalc.eq.3 .or. modecalc .eq.6) then c call exec_MC -c else if (modecalc.eq.4) then -c call exec_mult_eeval_or_minim + else if (modecalc.eq.4) then + call exec_mult_eeval_or_minim else if (modecalc.eq.5) then call exec_checkgrad c else if (ModeCalc.eq.7) then @@ -301,3 +301,244 @@ C This method works only with parallel machines! #endif return end +c--------------------------------------------------------------------------- + subroutine exec_mult_eeval_or_minim + implicit real*8 (a-h,o-z) + include 'DIMENSIONS' + include 'mpif.h' + integer muster(mpi_status_size) + include 'COMMON.SETUP' + include 'COMMON.TIME1' + include 'COMMON.INTERACT' + include 'COMMON.NAMES' + include 'COMMON.GEO' + include 'COMMON.HEADER' + include 'COMMON.CONTROL' + include 'COMMON.CONTACTS' + include 'COMMON.CHAIN' + include 'COMMON.VAR' + include 'COMMON.IOUNITS' + include 'COMMON.FFIELD' + include 'COMMON.SBRIDGE' + double precision varia(maxvar) + integer ind(6) + double precision energy(0:n_ene) + logical eof + eof=.false. + + if(me.ne.king) then + call minim_mcmf + return + endif + + close (intin) + open(intin,file=intinname,status='old') + write (istat,'(a5,100a12)')"# ", + & (wname(print_order(i)),i=1,nprint_ene) + if (refstr) then + write (istat,'(a5,100a12)')"# ", + & (ename(print_order(i)),i=1,nprint_ene), + & "ETOT total","RMSD","nat.contact","nnt.contact", + & "cont.order","TMscore" + else + write (istat,'(a5,100a12)')"# ", + & (ename(print_order(i)),i=1,nprint_ene),"ETOT total" + endif + + if (.not.minim) then + do while (.not. eof) + if (read_cart) then + read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene + call read_x(intin,*11) +c Broadcast the order to compute internal coordinates to the slaves. + if (nfgtasks.gt.1) + & call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR) + call int_from_cart1(.false.) + else + read (intin,'(i5)',end=1100,err=1100) iconf + call read_angles(intin,*11) + call geom_to_var(nvar,varia) + call chainbuild + endif + write (iout,'(a,i7)') 'Conformation #',iconf + call etotal(energy(0)) + call briefout(iconf,energy(0)) + call enerprint(energy(0)) + etot=energy(0) + if (refstr) then + call rms_nac_nnc(rms,frac,frac_nn,co,.true.) + call calc_tmscore(tm,.true.) + write (istat,'(i5,100(f12.3))') iconf, + & (energy(print_order(i)),i=1,nprint_ene),etot, + & rms,frac,frac_nn,co,tm + else + write (istat,'(i5,100(f12.3))') iconf, + & (energy(print_order(i)),i=1,nprint_ene),etot + endif + enddo +1100 continue + goto 1101 + endif + + mm=0 + imm=0 + nft=0 + ene0=0.0d0 + n=0 + iconf=0 + do while (.not. eof) + mm=mm+1 + if (mm.lt.nodes) then + if (read_cart) then + read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene + call read_x(intin,*11) +c Broadcast the order to compute internal coordinates to the slaves. + if (nfgtasks.gt.1) + & call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR) + call int_from_cart1(.false.) 
+ else + read (intin,'(i5)',end=11,err=11) iconf + call read_angles(intin,*11) + call geom_to_var(nvar,varia) + call chainbuild + endif + + n=n+1 + write (iout,*) 'Conformation #',iconf,' read' + imm=imm+1 + ind(1)=1 + ind(2)=n + ind(3)=0 + ind(4)=0 + ind(5)=0 + ind(6)=0 + ene0=0.0d0 + call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM, + * ierr) + call mpi_send(varia,nvar,mpi_double_precision,mm, + * idreal,CG_COMM,ierr) + call mpi_send(ene0,1,mpi_double_precision,mm, + * idreal,CG_COMM,ierr) +c print *,'task ',n,' sent to worker ',mm,nvar + else + call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint, + * CG_COMM,muster,ierr) + man=muster(mpi_source) +c print *,'receiving result from worker ',man,' (',iii1,iii,')' + call mpi_recv(varia,nvar,mpi_double_precision, + * man,idreal,CG_COMM,muster,ierr) + call mpi_recv(ene,1, + * mpi_double_precision,man,idreal, + * CG_COMM,muster,ierr) + call mpi_recv(ene0,1, + * mpi_double_precision,man,idreal, + * CG_COMM,muster,ierr) +c print *,'result received from worker ',man,' sending now' + + call var_to_geom(nvar,varia) + call chainbuild + call etotal(energy(0)) + iconf=ind(2) + write (iout,*) + write (iout,*) + write (iout,*) 'Conformation #',iconf,ind(5) + + etot=energy(0) + call enerprint(energy(0)) + call briefout(iconf,etot) + if (refstr) then + call rms_nac_nnc(rms,frac,frac_nn,co,.true.) + call calc_tmscore(tm,.true.) + write (istat,'(i5,100(f12.3))') iconf, + & (energy(print_order(i)),i=1,nprint_ene),etot, + & rms,frac,frac_nn,co,tm + else + write (istat,'(i5,100(f12.3))') iconf, + & (energy(print_order(i)),i=1,nprint_ene),etot + endif + + imm=imm-1 + if (read_cart) then + read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene + call read_x(intin,*11) +c Broadcast the order to compute internal coordinates to the slaves. + if (nfgtasks.gt.1) + & call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR) + call int_from_cart1(.false.) + else + read (intin,'(i5)',end=1101,err=1101) iconf + call read_angles(intin,*11) + call geom_to_var(nvar,varia) + call chainbuild + endif + n=n+1 + write (iout,*) 'Conformation #',iconf,' read' + imm=imm+1 + ind(1)=1 + ind(2)=n + ind(3)=0 + ind(4)=0 + ind(5)=0 + ind(6)=0 + call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM, + * ierr) + call mpi_send(varia,nvar,mpi_double_precision,man, + * idreal,CG_COMM,ierr) + call mpi_send(ene0,1,mpi_double_precision,man, + * idreal,CG_COMM,ierr) + nf_mcmf=nf_mcmf+ind(4) + nmin=nmin+1 + endif + enddo +11 continue + do j=1,imm + call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint, + * CG_COMM,muster,ierr) + man=muster(mpi_source) + call mpi_recv(varia,nvar,mpi_double_precision, + * man,idreal,CG_COMM,muster,ierr) + call mpi_recv(ene,1, + * mpi_double_precision,man,idreal, + * CG_COMM,muster,ierr) + call mpi_recv(ene0,1, + * mpi_double_precision,man,idreal, + * CG_COMM,muster,ierr) + + call var_to_geom(nvar,varia) + call chainbuild + call etotal(energy(0)) + iconf=ind(2) + write (iout,*) + write (iout,*) + write (iout,*) 'Conformation #',iconf,ind(5) + + etot=energy(0) + call enerprint(energy(0)) + call briefout(iconf,etot) + if (refstr) then + call rms_nac_nnc(rms,frac,frac_nn,co,.true.) + call calc_tmscore(tm,.true.) 
+ write (istat,'(i5,100(f12.3))') iconf, + & (energy(print_order(i)),i=1,nprint_ene),etot, + & rms,frac,frac_nn,co,tm + else + write (istat,'(i5,100(f12.3))') iconf, + & (energy(print_order(i)),i=1,nprint_ene),etot + endif + nmin=nmin+1 + enddo +1101 continue + do i=1, nodes-1 + ind(1)=0 + ind(2)=0 + ind(3)=0 + ind(4)=0 + ind(5)=0 + ind(6)=0 + call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM, + * ierr) + enddo + return + end +c--------------------------------------------------------------------------- +
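
The subroutine added above implements a straightforward master-worker task farm: the master process (king) seeds every worker with one conformation, and whenever a result arrives (received with mpi_any_source and identified through muster(mpi_source)) it immediately reads the next conformation from unit intin and sends it to the worker that just finished. Once the input file is exhausted it drains the imm jobs still in flight and finally sends ind(1)=0 to ranks 1 through nodes-1 as a termination signal; the matching receive loop is presumably in minim_mcmf, which is not part of this hunk.

The fragment below is a minimal, self-contained sketch of that protocol only, not UNRES code: mpi_comm_world stands in for CG_COMM, the tag values idint/idreal and the program name are arbitrary placeholders, and a dummy "square a number" task replaces the energy evaluation/minimization. Run it with at least two MPI ranks.

c     Minimal sketch of the seed / refill / drain / stop protocol used
c     by exec_mult_eeval_or_minim.  Requires at least two MPI ranks.
      program taskfarm
      implicit none
      include 'mpif.h'
      integer ntask,idint,idreal
      parameter (ntask=8,idint=45,idreal=46)
      integer ierr,me,nodes,muster(mpi_status_size)
      integer itask,n,imm,man,i
      double precision x,y
      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world,me,ierr)
      call mpi_comm_size(mpi_comm_world,nodes,ierr)
      if (me.eq.0) then
c Master: seed each worker with one task.
        n=0
        imm=0
        do while (n.lt.ntask .and. n.lt.nodes-1)
          n=n+1
          imm=imm+1
          x=dble(n)
          call mpi_send(n,1,mpi_integer,n,idint,
     &                  mpi_comm_world,ierr)
          call mpi_send(x,1,mpi_double_precision,n,idreal,
     &                  mpi_comm_world,ierr)
        enddo
c Refill: whichever worker finishes first gets the next task.
        do while (n.lt.ntask)
          call mpi_recv(itask,1,mpi_integer,mpi_any_source,idint,
     &                  mpi_comm_world,muster,ierr)
          man=muster(mpi_source)
          call mpi_recv(y,1,mpi_double_precision,man,idreal,
     &                  mpi_comm_world,muster,ierr)
          write (*,*) 'task',itask,' from worker',man,' result',y
          n=n+1
          x=dble(n)
          call mpi_send(n,1,mpi_integer,man,idint,
     &                  mpi_comm_world,ierr)
          call mpi_send(x,1,mpi_double_precision,man,idreal,
     &                  mpi_comm_world,ierr)
        enddo
c Drain the outstanding results, then send the stop signal (task 0).
        do i=1,imm
          call mpi_recv(itask,1,mpi_integer,mpi_any_source,idint,
     &                  mpi_comm_world,muster,ierr)
          man=muster(mpi_source)
          call mpi_recv(y,1,mpi_double_precision,man,idreal,
     &                  mpi_comm_world,muster,ierr)
          write (*,*) 'task',itask,' from worker',man,' result',y
        enddo
        do i=1,nodes-1
          call mpi_send(0,1,mpi_integer,i,idint,
     &                  mpi_comm_world,ierr)
        enddo
      else
c Worker: loop until the master sends task number 0.
   10   continue
        call mpi_recv(itask,1,mpi_integer,0,idint,
     &                mpi_comm_world,muster,ierr)
        if (itask.gt.0) then
          call mpi_recv(x,1,mpi_double_precision,0,idreal,
     &                  mpi_comm_world,muster,ierr)
          y=x*x
          call mpi_send(itask,1,mpi_integer,0,idint,
     &                  mpi_comm_world,ierr)
          call mpi_send(y,1,mpi_double_precision,0,idreal,
     &                  mpi_comm_world,ierr)
          goto 10
        endif
      endif
      call mpi_finalize(ierr)
      end

Receiving the task index from mpi_any_source and then addressing the follow-up receives and sends to muster(mpi_source) is what keeps faster workers busy without any collective synchronization; a zero task index doubles as the stop signal, exactly as in exec_mult_eeval_or_minim.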