--- /dev/null
+#ifdef MPI
+ subroutine init_task
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.SETUP'
+ include 'COMMON.CONTROL'
+ include 'COMMON.IOUNITS'
+ logical lprn /.false./
+c real*8 text1 /'group_i '/,text2/'group_f '/,
+c & text3/'initialb'/,text4/'initiale'/,
+c & text5/'openb'/,text6/'opene'/
+ integer cgtasks(0:max_cg_procs)
+ character*3 cfgprocs
+ integer cg_size,fg_size,fg_size1
+c start parallel processing
+c print *,'Initializing MPI'
+ call mpi_init(ierr)
+ if (ierr.ne.0) then
+ print *, ' cannot initialize MPI'
+ stop
+ endif
+c determine # of nodes and current node
+ call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
+ if (ierr.ne.0) then
+ print *, ' cannot determine rank of all processes'
+        call MPI_Finalize( IERR )
+ stop
+ endif
+      call MPI_Comm_size( MPI_COMM_WORLD, nodes, ierr )
+ if (ierr.ne.0) then
+ print *, ' cannot determine number of processes'
+ stop
+ endif
+ Nprocs=nodes
+ MyRank=me
+C Determine the number of "fine-grain" tasks
+ call getenv_loc("FGPROCS",cfgprocs)
+ read (cfgprocs,'(i3)') nfgtasks
+ if (nfgtasks.eq.0) nfgtasks=1
+ call getenv_loc("MAXGSPROCS",cfgprocs)
+ read (cfgprocs,'(i3)') max_gs_size
+ if (max_gs_size.eq.0) max_gs_size=2
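+c If FGPROCS or MAXGSPROCS is not set, getenv_loc returns blanks,
+c the internal reads yield 0, and the defaults of 1 and 2 apply.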
+ if (lprn)
+ & print *,"Processor",me," nfgtasks",nfgtasks,
+ & " max_gs_size",max_gs_size
+ if (nfgtasks.eq.1) then
+ CG_COMM = MPI_COMM_WORLD
+ fg_size=1
+ fg_rank=0
+ nfgtasks1=1
+ fg_rank1=0
+ else
+ nodes=nprocs/nfgtasks
+ if (nfgtasks*nodes.ne.nprocs) then
+          write (*,'(a)') 'ERROR: The number of fine-grain processes',
+ &   ' (FGPROCS) must be a divisor of the total number',
+ &   ' of processors.'
+          call MPI_Finalize( IERR )
+          stop
+ endif
+C Put the ranks of coarse-grain processes in one table and create
+C the respective communicator. The processes with ranks "in between"
+C the ranks of CG processes will perform fine graining for the CG
+C process with the next lower rank.
+ do i=0,nprocs-1,nfgtasks
+ cgtasks(i/nfgtasks)=i
+ enddo
+ if (lprn) then
+ print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
+c print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
+ endif
+c call memmon_print_usage()
+ call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
+ call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
+ call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
+ call MPI_Group_rank(cg_group,me,ierr)
+ call MPI_Group_free(world_group,ierr)
+ call MPI_Group_free(cg_group,ierr)
+c print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
+c call memmon_print_usage()
+ if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
+ if (lprn) print *," Processor",myrank," CG rank",me
+C Create communicators containing processes doing "fine grain" tasks.
+C The processes within each FG_COMM should have fast communication.
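+C MPI_Comm_split places processes with equal KOLOR in the same
+C communicator and orders them by KEY, so each block of NFGTASKS
+C consecutive world ranks becomes one FG_COMM with its CG process at
+C fine-grain rank 0.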
+ kolor=MyRank/nfgtasks
+ key=mod(MyRank,nfgtasks)
+ call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
+ call MPI_Comm_size(FG_COMM,fg_size,ierr)
+ if (fg_size.ne.nfgtasks) then
+ write (*,*) "OOOOps... the number of fg tasks is",fg_size,
+ & " but",nfgtasks," was requested. MyRank=",MyRank
+ endif
+ call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
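+C If a fine-grain group exceeds MAX_GS_SIZE, split it once more so
+C that each resulting FG_COMM1 spans at most MAX_GS_SIZE processes.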
+ if (fg_size.gt.max_gs_size) then
+ kolor1=fg_rank/max_gs_size
+ key1=mod(fg_rank,max_gs_size)
+ call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
+ call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
+ call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
+ else
+ FG_COMM1=FG_COMM
+ nfgtasks1=nfgtasks
+ fg_rank1=fg_rank
+ endif
+ endif
+ if (fg_rank.eq.0) then
+ write (*,*) "Processor",MyRank," out of",nprocs,
+ & " rank in CG_COMM",me," size of CG_COMM",nodes,
+ & " size of FG_COMM",fg_size,
+ & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
+ else
+ write (*,*) "Processor",MyRank," out of",nprocs,
+ & " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
+ & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
+ endif
+C Initialize other variables.
+c print '(a)','Before initialize'
+c call memmon_print_usage()
+ call initialize
+c print '(a,i5,a)','Processor',myrank,' After initialize'
+c call memmon_print_usage()
+C Open task-dependent files.
+c print '(a,i5,a)','Processor',myrank,' Before openunits'
+c call memmon_print_usage()
+ call openunits
+c print '(a,i5,a)','Processor',myrank,' After openunits'
+c call memmon_print_usage()
+      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file))
+ & write (iout,'(80(1h*)/a/80(1h*))')
+ & 'United-residue force field calculation - parallel job.'
+c print *,"Processor",myrank," exited OPENUNITS"
+ return
+ end
+C-----------------------------------------------------------------------------
+ subroutine finish_task
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.SETUP'
+ include 'COMMON.CONTROL'
+ include 'COMMON.REMD'
+ include 'COMMON.IOUNITS'
+ include 'COMMON.FFIELD'
+ include 'COMMON.TIME1'
+ include 'COMMON.MD'
+ integer ilen
+ external ilen
+c
+ call MPI_Barrier(CG_COMM,ierr)
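+c Broadcasting -1 tells the fine-grain slaves, which block on the
+c matching MPI_Bcast in their work loop, to terminate.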
+      if (nfgtasks.gt.1) then
+c An MPI buffer must be a variable, not a literal constant.
+        itmp=-1
+        call MPI_Bcast(itmp,1,MPI_INTEGER,king,FG_COMM,IERROR)
+      endif
+ time1=MPI_WTIME()
+ if (me.eq.king .or. .not. out1file) then
+ write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
+ write (iout,*) 'Total wall clock time',time1-walltime,' sec'
+ if (nfgtasks.gt.1) then
+ write (iout,'(80(1h=)/a/(80(1h=)))')
+ & "Details of FG communication time"
+ write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))')
+ & "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
+ & "GATHER:",time_gather,
+ & "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
+ & "BARRIER ene",time_barrier_e,
+ & "BARRIER grad",time_barrier_g,"TOTAL:",
+ & time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
+ & +time_barrier_e+time_barrier_g
+ write (*,*) 'Total wall clock time',time1-walltime,' sec'
+ write (*,*) "Processor",me," BROADCAST time",time_bcast,
+ & " REDUCE time",
+ & time_reduce," GATHER time",time_gather," SCATTER time",
+ & time_scatter," SENDRECV",time_sendrecv,
+ & " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
+ endif
+ endif
+ write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
+ if (ilen(tmpdir).gt.0) then
+ write (*,*) "Processor",me,
+ & ": moving output files to the parent directory..."
+ close(inp)
+ close(istat,status='keep')
+ if (ntwe.gt.0) call move_from_tmp(statname)
+ close(irest2,status='keep')
+        if (modecalc.eq.12 .or.
+ & (modecalc.eq.14 .and. .not.restart1file)) then
+ call move_from_tmp(rest2name)
+ else if (modecalc.eq.14.and. me.eq.king) then
+ call move_from_tmp(mremd_rst_name)
+ endif
+ if (mdpdb) then
+ close(ipdb,status='keep')
+ call move_from_tmp(pdbname)
+ else if (me.eq.king .or. .not.traj1file) then
+ close(icart,status='keep')
+ call move_from_tmp(cartname)
+ endif
+ if (me.eq.king .or. .not. out1file) then
+ close (iout,status='keep')
+ call move_from_tmp(outname)
+ endif
+ endif
+ return
+ end
+c-------------------------------------------------------------------------
+ subroutine pattern_receive
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.SETUP'
+ include 'COMMON.THREAD'
+ include 'COMMON.IOUNITS'
+ integer tag,status(MPI_STATUS_SIZE)
+ integer source,ThreadType
+ logical flag
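+C Drain all pending threading-pattern messages: probe with tag
+C ThreadType from any source and keep receiving while one is waiting.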
+ ThreadType=45
+ source=mpi_any_source
+ call mpi_iprobe(source,ThreadType,
+ & CG_COMM,flag,status,ierr)
+ do while (flag)
+ write (iout,*) 'Processor ',Me,' is receiving threading',
+ & ' pattern from processor',status(mpi_source)
+ write (*,*) 'Processor ',Me,' is receiving threading',
+ & ' pattern from processor',status(mpi_source)
+ nexcl=nexcl+1
+        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),
+ &    ThreadType, CG_COMM,ireq,ierr)
+c Complete the receive before the pattern is read and printed.
+        call mpi_wait(ireq,status,ierr)
+        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
+ &    iexam(2,nexcl)
+ source=mpi_any_source
+ call mpi_iprobe(source,ThreadType,
+ & CG_COMM,flag,status,ierr)
+ enddo
+ return
+ end
+c----------------------------------------------------------------------------
+ subroutine pattern_send
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.INFO'
+ include 'COMMON.THREAD'
+ include 'COMMON.IOUNITS'
+      integer source,ThreadType,ireq,status(MPI_STATUS_SIZE)
+ ThreadType=45
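+C Send the newest exclusion pattern to every CG processor that has
+C not yet finished (Koniec(iproc) is .false.).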
+ do iproc=0,nprocs-1
+ if (iproc.ne.me .and. .not.Koniec(iproc) ) then
+          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
+ &    ThreadType, CG_COMM, ireq, ierr)
+c Complete the send so that the request handle is not leaked.
+          call mpi_wait(ireq,status,ierr)
+ write (iout,*) 'CG processor ',me,' has sent pattern ',
+ & 'to processor',iproc
+ write (*,*) 'CG processor ',me,' has sent pattern ',
+ & 'to processor',iproc
+ write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
+ endif
+ enddo
+ return
+ end
+c-----------------------------------------------------------------------------
+ subroutine send_stop_sig(Kwita)
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.INFO'
+ include 'COMMON.IOUNITS'
+ integer StopType,StopId,iproc,Kwita,NBytes
+ StopType=66
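+C STOP-signal protocol: the master notifies every processor that has
+C not yet finished, while a worker notifies only the master; Kwita
+C carries the termination code.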
+c Kwita=-1
+C print *,'CG processor',me,' StopType=',StopType
+ Koniec(me)=.true.
+ if (me.eq.king) then
+C Master sends the STOP signal to everybody.
+        write (iout,'(a)')
+ &   'Master is sending STOP signal to other processors.'
+ do iproc=1,nprocs-1
+ print *,'Koniec(',iproc,')=',Koniec(iproc)
+ if (.not. Koniec(iproc)) then
+ call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
+ & mpi_comm_world,ierr)
+            write (iout,*) 'Iproc=',iproc,' ierr=',ierr
+            write (*,*) 'Iproc=',iproc,' ierr=',ierr
+ endif
+ enddo
+ else
+C Else send the STOP signal to Master.
+ call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
+ & mpi_comm_world,ierr)
+        write (iout,*) 'CG processor=',me,' ierr=',ierr
+        write (*,*) 'CG processor=',me,' ierr=',ierr
+ endif
+ return
+ end
+c-----------------------------------------------------------------------------
+ subroutine recv_stop_sig(Kwita)
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.INFO'
+ include 'COMMON.IOUNITS'
+      integer source,StopType,StopId,iproc,Kwita,ireq
+      integer status(MPI_STATUS_SIZE)
+      logical flag
+ StopType=66
+ Kwita=0
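+C Kwita stays 0 if no STOP signal is pending; otherwise it returns
+C the code received from the last notifying processor.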
+ source=mpi_any_source
+c print *,'CG processor:',me,' StopType=',StopType
+ call mpi_iprobe(source,StopType,
+ & mpi_comm_world,flag,status,ierr)
+ do while (flag)
+ Koniec(status(mpi_source))=.true.
+ write (iout,*) 'CG processor ',me,' is receiving STOP signal',
+ & ' from processor',status(mpi_source)
+ write (*,*) 'CG processor ',me,' is receiving STOP signal',
+ & ' from processor',status(mpi_source)
+        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,
+ &   mpi_comm_world,ireq,ierr)
+c Complete the receive before Kwita is returned to the caller.
+        call mpi_wait(ireq,status,ierr)
+ call mpi_iprobe(source,StopType,
+ & mpi_comm_world,flag,status,ierr)
+ enddo
+ return
+ end
+c-----------------------------------------------------------------------------
+ subroutine send_MCM_info(ione)
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.SETUP'
+ include 'COMMON.MCM'
+ include 'COMMON.IOUNITS'
+ integer tag,status(MPI_STATUS_SIZE)
+ integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
+ common /aaaa/ isend,irecv
+      integer nsend
+      save nsend
+      data nsend /0/
+ nsend=nsend+1
+ MCM_info_Type=77
+cd write (iout,'(a,i4,a)') 'CG Processor',me,
+cd & ' is sending MCM info to Master.'
+      write (*,'(a,i4,a)') 'CG processor',me,
+ &   ' is sending MCM info to Master.'
+      call mpi_isend(ione,1,mpi_integer,MasterID,
+ &  MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
+c The request handle is defined only after mpi_isend returns; wait
+c here so that the send completes before the routine exits.
+      call mpi_wait(MCM_info_ID,status,ierr)
+cd write (iout,*) 'CG processor',me,' has sent info to the master;',
+cd & ' MCM_info_ID=',MCM_info_ID
+      write (*,*) 'CG processor',me,' has sent info to the master;',
+ &    ' ierr ',ierr
+ isend=0
+ irecv=0
+ return
+ end
+c----------------------------------------------------------------------------
+ subroutine receive_MCM_info
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.SETUP'
+ include 'COMMON.MCM'
+ include 'COMMON.IOUNITS'
+ integer tag,status(MPI_STATUS_SIZE)
+ integer source,MCM_info_Type,MCM_info_ID,iproc,ione
+ logical flag
+ MCM_info_Type=77
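+C The master drains pending MCM info messages: each received IONE
+C increments NACC_TOT, and IONE=2 additionally credits a saved
+C conformation to the sending task.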
+ source=mpi_any_source
+c print *,'source=',source,' dontcare=',dontcare
+ call mpi_iprobe(source,MCM_info_Type,
+ & mpi_comm_world,flag,status,ierr)
+ do while (flag)
+ source=status(mpi_source)
+ itask=source/fgProcs+1
+cd write (iout,*) 'Master is receiving MCM info from processor ',
+cd & source,' itask',itask
+ write (*,*) 'Master is receiving MCM info from processor ',
+ & source,' itask',itask
+        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,
+ &    mpi_comm_world,MCM_info_ID,ierr)
+c Wait for completion before IONE is examined below.
+        call mpi_wait(MCM_info_ID,status,ierr)
+cd write (iout,*) 'Received from processor',source,' IONE=',ione
+ write (*,*) 'Received from processor',source,' IONE=',ione
+ nacc_tot=nacc_tot+1
+ if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
+cd print *,'nsave_part(',itask,')=',nsave_part(itask)
+cd write (iout,*) 'Nacc_tot=',Nacc_tot
+cd write (*,*) 'Nacc_tot=',Nacc_tot
+ source=mpi_any_source
+ call mpi_iprobe(source,MCM_info_Type,
+ & mpi_comm_world,flag,status,ierr)
+ enddo
+ return
+ end
+c---------------------------------------------------------------------------
+ subroutine send_thread_results
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.SETUP'
+ include 'COMMON.THREAD'
+ include 'COMMON.IOUNITS'
+ integer tag,status(MPI_STATUS_SIZE)
+ integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
+ & EnerID,msglen,nbytes
+ double precision buffer(20*maxthread+2)
+ ThreadType=444
+ EnerType=555
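+C Pack bookkeeping data at the end of the arrays: column NTHREAD+1 of
+C IPATT holds the thread and exclusion counts, ENER columns
+C NTHREAD+1..2*NTHREAD hold ENER0, and column 2*NTHREAD+1 holds the
+C timing statistics.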
+ ipatt(1,nthread+1)=nthread
+ ipatt(2,nthread+1)=nexcl
+ do i=1,nthread
+ do j=1,n_ene
+ ener(j,i+nthread)=ener0(j,i)
+ enddo
+ enddo
+ ener(1,2*nthread+1)=max_time_for_thread
+ ener(2,2*nthread+1)=ave_time_for_thread
+C Send the IPATT array
+ write (iout,*) 'CG processor',me,
+ & ' is sending IPATT array to master: NTHREAD=',nthread
+ write (*,*) 'CG processor',me,
+ & ' is sending IPATT array to master: NTHREAD=',nthread
+ msglen=2*nthread+2
+ call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
+ & ThreadType,mpi_comm_world,ierror)
+ write (iout,*) 'CG processor',me,
+ & ' has sent IPATT array to master MSGLEN',msglen
+ write (*,*) 'CG processor',me,
+ & ' has sent IPATT array to master MSGLEN',msglen
+C Send the energies.
+ msglen=n_ene2*nthread+2
+ write (iout,*) 'CG processor',me,' is sending energies to master.'
+ write (*,*) 'CG processor',me,' is sending energies to master.'
+ call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
+ & EnerType,mpi_comm_world,ierror)
+ write (iout,*) 'CG processor',me,' has sent energies to master.'
+ write (*,*) 'CG processor',me,' has sent energies to master.'
+ return
+ end
+c----------------------------------------------------------------------------
+ subroutine receive_thread_results(iproc)
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+ include 'mpif.h'
+ include 'COMMON.INFO'
+ include 'COMMON.THREAD'
+ include 'COMMON.IOUNITS'
+ integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
+ & EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
+ double precision buffer(20*maxthread+2),max_time_for_thread_t,
+ & ave_time_for_thread_t
+ logical flag
+ ThreadType=444
+ EnerType=555
+C Receive the IPATT array
+ call mpi_probe(iproc,ThreadType,
+ & mpi_comm_world,status,ierr)
+ call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
+ write (iout,*) 'Master is receiving IPATT array from processor:',
+ & iproc,' MSGLEN',msglen
+ write (*,*) 'Master is receiving IPATT array from processor:',
+ & iproc,' MSGLEN',msglen
+ call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
+ & ThreadType,
+ & mpi_comm_world,status,ierror)
+ write (iout,*) 'Master has received IPATT array from processor:',
+ & iproc,' MSGLEN=',msglen
+ write (*,*) 'Master has received IPATT array from processor:',
+ & iproc,' MSGLEN=',msglen
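+C The last column of the received block carries the sender's thread
+C and exclusion counts, packed by send_thread_results.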
+ nthread_temp=ipatt(1,nthread+msglen/2)
+ nexcl_temp=ipatt(2,nthread+msglen/2)
+C Receive the energies.
+ call mpi_probe(iproc,EnerType,
+ & mpi_comm_world,status,ierr)
+ call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
+ write (iout,*) 'Master is receiving energies from processor:',
+ & iproc,' MSGLEN=',MSGLEN
+ write (*,*) 'Master is receiving energies from processor:',
+ & iproc,' MSGLEN=',MSGLEN
+ call mpi_recv(ener(1,nthread+1),msglen,
+ & MPI_DOUBLE_PRECISION,iproc,
+ & EnerType,MPI_COMM_WORLD,status,ierror)
+ write (iout,*) 'Msglen=',Msglen
+ write (*,*) 'Msglen=',Msglen
+ write (iout,*) 'Master has received energies from processor',iproc
+ write (*,*) 'Master has received energies from processor',iproc
+ write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
+ write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
+ do i=1,nthread_temp
+ do j=1,n_ene
+ ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
+ enddo
+ enddo
+ max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
+ ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
+ write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
+ write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
+ write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
+ write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
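+C Merge the timing statistics: keep the larger maximum and combine
+C the two averages weighted by their thread counts.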
+ if (max_time_for_thread_t.gt.max_time_for_thread)
+ & max_time_for_thread=max_time_for_thread_t
+ ave_time_for_thread=(nthread*ave_time_for_thread+
+ & nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
+ nthread=nthread+nthread_temp
+ return
+ end
+#else
+ subroutine init_task
+ implicit real*8 (a-h,o-z)
+ include 'DIMENSIONS'
+      include 'COMMON.SETUP'
+      include 'COMMON.IOUNITS'
+ me=0
+ myrank=0
+ fg_rank=0
+ fg_size=1
+ nodes=1
+ nprocs=1
+ call initialize
+ call openunits
+ write (iout,'(80(1h*)/a/80(1h*))')
+ & 'United-residue force field calculation - serial job.'
+ return
+ end
+#endif