      module MPI_
!-----------------------------------------------------------------------------
      use io_units
      use MPI_data
      implicit none
!-----------------------------------------------------------------------------
!
!
!-----------------------------------------------------------------------------
      contains
!-----------------------------------------------------------------------------
! MP.F
!-----------------------------------------------------------------------------
#ifdef MPI
      subroutine init_task
      use control, only: initialize,getenv_loc
      use io_config, only: openunits
      use control_data, only: out1file
      include 'mpif.h'
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
!      include 'COMMON.CONTROL'
!      include 'COMMON.IOUNITS'
      logical :: lprn=.false.
!      real*8 text1 /'group_i '/,text2/'group_f '/,
!     &  text3/'initialb'/,text4/'initiale'/,
!     &  text5/'openb'/,text6/'opene'/
      integer,dimension(0:max_cg_procs) :: cgtasks !(0:max_cg_procs)
      character(len=3) :: cfgprocs
      integer :: cg_size,fg_size,fg_size1
!el local variables
      integer :: i,ierr,key
! MPI group handles are default integers, not real(kind=8)
      integer :: world_group,cg_group
      allocate(status(MPI_STATUS_SIZE))
! start parallel processing
      print *,'Initializing MPI'
      print *,'MPI_STATUS_SIZE',MPI_STATUS_SIZE
      call mpi_init(ierr)
      write (2,*) "ierr",ierr
      call flush(iout)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
! determine # of nodes and current node
      call MPI_Comm_rank(MPI_COMM_WORLD,me,ierr)
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
! MPI_Finalize takes only the error-status argument
        call MPI_Finalize(ierr)
        stop
      endif
      call MPI_Comm_size(MPI_COMM_WORLD,nodes,ierr)
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
! Determine the number of "fine-grain" tasks
      print *,"Before getenv FGPROCS"
      call getenv_loc("FGPROCS",cfgprocs)
      print *,cfgprocs
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      print *,nfgtasks
      print *,"Before getenv MAXGSPROCS"
      call getenv_loc("MAXGSPROCS",cfgprocs)
      print *,cfgprocs
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
      if (lprn) &
        print *,"Processor",me," nfgtasks",nfgtasks, &
          " max_gs_size",max_gs_size
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: The number of processors assigned', &
            ' to each coarse-grained task must be a divisor', &
            ' of the total number of processors.'
          call MPI_Finalize(ierr)
          stop
        endif
! Put the ranks of coarse-grain processes in one table and create
! the respective communicator. The processes with ranks "in between"
! the ranks of CG processes will perform fine graining for the CG
! process with the next lower rank.
!el        allocate(cgtasks(0:nodes))
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
        if (lprn) then
          print *,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
!          print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
!        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,ierr)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,ierr)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,ierr)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
!        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
!        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
!
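! At this point "me" holds the CG rank (MPI_UNDEFINED on pure fine-grain
! workers). Illustrative layout (an example, not from the original source)
! for nprocs=8, nfgtasks=4:
!   world ranks:  0 1 2 3 | 4 5 6 7
!   CG_COMM:      0 . . . | 1 . . .   (CG masters: world ranks 0 and 4)
!   FG_COMM:      0 1 2 3 | 0 1 2 3   (one fine-grain group per CG task)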
! Create communicators containing processes doing "fine grain" tasks.
! The processes within each FG_COMM should have fast communication.
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size, &
            " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=nfgtasks
          fg_rank1=fg_rank
        endif
      endif
      if (lprn) then
        if (fg_rank.eq.0) then
          write (*,*) "Processor",MyRank," out of",nprocs, &
            " rank in CG_COMM",me," size of CG_COMM",nodes, &
            " size of FG_COMM",fg_size, &
            " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        else
          write (*,*) "Processor",MyRank," out of",nprocs, &
            " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size, &
            " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        endif
      endif
! Initialize other variables.
!      print '(a)','Before initialize'
!      call memmon_print_usage()
      call initialize
!      print '(a,i5,a)','Processor',myrank,' After initialize'
!      call memmon_print_usage()
! Open task-dependent files.
!      print '(a,i5,a)','Processor',myrank,' Before openunits'
!      call memmon_print_usage()
      call openunits
!      print '(a,i5,a)','Processor',myrank,' After openunits'
!      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file)) &
        write (iout,'(80(1h*)/a/80(1h*))') &
          'United-residue force field calculation - parallel job.'
!      print *,"Processor",myrank," exited OPENUNITS"
!el      deallocate(cgtasks)
      return
      end subroutine init_task
!-----------------------------------------------------------------------------
      subroutine finish_task
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      use energy
      use io_base, only: ilen,move_from_tmp
      use MD_data, only: mdpdb,ntwe
      use REMD_data, only: restart1file,traj1file
      use control_data
      include 'mpif.h'
!      use MD
!      include 'COMMON.SETUP'
!      include 'COMMON.CONTROL'
!      include 'COMMON.REMD'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.TIME1'
!      include 'COMMON.MD'
!el      integer ilen
!el      external ilen
!el local variables
      integer :: ierror,ierr
      real(kind=8) :: time1
!
      call MPI_Barrier(CG_COMM,ierr)
      if (nfgtasks.gt.1) &
        call MPI_Bcast(-1,1,MPI_INTEGER,king,FG_COMM,ierror)
      time1=MPI_WTIME()
!      if (me.eq.king .or. .not. out1file) then
      write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      write (iout,*) 'Total wall clock time',time1-walltime,' sec'
      if (nfgtasks.gt.1) then
        write (iout,'(80(1h=)/a/(80(1h=)))') &
          "Details of FG communication time"
        write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))') &
          "BROADCAST:",time_bcast,"REDUCE:",time_reduce, &
          "GATHER:",time_gather, &
          "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv, &
          "BARRIER ene",time_barrier_e, &
          "BARRIER grad",time_barrier_g,"TOTAL:", &
          time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv &
          +time_barrier_e+time_barrier_g
        write (*,*) 'Total wall clock time',time1-walltime,' sec'
        write (*,*) "Processor",me," BROADCAST time",time_bcast, &
          " REDUCE time",time_reduce," GATHER time",time_gather, &
          " SCATTER time",time_scatter," SENDRECV",time_sendrecv, &
          " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
      endif
!      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
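! If the job ran in a scratch directory (tmpdir nonempty), close the
! task files and move the output back to the submission directory.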
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me, &
          ": moving output files to the parent directory..."
        close(inp)
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12 .or. &
           (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14 .and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        endif
        if (mdpdb) then
          close(ipdb,status='keep')
          call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
          close(icart,status='keep')
          call move_from_tmp(cartname)
        endif
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end subroutine finish_task
!-----------------------------------------------------------------------------
      subroutine pattern_receive
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use compare_data, only: nexcl,iexam
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: source,ThreadType
      logical :: flag
      integer :: ierr,ireq,iproc
      ThreadType=45
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType, &
        CG_COMM,flag,status,ierr)
      do while (flag)
        write (iout,*) 'Processor ',Me,' is receiving threading', &
          ' pattern from processor',status(mpi_source)
        write (*,*) 'Processor ',Me,' is receiving threading', &
          ' pattern from processor',status(mpi_source)
        nexcl=nexcl+1
        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source), &
          ThreadType,CG_COMM,ireq,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl), &
          iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType, &
          CG_COMM,flag,status,ierr)
      enddo
      return
      end subroutine pattern_receive
!-----------------------------------------------------------------------------
      subroutine pattern_send
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use compare_data, only: nexcl,iexam
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: source,ThreadType,ireq,iproc,ierr
      ThreadType=45
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc)) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc, &
            ThreadType,CG_COMM,ireq,ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ', &
            'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ', &
            'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
        endif
      enddo
      end subroutine pattern_send
!-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.IOUNITS'
      integer :: StopType,StopId,iproc,Kwita,NBytes
      integer :: ierr
      StopType=66
!      Kwita=-1
!      print *,'CG processor',me,' StopType=',StopType
      Koniec(me)=.true.
      if (me.eq.king) then
! Master sends the STOP signal to everybody.
        write (iout,'(a,a)') &
          'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType, &
              mpi_comm_world,ierr)
            write (iout,*) 'Iproc=',iproc,' StopID=',StopID
            write (*,*) 'Iproc=',iproc,' StopID=',StopID
          endif
        enddo
      else
! Else send the STOP signal to Master.
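! (The matching recv_stop_sig on the receiving side polls for this
! message with mpi_iprobe using the same StopType=66 tag.)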
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType, &
          mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,' StopID=',StopID
        write (*,*) 'CG processor=',me,' StopID=',StopID
      endif
      return
      end subroutine send_stop_sig
!-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.IOUNITS'
      integer :: source,StopType,StopId,iproc,Kwita,ireq,ierr
      logical :: flag
      StopType=66
      Kwita=0
      source=mpi_any_source
! guard against re-allocation on repeated calls
      if (.not.allocated(koniec)) allocate(koniec(0:nprocs)) !(0:maxprocs-1)
!      print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType, &
        mpi_comm_world,flag,status,ierr)
      do while (flag)
        Koniec(status(mpi_source))=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal', &
          ' from processor',status(mpi_source)
        write (*,*) 'CG processor ',me,' is receiving STOP signal', &
          ' from processor',status(mpi_source)
        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType, &
          mpi_comm_world,ireq,ierr)
        call mpi_iprobe(source,StopType, &
          mpi_comm_world,flag,status,ierr)
      enddo
      return
      end subroutine recv_stop_sig
!-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.MCM'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: MCM_info_Type,MCM_info_ID,iproc,one,NBytes
!el      common /aaaa/ isend,irecv
      integer :: ione,ierr,isend,irecv
      integer,save :: nsend=0   ! running count of messages sent
      nsend=nsend+1
      MCM_info_Type=77
!cd      write (iout,'(a,i4,a)') 'CG Processor',me,
!cd     &  ' is sending MCM info to Master.'
! MCM_info_ID is defined only after mpi_isend, so it is not printed here
      write (*,'(a,i4,a)') 'CG processor',me, &
        ' is sending MCM info to Master.'
      call mpi_isend(ione,1,mpi_integer,MasterID, &
        MCM_info_Type,mpi_comm_world,MCM_info_ID,ierr)
!cd      write (iout,*) 'CG processor',me,' has sent info to the master;',
!cd     &  ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;', &
        ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      isend=0
      irecv=0
      return
      end subroutine send_MCM_info
!-----------------------------------------------------------------------------
      subroutine receive_MCM_info
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use names
      use MCM_data, only: nacc_tot,nsave_part
      use compare_data, only: nthread,nexcl,ipatt
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.MCM'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical :: flag
      integer :: itask,ierr
      MCM_info_Type=77
      source=mpi_any_source
!
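! Poll for MCM messages from any slave; the sender's world rank is
! mapped back to its coarse-grain task number (fgProcs world ranks
! per CG task).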
!      print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type, &
        mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
!d        write (iout,*) 'Master is receiving MCM info from processor ',
!d     &    source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ', &
          source,' itask',itask
        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_Type, &
          mpi_comm_world,MCM_info_ID,ierr)
!d        write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
!cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
!cd      write (iout,*) 'Nacc_tot=',Nacc_tot
!cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type, &
          mpi_comm_world,flag,status,ierr)
      enddo
      return
      end subroutine receive_MCM_info
!-----------------------------------------------------------------------------
      subroutine send_thread_results
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use names
      use compare_data
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType, &
        EnerID,msglen,nbytes
      real(kind=8) :: buffer(20*maxthread+2)
      integer :: i,j,ierror
      ThreadType=444
      EnerType=555
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
! Send the IPATT array
      write (iout,*) 'CG processor',me, &
        ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me, &
        ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID, &
        ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me, &
        ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me, &
        ' has sent IPATT array to master MSGLEN',msglen
! Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID, &
        EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end subroutine send_thread_results
!-----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use names
      use compare_data
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType, &
        EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      real(kind=8) :: buffer(20*maxthread+2),max_time_for_thread_t, &
        ave_time_for_thread_t
      logical :: flag
      integer :: iproc,ierr,ierror,i,j
      real(kind=8) :: nexcl_temp
      ThreadType=444
      EnerType=555
!
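! Message layout (must mirror send_thread_results): the sender appends
! (nthread,nexcl) as an extra ipatt column, copies ener0 into the
! nthread columns of ener following the energies, and stores the
! max/average thread times in the column after those.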
! Receive the IPATT array
      call mpi_probe(iproc,ThreadType, &
        mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(status,MPI_INTEGER,msglen,ierror)
      write (iout,*) 'Master is receiving IPATT array from processor:', &
        iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:', &
        iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc, &
        ThreadType,mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:', &
        iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:', &
        iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
! Receive the energies.
      call mpi_probe(iproc,EnerType, &
        mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(status,MPI_DOUBLE_PRECISION,msglen,ierror)
      write (iout,*) 'Master is receiving energies from processor:', &
        iproc,' MSGLEN=',msglen
      write (*,*) 'Master is receiving energies from processor:', &
        iproc,' MSGLEN=',msglen
      call mpi_recv(ener(1,nthread+1),msglen, &
        MPI_DOUBLE_PRECISION,iproc, &
        EnerType,mpi_comm_world,status,ierror)
      write (iout,*) 'Msglen=',msglen
      write (*,*) 'Msglen=',msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread) &
        max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+ &
        nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end subroutine receive_thread_results
!-----------------------------------------------------------------------------
#else
      subroutine init_task
      use control, only: initialize,getenv_loc
      use io_config, only: openunits
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
      integer :: fg_size
      me=0
      myrank=0
      fg_rank=0
      fg_size=1
      nodes=1
      nprocs=1
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))') &
        'United-residue force field calculation - serial job.'
      return
      end subroutine init_task
#endif
!-----------------------------------------------------------------------------
!-----------------------------------------------------------------------------
      end module MPI_