!-----------------------------------------------------------------------------
      subroutine init_task
      use control, only: initialize,getenv_loc
      use io_config, only: openunits
      use control_data, only: out1file
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
!      include 'COMMON.CONTROL'
!      include 'COMMON.IOUNITS'
      logical :: lprn=.false.
!      real*8 text1 /'group_i '/,text2/'group_f '/,
!     & text3/'initialb'/,text4/'initiale'/,
!     & text5/'openb'/,text6/'opene'/
      integer,dimension(0:max_cg_procs) :: cgtasks !(0:max_cg_procs)
      character(len=3) :: cfgprocs
      integer :: cg_size,fg_size,fg_size1
      integer :: i,ierr,kolor,key,kolor1,key1
! MPI group handles are default integers, not reals.
      integer :: world_group,cg_group
      allocate(status(MPI_STATUS_SIZE))

! start parallel processing
      print *,'Initializing MPI'
      print *,'MPI_STATUS_SIZE',MPI_STATUS_SIZE
      call mpi_init(ierr)
      write(2,*) "ierr",ierr
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      end if
! determine # of nodes and current node
      call MPI_COMM_RANK( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
        call MPI_Finalize( IERR )
        stop
      end if
      call MPI_Comm_size( MPI_Comm_world, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        call MPI_Finalize( IERR )
        stop
      end if
      nprocs=nodes
! Determine the number of "fine-grain" tasks
      print *,"Before getenv FGPROCS"
      call getenv_loc("FGPROCS",cfgprocs)
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      print *,"Before getenv MAXGSPROCS"
      call getenv_loc("MAXGSPROCS",cfgprocs)
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
      print *,"Processor",me," nfgtasks",nfgtasks,&
        " max_gs_size",max_gs_size
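! FGPROCS gives the number of fine-grain tasks per coarse-grain task and
! MAXGSPROCS caps the size of the second-level FG_COMM1 groups. An
! illustrative (not prescriptive) setting:
!   export FGPROCS=4 ; export MAXGSPROCS=2 ; mpirun -np 8 ...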
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: Number of processors assigned',&
            ' to coarse-grained tasks must be divisor',&
            ' of the total number of processors.'
          call MPI_Finalize( IERR )
          stop
        end if
! Put the ranks of coarse-grain processes in one table and create
! the respective communicator. The processes with ranks "in between"
! the ranks of CG processes will perform fine graining for the CG
! process with the next lower rank.
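! E.g., with nprocs=8 and nfgtasks=4, cgtasks=(0,4): world ranks 0 and 4
! become CG processes, while ranks 1-3 and 5-7 do fine-grain work for them.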
!el      allocate(cgtasks(0:nodes))
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        end do
        print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
!        print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
!        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
!        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
!        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
! Create communicators containing processes doing "fine grain" tasks.
! The processes within each FG_COMM should have fast communication.
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
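! Ranks sharing kolor=MyRank/nfgtasks land in the same FG_COMM; the key
! ordering makes each CG process (MyRank divisible by nfgtasks) fg_rank 0.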
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size,&
            " but",nfgtasks," was requested. MyRank=",MyRank
        end if
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=fg_size
          fg_rank1=fg_rank
        end if
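! FG_COMM groups larger than max_gs_size are split once more (FG_COMM1),
! presumably to keep collectives inside small subgroups with fast links.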
        if (fg_rank.eq.0) then
          write (*,*) "Processor",MyRank," out of",nprocs,&
            " rank in CG_COMM",me," size of CG_COMM",nodes,&
            " size of FG_COMM",fg_size,&
            " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        else
          write (*,*) "Processor",MyRank," out of",nprocs,&
            " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,&
            " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        end if
      end if
! Initialize other variables.
!      print '(a)','Before initialize'
!      call memmon_print_usage()
      call initialize
!      print '(a,i5,a)','Processor',myrank,' After initialize'
!      call memmon_print_usage()
! Open task-dependent files.
!      print '(a,i5,a)','Processor',myrank,' Before openunits'
!      call memmon_print_usage()
      call openunits
!      print '(a,i5,a)','Processor',myrank,' After openunits'
!      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file)) &
        write (iout,'(80(1h*)/a/80(1h*))') &
          'United-residue force field calculation - parallel job.'
!      print *,"Processor",myrank," exited OPENUNITS"
!el      deallocate(cgtasks)
      return
      end subroutine init_task
!-----------------------------------------------------------------------------
      subroutine finish_task
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use io_base, only: ilen,move_from_tmp
      use MD_data, only: mdpdb,ntwe
      use REMD_data, only: restart1file,traj1file
!      include 'COMMON.SETUP'
!      include 'COMMON.CONTROL'
!      include 'COMMON.REMD'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.TIME1'
!      include 'COMMON.MD'
      integer :: IERROR,ierr
      integer :: istop_sig
      real(kind=8) :: time1

      time1=MPI_Wtime()
      call MPI_Barrier(CG_COMM,ierr)
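! The -1 broadcast on FG_COMM presumably acts as a sentinel telling the
! fine-grain slaves to leave their task loop.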
      istop_sig=-1
      call MPI_Bcast(istop_sig,1,MPI_INTEGER,king,FG_COMM,IERROR)
!      if (me.eq.king .or. .not. out1file) then
      write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      write (iout,*) 'Total wall clock time',time1-walltime,' sec'
      if (nfgtasks.gt.1) then
        write (iout,'(80(1h=)/a/(80(1h=)))') &
          "Details of FG communication time"
        write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))') &
          "BROADCAST:",time_bcast,"REDUCE:",time_reduce,&
          "GATHER:",time_gather,&
          "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,&
          "BARRIER ene",time_barrier_e,&
          "BARRIER grad",time_barrier_g,"TOTAL:",&
          time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv &
          +time_barrier_e+time_barrier_g
        write (*,*) 'Total wall clock time',time1-walltime,' sec'
        write (*,*) "Processor",me," BROADCAST time",time_bcast,&
          " REDUCE time",&
          time_reduce," GATHER time",time_gather," SCATTER time",&
          time_scatter," SENDRECV",time_sendrecv,&
          " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
      end if
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,&
          ": moving output files to the parent directory..."
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12 .or. &
           (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14 .and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        end if
        if (mdpdb) then
          close(ipdb,status='keep')
          call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
          close(icart,status='keep')
          call move_from_tmp(cartname)
        end if
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        end if
      end if
      end subroutine finish_task
!-----------------------------------------------------------------------------
      subroutine pattern_receive
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use compare_data, only: nexcl,iexam
!      include 'COMMON.SETUP'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: source,ThreadType
      logical :: flag
      integer :: ierr,ireq,iproc
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,&
        CG_COMM,flag,status,ierr)
      do while (flag)
        write (iout,*) 'Processor ',Me,' is receiving threading',&
          ' pattern from processor',status(mpi_source)
        write (*,*) 'Processor ',Me,' is receiving threading',&
          ' pattern from processor',status(mpi_source)
        nexcl=nexcl+1
        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),&
          ThreadType, CG_COMM,ireq,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),&
          iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,&
          CG_COMM,flag,status,ierr)
      end do
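! The iprobe/irecv loop above drains every pending pattern message and
! returns immediately once none are waiting.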
      end subroutine pattern_receive
!-----------------------------------------------------------------------------
      subroutine pattern_send
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use compare_data, only: nexcl,iexam
!      include 'COMMON.INFO'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: source,ThreadType,ireq,iproc,ierr
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc)) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,&
            ThreadType, CG_COMM, ireq, ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',&
            ' to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',&
            ' to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
        end if
      end do
      end subroutine pattern_send
!-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.INFO'
!      include 'COMMON.IOUNITS'
      integer :: StopType,StopId,iproc,Kwita,NBytes
      integer :: ierr
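! "Kwita" (Polish, roughly "quits") is the stop code exchanged between
! the master and the CG processors.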
!      print *,'CG processor',me,' StopType=',StopType
      if (me.eq.MasterID) then
! Master sends the STOP signal to everybody.
        write (iout,'(a)') &
          'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,&
              mpi_comm_world,ierr)
            write (iout,*) 'Iproc=',iproc,' StopID=',StopID
            write (*,*) 'Iproc=',iproc,' StopID=',StopID
          end if
        end do
      else
! Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,&
          mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,' StopID=',StopID
        write (*,*) 'CG processor=',me,' StopID=',StopID
      end if
      end subroutine send_stop_sig
!-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.INFO'
!      include 'COMMON.IOUNITS'
      integer :: source,StopType,StopId,iproc,Kwita,ireq,ierr
      integer :: status(MPI_STATUS_SIZE)
      logical :: flag

      Kwita=0
      source=mpi_any_source
      allocate(koniec(0:nprocs)) !(0:maxprocs-1)
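! Koniec(i) ("koniec" is Polish for "end") marks that processor i has
! already delivered its STOP signal.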
!      print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,&
        mpi_comm_world,flag,status,ierr)
      do while (flag)
        Koniec(status(mpi_source))=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',&
          ' from processor',status(mpi_source)
        write (*,*) 'CG processor ',me,' is receiving STOP signal',&
          ' from processor',status(mpi_source)
        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,&
          mpi_comm_world,ireq,ierr)
        call mpi_iprobe(source,StopType,&
          mpi_comm_world,flag,status,ierr)
      end do
      end subroutine recv_stop_sig
!-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
!      include 'COMMON.MCM'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: MCM_info_Type,MCM_info_ID,iproc,one,NBytes
!el      common /aaaa/ isend,irecv
      integer :: nsend,ione,ierr,isend,irecv
!cd      write (iout,'(a,i4,a)') 'CG Processor',me,
!cd     & ' is sending MCM info to Master.'
      write (*,'(a,i4,a,i8)') 'CG processor',me,&
        ' is sending MCM info to Master, MCM_info_ID=',MCM_info_ID
      call mpi_isend(ione,1,mpi_integer,MasterID,&
        MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
!cd      write (iout,*) 'CG processor',me,' has sent info to the master;',
!cd     & ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',&
        ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      end subroutine send_MCM_info
!-----------------------------------------------------------------------------
      subroutine receive_MCM_info
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MCM_data, only: nacc_tot,nsave_part
      use compare_data, only: nthread,nexcl,ipatt
!      include 'COMMON.SETUP'
!      include 'COMMON.MCM'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical :: flag
      integer :: itask,ierr
      source=mpi_any_source
!      print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,&
        mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
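! Map the sender's world rank back to a 1-based CG task index; e.g. with
! fgProcs=4, world ranks 0-3 give itask=1 and ranks 4-7 give itask=2.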
!d      write (iout,*) 'Master is receiving MCM info from processor ',&
!d        source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',&
          source,' itask',itask
        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,&
          mpi_comm_world,MCM_info_ID,ierr)
!d      write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
!cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
!cd      write (iout,*) 'Nacc_tot=',Nacc_tot
!cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,&
          mpi_comm_world,flag,status,ierr)
      end do
      end subroutine receive_MCM_info
!-----------------------------------------------------------------------------
      subroutine send_thread_results
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,&
        EnerID,ReadyType,ReadyID,Ready,msglen,nbytes
      real(kind=8) :: buffer(20*maxthread+2)
      integer :: i,j,ierror
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene2
          ener(j,i+nthread)=ener0(j,i)
        end do
      end do
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
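! Packing layout: ener columns 1..nthread hold the thread energies,
! nthread+1..2*nthread the ener0 values, and column 2*nthread+1 the
! timing statistics; ipatt column nthread+1 carries (nthread,nexcl).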
! Send the IPATT array
      write (iout,*) 'CG processor',me,&
        ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,&
        ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,&
        ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,&
        ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,&
        ' has sent IPATT array to master MSGLEN',msglen
! Send the energies; 2*nthread columns of n_ene2 terms plus the two
! timing slots must go out for the receiver's unpacking to work.
      msglen=n_ene2*2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,&
        EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      end subroutine send_thread_results
!-----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.INFO'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,&
        EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      real(kind=8) :: buffer(20*maxthread+2),max_time_for_thread_t,&
        ave_time_for_thread_t
      integer :: status(MPI_STATUS_SIZE)
      integer :: iproc,ierr,ierror,i,j
      integer :: nexcl_temp
! Receive the IPATT array
      call mpi_probe(iproc,ThreadType,&
        mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',&
        iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',&
        iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,&
        ThreadType,mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',&
        iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',&
        iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
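! msglen/2 is the number of ipatt columns received; the last column
! holds the sender's nthread and nexcl counters (cf. send_thread_results).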
! Receive the energies.
      call mpi_probe(iproc,EnerType,&
        mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',&
        iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',&
        iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,&
        MPI_DOUBLE_PRECISION,iproc,&
        EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene2
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        end do
      end do
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread) &
        max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+ &
        nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
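! nthread now counts all threads collected so far, and ave_time_for_thread
! is the mean weighted by the two partial thread counts.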
      end subroutine receive_thread_results
!-----------------------------------------------------------------------------
      subroutine init_task
      use control, only: initialize,getenv_loc
      use io_config, only: openunits
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))') &
        'United-residue force field calculation - serial job.'
      return
      end subroutine init_task
!-----------------------------------------------------------------------------