      implicit real*8 (a-h,o-z)
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
c      real*8 text1 /'group_i '/,text2/'group_f '/,
c     &       text3/'initialb'/,text4/'initiale'/,
c     &       text5/'openb'/,text6/'opene'/
      integer cgtasks(0:max_cg_procs)
      integer cg_size,fg_size,fg_size1
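c Overview (as read from the code below, not an authoritative spec):
c MPI_COMM_WORLD is partitioned into one coarse-grain communicator
c CG_COMM (one process per CG task) and one fine-grain communicator
c FG_COMM per CG task; an FG_COMM larger than max_gs_size is split
c again into FG_COMM1 subgroups. Example (assumed numbers): with
c nprocs=8 and nfgtasks=4, cgtasks=(/0,4/), so world ranks 0 and 4
c form CG_COMM while ranks 0-3 and 4-7 form the two FG_COMMs.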
c Start parallel processing.
c      print *,'Initializing MPI'
      print *, ' cannot initialize MPI'
c Determine the number of processes and the rank of this process.
      call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
      print *, ' cannot determine rank of all processes'
      call MPI_Finalize( IERR )
      call MPI_Comm_size( MPI_COMM_WORLD, nodes, ierr )
      print *, ' cannot determine number of processes'
C Determine the number of "fine-grain" tasks.
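c The fine-grain setup is steered by two environment variables (read
c with an i3 format, so at most three digits): FGPROCS - the number
c of fine-grain tasks per coarse-grain task (defaults to 1 when
c unset) - and MAXGSPROCS - the maximum size of a group-summation
c subcommunicator (defaults to 2 when unset).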
      call getenv_loc("FGPROCS",cfgprocs)
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      call getenv_loc("MAXGSPROCS",cfgprocs)
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
     &  print *,"Processor",me," nfgtasks",nfgtasks,
     &  " max_gs_size",max_gs_size
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
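c With a single fine-grain task per CG task every process is a CG
c process, so the world communicator can serve directly as CG_COMM.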
      if (nfgtasks*nodes.ne.nprocs) then
        write (*,'(a)') 'ERROR: Number of processors assigned',
     &   ' to coarse-grained tasks must be a divisor',
     &   ' of the total number of processors.'
        call MPI_Finalize( IERR )
C Put the ranks of coarse-grain processes in one table and create
C the respective communicator. The processes with ranks "in between"
C the ranks of CG processes will perform the fine-grain work for the
C CG process with the next lower rank.
      do i=0,nprocs-1,nfgtasks
      print *,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
c      print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
c      call memmon_print_usage()
      call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
      call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
      call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
      call MPI_Group_rank(cg_group,me,ierr)
      call MPI_Group_free(world_group,ierr)
      call MPI_Group_free(cg_group,ierr)
c      print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
c      call memmon_print_usage()
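c Note: once CG_COMM exists the groups may be freed; MPI_Group_rank
c has set me to MPI_UNDEFINED on processes outside cg_group, so only
c group members query their rank within CG_COMM below.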
      if (me.ne.MPI_UNDEFINED) call MPI_Comm_rank(CG_COMM,me,ierr)
      if (lprn) print *," Processor",myrank," CG rank",me
C Create communicators containing processes doing "fine-grain" tasks.
C The processes within each FG_COMM should have fast communication.
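c The split below places nfgtasks consecutive world ranks in one
c FG_COMM (assuming the color is MyRank/nfgtasks, so that consecutive
c ranks share a color); key=mod(MyRank,nfgtasks) makes the CG process
c rank 0 within its own FG_COMM.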
      key=mod(MyRank,nfgtasks)
      call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
      call MPI_Comm_size(FG_COMM,fg_size,ierr)
      if (fg_size.ne.nfgtasks) then
        write (*,*) "OOOOps... the number of fg tasks is",fg_size,
     &   " but",nfgtasks," was requested. MyRank=",MyRank
      call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
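c If a fine-grain communicator is larger than max_gs_size, split it
c once more into FG_COMM1 subgroups of at most max_gs_size processes,
c presumably to keep group summations local to fast links.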
      if (fg_size.gt.max_gs_size) then
        kolor1=fg_rank/max_gs_size
        key1=mod(fg_rank,max_gs_size)
        call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
        call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
        call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
      if (fg_rank.eq.0) then
        write (*,*) "Processor",MyRank," out of",nprocs,
     &   " rank in CG_COMM",me," size of CG_COMM",nodes,
     &   " size of FG_COMM",fg_size,
     &   " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        write (*,*) "Processor",MyRank," out of",nprocs,
     &   " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
     &   " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
C Initialize other variables.
c      print '(a)','Before initialize'
c      call memmon_print_usage()
c      print '(a,i5,a)','Processor',myrank,' After initialize'
c      call memmon_print_usage()
C Open task-dependent files.
c      print '(a,i5,a)','Processor',myrank,' Before openunits'
c      call memmon_print_usage()
c      print '(a,i5,a)','Processor',myrank,' After openunits'
c      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file))
     &  write (iout,'(80(1h*)/a/80(1h*))')
     &  'United-residue force field calculation - parallel job.'
c      print *,"Processor",myrank," exited OPENUNITS"
C-----------------------------------------------------------------------------
      subroutine finish_task
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.REMD'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.TIME1'
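c Synchronize the CG processes before reporting timings; the -1
c broadcast below is presumably a sentinel that tells the fine-grain
c slaves to leave their work loop.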
      call MPI_Barrier(CG_COMM,ierr)
     &  call MPI_Bcast(-1,1,MPI_INTEGER,king,FG_COMM,IERROR)
      if (me.eq.king .or. .not. out1file) then
        write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
        write (iout,*) 'Total wall clock time',time1-walltime,' sec'
        if (nfgtasks.gt.1) then
          write (iout,'(80(1h=)/a/(80(1h=)))')
     &     "Details of FG communication time"
          write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))')
     &     "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
     &     "GATHER:",time_gather,
     &     "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
     &     "BARRIER ene",time_barrier_e,
     &     "BARRIER grad",time_barrier_g,"TOTAL:",
     &   time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
     &   +time_barrier_e+time_barrier_g
          write (*,*) 'Total wall clock time',time1-walltime,' sec'
          write (*,*) "Processor",me," BROADCAST time",time_bcast,
     &     " REDUCE time",time_reduce," GATHER time",time_gather,
     &     " SCATTER time",time_scatter," SENDRECV",time_sendrecv,
     &     " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,
     &   ": moving output files to the parent directory..."
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12 .or.
     &     (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14 .and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        close(ipdb,status='keep')
        call move_from_tmp(pdbname)
      else if (me.eq.king .or. .not.traj1file) then
        close(icart,status='keep')
        call move_from_tmp(cartname)
      if (me.eq.king .or. .not. out1file) then
        close (iout,status='keep')
        call move_from_tmp(outname)
c-------------------------------------------------------------------------
      subroutine pattern_receive
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,ThreadType
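c Poll (non-blocking) for threading patterns queued by other
c processors and receive them until no message is pending.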
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,
     &   CG_COMM,flag,status,ierr)
      write (iout,*) 'Processor ',Me,' is receiving threading',
     &   ' pattern from processor',status(mpi_source)
      write (*,*) 'Processor ',Me,' is receiving threading',
     &   ' pattern from processor',status(mpi_source)
      call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),
     &   ThreadType, CG_COMM,ireq,ierr)
      write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
     &   iexam(2,nexcl)
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,
     &   CG_COMM,flag,status,ierr)
c----------------------------------------------------------------------------
      subroutine pattern_send
      implicit real*8 (a-h,o-z)
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer source,ThreadType,ireq
      if (iproc.ne.me .and. .not.Koniec(iproc)) then
        call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
     &     ThreadType, CG_COMM, ireq, ierr)
        write (iout,*) 'CG processor ',me,' has sent pattern ',
     &     'to processor',iproc
        write (*,*) 'CG processor ',me,' has sent pattern ',
     &     'to processor',iproc
        write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
c-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer StopType,StopId,iproc,Kwita,NBytes
C      print *,'CG processor',me,' StopType=',StopType
C Master sends the STOP signal to everybody.
     &    'Master is sending STOP signal to other processors.'
        print *,'Koniec(',iproc,')=',Koniec(iproc)
        if (.not. Koniec(iproc)) then
          call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
     &       mpi_comm_world,ierr)
          write (iout,*) 'Iproc=',iproc,' StopID=',StopID
          write (*,*) 'Iproc=',iproc,' StopID=',StopID
C Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
     &     mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,' StopID=',StopID
        write (*,*) 'CG processor=',me,' StopID=',StopID
c-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer source,StopType,StopId,iproc,Kwita
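c Drain all pending STOP messages: mark each sender as finished in
c Koniec and keep the last received Kwita value.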
      source=mpi_any_source
c      print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,
     &   mpi_comm_world,flag,status,ierr)
      Koniec(status(mpi_source))=.true.
      write (iout,*) 'CG processor ',me,' is receiving STOP signal',
     &   ' from processor',status(mpi_source)
      write (*,*) 'CG processor ',me,' is receiving STOP signal',
     &   ' from processor',status(mpi_source)
      call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,
     &   mpi_comm_world,ireq,ierr)
      call mpi_iprobe(source,StopType,
     &   mpi_comm_world,flag,status,ierr)
c-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
      common /aaaa/ isend,irecv
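c Non-blocking send of a one-integer MCM status code (ione) to the
c master; the request handle MCM_info_ID is presumably completed
c elsewhere.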
cd      write (iout,'(a,i4,a)') 'CG Processor',me,
cd     &  ' is sending MCM info to Master.'
      write (*,'(a,i4,a,i8)') 'CG processor',me,
     &  ' is sending MCM info to Master, MCM_info_ID=',MCM_info_ID
      call mpi_isend(ione,1,mpi_integer,MasterID,
     &  MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
cd      write (iout,*) 'CG processor',me,' has sent info to the master;',
cd     &  ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',
     &  ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
c----------------------------------------------------------------------------
      subroutine receive_MCM_info
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,MCM_info_Type,MCM_info_ID,iproc,ione
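c The master polls for MCM info from any worker; itask=source/fgProcs+1
c maps a world rank back to the index of its coarse-grain task.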
      source=mpi_any_source
c      print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,
     &   mpi_comm_world,flag,status,ierr)
      source=status(mpi_source)
      itask=source/fgProcs+1
cd      write (iout,*) 'Master is receiving MCM info from processor ',
cd     &  source,' itask',itask
      write (*,*) 'Master is receiving MCM info from processor ',
     &  source,' itask',itask
      call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,
     &   mpi_comm_world,MCM_info_ID,ierr)
cd      write (iout,*) 'Received from processor',source,' IONE=',ione
      write (*,*) 'Received from processor',source,' IONE=',ione
      if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
cd      write (iout,*) 'Nacc_tot=',Nacc_tot
cd      write (*,*) 'Nacc_tot=',Nacc_tot
      source=mpi_any_source
      call mpi_iprobe(source,MCM_info_Type,
     &   mpi_comm_world,flag,status,ierr)
c---------------------------------------------------------------------------
      subroutine send_thread_results
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,msglen,nbytes
      double precision buffer(20*maxthread+2)
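c Packing convention (as read from this routine and its receiver):
c ipatt(:,nthread+1) carries (nthread,nexcl), columns nthread+1..
c 2*nthread of ener hold ener0, and ener(1:2,2*nthread+1) hold the
c maximum and average per-thread times.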
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      ener(j,i+nthread)=ener0(j,i)
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
C Send the IPATT array.
      write (iout,*) 'CG processor',me,
     &  ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,
     &  ' is sending IPATT array to master: NTHREAD=',nthread
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
     &  ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,
     &  ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,
     &  ' has sent IPATT array to master MSGLEN',msglen
C Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
     &  EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
c----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
      implicit real*8 (a-h,o-z)
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      double precision buffer(20*maxthread+2),max_time_for_thread_t,
     &   ave_time_for_thread_t
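c Receive the IPATT array and then the energies from processor iproc,
c using MPI_GET_COUNT to size each message; the trailer elements give
c the sender's thread and exclusion counts (nthread_temp, nexcl_temp),
c after which the timing statistics are merged.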
C Receive the IPATT array.
      call mpi_probe(iproc,ThreadType,
     &   mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',
     &   iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',
     &   iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
     &   ThreadType,mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',
     &   iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',
     &   iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
C Receive the energies.
      call mpi_probe(iproc,EnerType,
     &   mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',
     &   iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',
     &   iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,
     &   MPI_DOUBLE_PRECISION,iproc,
     &   EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',
     &   iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
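c Merge timing statistics: a running maximum, and an average weighted
c by the number of threads contributed by each side.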
      if (max_time_for_thread_t.gt.max_time_for_thread)
     &   max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+
     &   nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      write (iout,'(80(1h*)/a/80(1h*))')
     &  'United-residue force field calculation - serial job.'