      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      logical lprn /.false./
c      real*8 text1 /'group_i '/,text2/'group_f '/,
c     & text3/'initialb'/,text4/'initiale'/,
c     & text5/'openb'/,text6/'opene'/
      integer cgtasks(0:max_cg_procs)
      character*3 cfgprocs
      integer cg_size,fg_size,fg_size1
c  start parallel processing
c      print *,'Initializing MPI'
      call mpi_init(ierr)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
c  determine # of nodes and current node
      call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
c       MPI_Finalize takes only the error-code argument
        call MPI_Finalize( IERR )
        stop
      endif
      call MPI_Comm_size( MPI_COMM_WORLD, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
C Determine the number of "fine-grain" tasks
      call getenv_loc("FGPROCS",cfgprocs)
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      call getenv_loc("MAXGSPROCS",cfgprocs)
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
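c     Usage note (illustrative, not part of the original source; the
c     binary name below is hypothetical): the fine-grain team size and
c     the gather/scatter group limit come from environment variables,
c     e.g. with a typical MPI launcher
c
c       export FGPROCS=4
c       export MAXGSPROCS=2
c       mpirun -np 8 ./unres
c
c     which starts 8 processes split into 2 coarse-grain tasks with 4
c     fine-grain workers each (see the communicator setup below).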
      if (lprn)
     &  print *,"Processor",me," nfgtasks",nfgtasks,
     &  " max_gs_size",max_gs_size
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: The number of processors assigned',
     &     ' to fine-grained tasks must be a divisor',
     &     ' of the total number of processors.'
          call MPI_Finalize( IERR )
          stop
        endif
C Put the ranks of coarse-grain processes in one table and create
C the respective communicator. The processes with ranks "in between"
C the ranks of CG processes will perform fine graining for the CG
C process with the next lower rank.
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
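c     Worked example (illustrative, not in the original): with
c     nprocs=8 and nfgtasks=4 the loop above stores cgtasks(0)=0 and
c     cgtasks(1)=4, so world ranks 0 and 4 become the coarse-grain
c     processes, while ranks 1-3 and 5-7 do fine-grain work for the
c     CG process with the next lower rank.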
        if (lprn) then
          print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
c          print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
c        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
c        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
c        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
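c     Note (comment added for clarity): MPI_Group_rank returns
c     MPI_UNDEFINED for processes that are not members of cg_group,
c     so only genuine CG processes query their rank in CG_COMM here.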
        if (lprn) print *," Processor",myrank," CG rank",me
C Create communicators containing processes doing "fine grain" tasks.
C The processes within each FG_COMM should have fast communication.
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
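c     Illustration (not in the original): MPI_Comm_split groups ranks
c     by "kolor" and orders them by "key".  With 8 processes and
c     nfgtasks=4, world ranks 0-3 get kolor=0 and ranks 4-7 get
c     kolor=1, giving two FG_COMMs of size 4, and the key makes each
c     CG process rank 0 within its own FG_COMM.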
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size,
     &     " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=fg_size
          fg_rank1=fg_rank
        endif
      endif
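c     Illustration (not in the original): if fg_size=8 and
c     max_gs_size=2, the second split above yields four FG_COMM1
c     sub-communicators of size 2 (fg_rank 0-1, 2-3, 4-5, 6-7), which
c     keeps gather/scatter groups within the requested size limit.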
      if (fg_rank.eq.0) then
        write (*,*) "Processor",MyRank," out of",nprocs,
     &   " rank in CG_COMM",me," size of CG_COMM",nodes,
     &   " size of FG_COMM",fg_size,
     &   " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      else
        write (*,*) "Processor",MyRank," out of",nprocs,
     &   " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
     &   " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      endif
C Initialize other variables.
c      print '(a)','Before initialize'
c      call memmon_print_usage()
      call initialize
c      print '(a,i5,a)','Processor',myrank,' After initialize'
c      call memmon_print_usage()
C Open task-dependent files.
c      print '(a,i5,a)','Processor',myrank,' Before openunits'
c      call memmon_print_usage()
      call openunits
c      print '(a,i5,a)','Processor',myrank,' After openunits'
c      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file))
     &  write (iout,'(80(1h*)/a/80(1h*))')
     &  'United-residue force field calculation - parallel job.'
c      print *,"Processor",myrank," exited OPENUNITS"
      return
      end
C-----------------------------------------------------------------------------
      subroutine finish_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
c      include 'COMMON.REMD'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.TIME1'
      time1=MPI_WTIME()
      call MPI_Barrier(CG_COMM,ierr)
c     a broadcast buffer must be a variable, not a literal constant
      idummy=-1
      if (nfgtasks.gt.1)
     &  call MPI_Bcast(idummy,1,MPI_INTEGER,king,FG_COMM,IERROR)
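c     Interpretation (comment added for clarity): the fine-grain
c     slaves sit in a broadcast-driven message loop elsewhere in the
c     code, and a broadcast value of -1 is the signal for them to
c     leave that loop so the job can shut down cleanly.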
      if (me.eq.king .or. .not. out1file) then
        write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
        write (iout,*) 'Total wall clock time',time1-walltime,' sec'
        if (nfgtasks.gt.1) then
          write (iout,'(80(1h=)/a/(80(1h=)))')
     &     "Details of FG communication time"
          write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))')
     &     "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
     &     "GATHER:",time_gather,
     &     "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
     &     "BARRIER ene",time_barrier_e,
     &     "BARRIER grad",time_barrier_g,"TOTAL:",
     &     time_bcast+time_reduce+time_gather+time_scatter+
     &     time_sendrecv+time_barrier_e+time_barrier_g
          write (*,*) 'Total wall clock time',time1-walltime,' sec'
          write (*,*) "Processor",me," BROADCAST time",time_bcast,
     &     " REDUCE time",time_reduce," GATHER time",time_gather,
     &     " SCATTER time",time_scatter," SENDRECV",time_sendrecv,
     &     " BARRIER ene",time_barrier_e,
     &     " BARRIER grad",time_barrier_g
        endif
      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,
     &   ": moving output files to the parent directory..."
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
cremd     if (modecalc.eq.12.or.
cremd    &   (modecalc.eq.14 .and. .not.restart1file)) then
cremd       call move_from_tmp(rest2name)
cremd     else if (modecalc.eq.14.and. me.eq.king) then
cremd       call move_from_tmp(mremd_rst_name)
cmd         close(ipdb,status='keep')
cmd         call move_from_tmp(pdbname)
cmd       else if (me.eq.king .or. .not.traj1file) then
cmd         close(icart,status='keep')
cmd         call move_from_tmp(cartname)
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end
c-------------------------------------------------------------------------
      subroutine pattern_receive
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,ThreadType
      logical flag
c     ThreadType must match the message tag used by pattern_send
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,
     &   CG_COMM,flag,status,ierr)
c     drain all threading patterns queued for this processor
      do while (flag)
        write (iout,*) 'Processor ',Me,' is receiving threading',
     &   ' pattern from processor',status(mpi_source)
        write (*,*) 'Processor ',Me,' is receiving threading',
     &   ' pattern from processor',status(mpi_source)
        nexcl=nexcl+1
        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),
     &   ThreadType,CG_COMM,ireq,ierr)
c       complete the receive before the data are used
        call mpi_wait(ireq,status,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
     &   iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,
     &   CG_COMM,flag,status,ierr)
      enddo
      return
      end
c----------------------------------------------------------------------------
      subroutine pattern_send
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer source,ThreadType,ireq
      integer status(MPI_STATUS_SIZE)
c     ThreadType must match the message tag used by pattern_receive
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc)) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
     &     ThreadType,CG_COMM,ireq,ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',
     &     'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',
     &     'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),
     &     iexam(2,nexcl)
c         make sure the nonblocking send has completed
          call mpi_wait(ireq,status,ierr)
        endif
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer StopType,StopId,iproc,Kwita,NBytes
C     print *,'CG processor',me,' StopType=',StopType
      if (me.eq.MasterID) then
C Master sends the STOP signal to everybody.
        write (iout,*)
     &   'Master is sending STOP signal to other processors.'
        do iproc=0,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
     &       mpi_comm_world,ierr)
            write (iout,*) 'Iproc=',iproc,' StopID=',StopID
            write (*,*) 'Iproc=',iproc,' StopID=',StopID
          endif
        enddo
      else
C Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
     &   mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,' StopID=',StopID
        write (*,*) 'CG processor=',me,' StopID=',StopID
      endif
      return
      end
c-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer source,StopType,StopId,iproc,Kwita
      integer status(MPI_STATUS_SIZE),ireq
      logical flag
      source=mpi_any_source
c     print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,
     &   mpi_comm_world,flag,status,ierr)
c     drain all pending STOP signals
      do while (flag)
        Koniec(status(mpi_source))=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',
     &   ' from processor',status(mpi_source)
        write (*,*) 'CG processor ',me,' is receiving STOP signal',
     &   ' from processor',status(mpi_source)
        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,
     &   mpi_comm_world,ireq,ierr)
c       complete the receive so that Kwita is valid on return
        call mpi_wait(ireq,status,ierr)
        call mpi_iprobe(source,StopType,
     &   mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
      common /aaaa/ isend,irecv
cd    write (iout,'(a,i4,a)') 'CG Processor',me,
cd   & ' is sending MCM info to Master.'
      write (*,'(a,i4,a,i8)') 'CG processor',me,
     & ' is sending MCM info to Master, MCM_info_ID=',MCM_info_ID
      call mpi_isend(ione,1,mpi_integer,MasterID,
     &   MCM_info_Type,mpi_comm_world,MCM_info_ID,ierr)
cd    write (iout,*) 'CG processor',me,' has sent info to the master;',
cd   & ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',
     & ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_MCM_info
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical flag
      source=mpi_any_source
c     print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,
     &   mpi_comm_world,flag,status,ierr)
c     process all queued MCM acceptance messages
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
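c     Illustration (not in the original): with fgProcs=4, sender
c     ranks 0-3 map to itask=1 and ranks 4-7 to itask=2, so every
c     member of one fine-grain team updates the same per-task counter.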
cd      write (iout,*) 'Master is receiving MCM info from processor ',
cd   &   source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',
     &   source,' itask',itask
        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,
     &   mpi_comm_world,MCM_info_ID,ierr)
c       complete the receive before ione is examined
        call mpi_wait(MCM_info_ID,status,ierr)
cd      write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        Nacc_tot=Nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
cd      write (iout,*) 'Nacc_tot=',Nacc_tot
cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,
     &   mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c---------------------------------------------------------------------------
      subroutine send_thread_results
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,msglen,nbytes
      double precision buffer(20*maxthread+2)
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene2
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
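c     Buffer layout (reconstructed from the indexing above): columns
c     1..nthread of ener hold the refined energies, columns
c     nthread+1..2*nthread the ener0 copies made above, and column
c     2*nthread+1 the two thread-timing values; the trailer column
c     nthread+1 of ipatt likewise stores (nthread,nexcl) so that the
c     master can recover the counts on receipt.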
C Send the IPATT array
      write (iout,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
     &   ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
C Send the energies.
c     two blocks of nthread energy columns plus the two timing values
      msglen=2*n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
     &   EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      double precision buffer(20*maxthread+2),max_time_for_thread_t,
     &   ave_time_for_thread_t
      integer status(MPI_STATUS_SIZE)
C Receive the IPATT array
      call mpi_probe(iproc,ThreadType,
     &   mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',
     &   iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',
     &   iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
     &   ThreadType,
     &   mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',
     &   iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',
     &   iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
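c     Since the sender transmits 2*(nthread_temp+1) integers,
c     msglen/2 equals nthread_temp+1, so ipatt(*,nthread+msglen/2)
c     is exactly the sender's trailer column holding (nthread,nexcl).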
C Receive the energies.
      call mpi_probe(iproc,EnerType,
     &   mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',
     &   iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',
     &   iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,
     &   MPI_DOUBLE_PRECISION,iproc,
     &   EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',
     &   iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene2
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread)
     &   max_time_for_thread=max_time_for_thread_t
c     running average weighted by the number of threads on each side
      ave_time_for_thread=(nthread*ave_time_for_thread+
     &   nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end
      implicit real*8 (a-h,o-z)
      include 'COMMON.SETUP'
      write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - serial job.'