#ifdef MPI
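c------------------------------------------------------------------------------
c init_task: initialize MPI, determine the rank and the number of
c processes, and split MPI_COMM_WORLD into the coarse-grain communicator
c (CG_COMM) and the fine-grain communicators (FG_COMM, FG_COMM1)
c according to the FGPROCS and MAXGSPROCS environment variables.
c------------------------------------------------------------------------------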
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      logical lprn /.false./
c      real*8 text1 /'group_i '/,text2/'group_f '/,
c     & text3/'initialb'/,text4/'initiale'/,
c     & text5/'openb'/,text6/'opene'/
      integer cgtasks(0:max_cg_procs)
      character*3 cfgprocs
      integer cg_size,fg_size,fg_size1
c  start parallel processing
c      print *,'Initializing MPI'
      call mpi_init(ierr)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
c  determine # of nodes and current node
      call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
        call MPI_Finalize( IERR )
        stop
      endif
      call MPI_Comm_size( MPI_Comm_world, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
C Determine the number of "fine-grain" tasks
      call getenv_loc("FGPROCS",cfgprocs)
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      call getenv_loc("MAXGSPROCS",cfgprocs)
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
      if (lprn)
     &  print *,"Processor",me," nfgtasks",nfgtasks,
     & " max_gs_size",max_gs_size
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: The number of fine-grain processors',
     &     ' per coarse-grain task (FGPROCS) must be a divisor',
     &     ' of the total number of processors.'
          call MPI_Finalize( IERR )
          stop
        endif
C Put the ranks of coarse-grain processes in one table and create
C the respective communicator. The processes with ranks "in between"
C the ranks of CG processes will perform fine graining for the CG
C process with the next lower rank.
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
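c       For example, with nprocs=8 and nfgtasks=4 the table is
c       cgtasks=(0,4): ranks 0 and 4 become the CG processes and
c       ranks 1-3 and 5-7 serve as their fine-grain helpers.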
        if (lprn) then
        print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
c        print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
c        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
c        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
c        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
C Create communicators containing processes doing "fine grain" tasks.
C The processes within each FG_COMM should have fast communication.
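c       kolor groups consecutive blocks of nfgtasks global ranks into
c       one FG_COMM; key makes the CG process (global rank divisible
c       by nfgtasks) rank 0 within its FG_COMM.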
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size,
     &      " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
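c       If an FG_COMM exceeds max_gs_size (MAXGSPROCS) processes, split
c       it further into FG_COMM1 subgroups of at most max_gs_size each.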
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=nfgtasks
          fg_rank1=fg_rank
        endif
      endif
      if (fg_rank.eq.0) then
      write (*,*) "Processor",MyRank," out of",nprocs,
     & " rank in CG_COMM",me," size of CG_COMM",nodes,
     & " size of FG_COMM",fg_size,
     & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      else
      write (*,*) "Processor",MyRank," out of",nprocs,
     & " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
     & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      endif
C Initialize other variables.
c      print '(a)','Before initialize'
c      call memmon_print_usage()
      call initialize
c      print '(a,i5,a)','Processor',myrank,' After initialize'
c      call memmon_print_usage()
C Open task-dependent files.
c      print '(a,i5,a)','Processor',myrank,' Before openunits'
c      call memmon_print_usage()
      call openunits
c      print '(a,i5,a)','Processor',myrank,' After openunits'
c      call memmon_print_usage()
      if (me.eq.king .or. fg_rank.eq.0 .and. .not. out1file)
     &  write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - parallel job.'
c      print *,"Processor",myrank," exited OPENUNITS"
      return
      end
C-----------------------------------------------------------------------------
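c finish_task: synchronize the CG processes, release the fine-grain
c slaves, report wall-clock and communication timings, and move output
c files from the scratch directory back to the parent directory.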
      subroutine finish_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.REMD'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.TIME1'
      include 'COMMON.MD'
      integer ilen
      external ilen
      integer iend
c
      call MPI_Barrier(CG_COMM,ierr)
c A constant must not be passed as an MPI buffer; broadcast the exit
c code from a local variable instead.
      iend=-1
      if (nfgtasks.gt.1)
     &    call MPI_Bcast(iend,1,MPI_INTEGER,king,FG_COMM,IERROR)
      time1=MPI_WTIME()
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
       write (iout,*) 'Total wall clock time',time1-walltime,' sec'
       if (nfgtasks.gt.1) then
          write (iout,'(80(1h=)/a/(80(1h=)))')
     &    "Details of FG communication time"
          write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))')
     &    "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
     &    "GATHER:",time_gather,
     &    "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
     &    "BARRIER ene",time_barrier_e,
     &    "BARRIER grad",time_barrier_g,"TOTAL:",
     &    time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
     &    +time_barrier_e+time_barrier_g
          write (*,*) 'Total wall clock time',time1-walltime,' sec'
          write (*,*) "Processor",me," BROADCAST time",time_bcast,
     &      " REDUCE time",
     &      time_reduce," GATHER time",time_gather," SCATTER time",
     &      time_scatter," SENDRECV",time_sendrecv,
     &      " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
       endif
      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,
     &   ": moving output files to the parent directory..."
        close(inp)
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12.or.
     &     (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14.and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        endif
        if (mdpdb) then
         close(ipdb,status='keep')
         call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
         close(icart,status='keep')
         call move_from_tmp(cartname)
        endif
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end
c-------------------------------------------------------------------------
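c pattern_receive: poll for threading patterns sent by other CG
c processors and append them to the iexam exclusion list.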
      subroutine pattern_receive
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,ThreadType
      logical flag
      ThreadType=45
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      do while (flag)
        write (iout,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',status(mpi_source)
        write (*,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',status(mpi_source)
        nexcl=nexcl+1
        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),
     &    ThreadType, CG_COMM,ireq,ierr)
c Complete the receive before the pattern is used.
        call mpi_wait(ireq,status,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
     &    iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      enddo
      return
      end
c----------------------------------------------------------------------------
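c pattern_send: send the most recent threading pattern, iexam(:,nexcl),
c to every CG processor that has not yet finished (Koniec flags).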
      subroutine pattern_send
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer source,ThreadType,ireq
      integer status(MPI_STATUS_SIZE)
      ThreadType=45
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc) ) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
     &                  ThreadType, CG_COMM, ireq, ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
c Complete the send so the request does not leak.
          call mpi_wait(ireq,status,ierr)
        endif
      enddo
      return
      end
c-----------------------------------------------------------------------------
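c send_stop_sig: propagate the STOP code Kwita. The master notifies all
c workers that have not yet finished (Koniec, Polish for "end", flags
c finished processes); a worker notifies only the master.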
      subroutine send_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer StopType,StopId,iproc,Kwita,NBytes
      StopType=66
c     Kwita=-1
C     print *,'CG processor',me,' StopType=',StopType
      Koniec(me)=.true.
      if (me.eq.king) then
C Master sends the STOP signal to everybody.
        write (iout,'(a)')
     &   'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
     &          mpi_comm_world,ierr)
            write (iout,*) 'Sent STOP signal to processor',iproc
            write (*,*) 'Sent STOP signal to processor',iproc
          endif
        enddo
      else
C Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
     &          mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,
     &   ' has sent STOP signal to Master.'
        write (*,*) 'CG processor=',me,
     &   ' has sent STOP signal to Master.'
      endif
      return
      end
c-----------------------------------------------------------------------------
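c recv_stop_sig: poll for pending STOP signals, mark their senders as
c finished in Koniec, and return the last received code in Kwita
c (Kwita=0 if no signal has arrived).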
      subroutine recv_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer source,StopType,StopId,iproc,Kwita
      integer status(MPI_STATUS_SIZE),ireq
      logical flag
      StopType=66
      Kwita=0
      source=mpi_any_source
c     print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      do while (flag)
        Koniec(status(mpi_source))=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',status(mpi_source)
        write (*,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',status(mpi_source)
        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,
     &           mpi_comm_world,ireq,ierr)
c Complete the receive so Kwita is defined before it is used.
        call mpi_wait(ireq,status,ierr)
        call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c-----------------------------------------------------------------------------
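c send_MCM_info: send the MCM acceptance code ione to the master, which
c counts each message as an accepted move and treats ione=2 as a saved
c structure (see receive_MCM_info below).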
      subroutine send_MCM_info(ione)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
      common /aaaa/ isend,irecv
      integer nsend
      save nsend
      data nsend /0/
      nsend=nsend+1
      MCM_info_Type=77
cd    write (iout,'(a,i4,a)') 'CG Processor',me,
cd   & ' is sending MCM info to Master.'
      write (*,'(a,i4,a)') 'CG processor',me,
     & ' is sending MCM info to Master.'
      call mpi_isend(ione,1,mpi_integer,MasterID,
     &               MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
cd    write (iout,*) 'CG processor',me,' has sent info to the master;',
cd   &    ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',
     &    ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
c Complete the send; the buffer is a dummy argument and must not be
c in flight after return.
      call mpi_wait(MCM_info_ID,status,ierr)
      isend=0
      irecv=0
      return
      end
c----------------------------------------------------------------------------
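c receive_MCM_info: master-side counterpart of send_MCM_info; drain all
c pending MCM messages and update nacc_tot and the per-task counters
c nsave_part.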
      subroutine receive_MCM_info
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical flag
      MCM_info_Type=77
      source=mpi_any_source
c     print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
cd      write (iout,*) 'Master is receiving MCM info from processor ',
cd   &                 source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',
     &                 source,' itask',itask
        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,
     &                  mpi_comm_world,MCM_info_ID,ierr)
c Complete the receive before ione is used.
        call mpi_wait(MCM_info_ID,status,ierr)
cd      write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
cd      write (iout,*) 'Nacc_tot=',Nacc_tot
cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c---------------------------------------------------------------------------
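c send_thread_results: append ener0 and the timing statistics to the
c ener array, then send the threading patterns (ipatt) and energies to
c the master in two blocking messages.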
      subroutine send_thread_results
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,msglen,nbytes
      double precision buffer(20*maxthread+2)
      ThreadType=444
      EnerType=555
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
C Send the IPATT array
      write (iout,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
     & ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
C Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
     & EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end
c----------------------------------------------------------------------------
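c receive_thread_results: master-side counterpart of
c send_thread_results; receive the patterns and energies from processor
c iproc, append them to ipatt/ener0, and update the timing statistics.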
      subroutine receive_thread_results(iproc)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      integer status(MPI_STATUS_SIZE)
      double precision buffer(20*maxthread+2),max_time_for_thread_t,
     & ave_time_for_thread_t
      logical flag
      ThreadType=444
      EnerType=555
C Receive the IPATT array
      call mpi_probe(iproc,ThreadType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
     & ThreadType,
     & mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
C Receive the energies.
      call mpi_probe(iproc,EnerType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,
     & MPI_DOUBLE_PRECISION,iproc,
     & EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread)
     & max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+
     & nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end
#else
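c Serial version of init_task: single-process setup without MPI.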
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.SETUP'
      include 'COMMON.IOUNITS'
      me=0
      myrank=0
      fg_rank=0
      fg_size=1
      nodes=1
      nprocs=1
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - serial job.'
      return
      end
#endif