unres.git: source/unres/src-HCD-5D/MP.F
#ifdef MPI
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      logical lprn /.false./
c      real*8 text1 /'group_i '/,text2/'group_f '/,
c     & text3/'initialb'/,text4/'initiale'/,
c     & text5/'openb'/,text6/'opene'/
      integer cgtasks(0:max_cg_procs)
      character*3 cfgprocs /"  1"/
      integer cg_size,fg_size,fg_size1
c  start parallel processing
c      print *,'Initializing MPI'
      call mpi_init(ierr)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
c  determine # of nodes and current node
      call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
        call MPI_Finalize( IERR )
        stop
      endif
      call MPI_Comm_size( MPI_COMM_WORLD, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
C Determine the number of "fine-grain" tasks
      call getenv_loc("FGPROCS",cfgprocs)
      print *,cfgprocs
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      call getenv_loc("MAXGSPROCS",cfgprocs)
      print *,cfgprocs
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
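C The FGPROCS environment variable sets the number of fine-grain (FG)
C processes per coarse-grain (CG) task (default 1); MAXGSPROCS caps
C the size of the gather/scatter subgroups created below (default 2).
C Illustrative layout (assumed): 8 MPI ranks with FGPROCS=4 give two
C CG tasks, rooted at world ranks 0 and 4, each with 4 FG processes.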
      if (lprn)
     &  print *,"Processor",me," nfgtasks",nfgtasks,
     & " max_gs_size",max_gs_size
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: the number of fine-grain processes',
     &     ' per coarse-grain task must be a divisor',
     &     ' of the total number of processors.'
          call MPI_Finalize( IERR )
          stop
        endif
C Put the ranks of the coarse-grain processes in one table and create
C the respective communicator. The processes with ranks "in between"
C the ranks of the CG processes do the fine-grain work for the CG
C process with the next-lower rank.
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
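C Worked example: nprocs=8, nfgtasks=4 fills cgtasks=(/0,4/), so
C CG_COMM will contain world ranks 0 and 4 only.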
        if (lprn) then
        print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
c        print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
c        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
c        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
c        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
C Create communicators containing processes doing "fine-grain" tasks.
C The processes within each FG_COMM should have fast communication.
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
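C In the 8-rank example above, world rank 5 gets kolor=1 and key=1,
C i.e. it becomes rank 1 of the FG_COMM serving CG task 1.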
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size,
     &      " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
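C If an FG group exceeds MAXGSPROCS, split it further into FG_COMM1
C subgroups of at most max_gs_size ranks; these smaller groups are
C presumably used for hierarchical gather/scatter operations.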
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=nfgtasks
          fg_rank1=fg_rank
        endif
      endif
      if (lprn) then
      if (fg_rank.eq.0) then
      write (*,*) "Processor",MyRank," out of",nprocs,
     & " rank in CG_COMM",me," size of CG_COMM",nodes,
     & " size of FG_COMM",fg_size,
     & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      else
      write (*,*) "Processor",MyRank," out of",nprocs,
     & " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
     & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      endif
      endif
C Initialize other variables.
c      print '(a)','Before initialize'
c      call memmon_print_usage()
      call initialize
c      print '(a,i5,a)','Processor',myrank,' After initialize'
c      call memmon_print_usage()
C Open task-dependent files.
c      print '(a,i5,a)','Processor',myrank,' Before openunits'
c      call memmon_print_usage()
      call openunits
c      print '(a,i5,a)','Processor',myrank,' After openunits'
c      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file))
     &  write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - parallel job.'
c      print *,"Processor",myrank," exited OPENUNITS"
      return
      end
C-----------------------------------------------------------------------------
      subroutine finish_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.REMD'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.TIME1'
      include 'COMMON.MD'
      integer ilen
      external ilen
c
      call MPI_Barrier(CG_COMM,ierr)
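C Broadcasting -1 to the fine-grain slaves is assumed to be the
C termination code that makes them leave their work loop.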
      if (nfgtasks.gt.1)
     &    call MPI_Bcast(-1,1,MPI_INTEGER,king,FG_COMM,IERROR)
      time1=MPI_WTIME()
c      if (me.eq.king .or. .not. out1file) then
       write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
       write (iout,*) 'Total wall clock time',time1-walltime,' sec'
       if (nfgtasks.gt.1) then
         write (iout,'(80(1h=)/a/(80(1h=)))')
     &    "Details of FG communication time"
          write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))')
     &    "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
     &    "GATHER:",time_gather,
     &    "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
     &    "BARRIER ene",time_barrier_e,
     &    "BARRIER grad",time_barrier_g,"TOTAL:",
     &    time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
     &    +time_barrier_e+time_barrier_g
          write (*,*) 'Total wall clock time',time1-walltime,' sec'
          write (*,*) "Processor",me," BROADCAST time",time_bcast,
     &      " REDUCE time",
     &      time_reduce," GATHER time",time_gather," SCATTER time",
     &      time_scatter," SENDRECV",time_sendrecv,
     &      " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
        endif
c      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,
     &   ": moving output files to the parent directory..."
        close(inp)
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12.or.
     &     (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14.and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        endif
        if (mdpdb) then
         close(ipdb,status='keep')
         call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
         close(icart,status='keep')
         call move_from_tmp(cartname)
        endif
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end
c-------------------------------------------------------------------------
      subroutine pattern_receive
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,ThreadType
      logical flag
      ThreadType=45
      source=mpi_any_source
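C Drain every threading pattern currently queued: probe for a
C message, receive it into the exclusion list iexam, and probe
C again until nothing is pending.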
      call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      do while (flag)
        write (iout,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',status(mpi_source)
        write (*,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',status(mpi_source)
        nexcl=nexcl+1
        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),
     &    ThreadType, CG_COMM,ireq,ierr)
C Complete the nonblocking receive before using the data.
        call mpi_wait(ireq,status,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
     &    iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      enddo
      return
      end
c----------------------------------------------------------------------------
      subroutine pattern_send
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer source,ThreadType,ireq
      ThreadType=45
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc) ) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
     &                  ThreadType, CG_COMM, ireq, ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
        endif
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer StopType,StopId,iproc,Kwita,NBytes
      StopType=66
c     Kwita=-1
C     print *,'CG processor',me,' StopType=',StopType
      Koniec(me)=.true.
      if (me.eq.king) then
C Master sends the STOP signal to everybody.
        write (iout,'(a)')
     &   'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
     &          mpi_comm_world,ierr)
            write (iout,*) 'Iproc=',iproc,' StopID=',StopID
            write (*,*) 'Iproc=',iproc,' StopID=',StopID
          endif
        enddo
      else
C Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
     &          mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,' StopID=',StopID
        write (*,*) 'CG processor=',me,' StopID=',StopID
      endif
      return
      end
c-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer source,StopType,StopId,iproc,Kwita,ireq
      integer status(MPI_STATUS_SIZE)
      logical flag
      StopType=66
      Kwita=0
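C Drain all pending STOP messages: each one marks its sender as
C finished in Koniec and delivers the sender's Kwita value.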
      source=mpi_any_source
c     print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      do while (flag)
        Koniec(status(mpi_source))=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',status(mpi_source)
        write (*,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',status(mpi_source)
        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,
     &           mpi_comm_world,ireq,ierr)
C Complete the nonblocking receive so Kwita is valid on return.
        call mpi_wait(ireq,status,ierr)
        call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
      common /aaaa/ isend,irecv
      integer nsend
      save nsend
      data nsend /0/
      nsend=nsend+1
      MCM_info_Type=77
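C ione is assumed (from receive_MCM_info below) to encode the
C outcome of an MCM move; the master counts ione=2 as a saved
C structure.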
cd    write (iout,'(a,i4,a)') 'CG Processor',me,
cd   & ' is sending MCM info to Master.'
      write (*,'(a,i4,a)') 'CG processor',me,
     & ' is sending MCM info to Master.'
      call mpi_isend(ione,1,mpi_integer,MasterID,
     &               MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
cd    write (iout,*) 'CG processor',me,' has sent info to the master;',
cd   &    ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',
     &    ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      isend=0
      irecv=0
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_MCM_info
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical flag
      MCM_info_Type=77
      source=mpi_any_source
c     print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
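C With fgProcs fine-grain ranks per task, integer division maps the
C sender's world rank to its 1-based coarse-grain task index.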
cd      write (iout,*) 'Master is receiving MCM info from processor ',
cd   &                 source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',
     &                 source,' itask',itask
        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,
     &                  mpi_comm_world,MCM_info_ID,ierr)
C Complete the nonblocking receive before using ione.
        call mpi_wait(MCM_info_ID,status,ierr)
cd      write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
cd      write (iout,*) 'Nacc_tot=',Nacc_tot
cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c---------------------------------------------------------------------------
      subroutine send_thread_results
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,msglen,nbytes
      double precision buffer(20*maxthread+2)
      ThreadType=444
      EnerType=555
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
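C Packed layout (mirrored on the receive side): ipatt column
C nthread+1 carries the counts (nthread,nexcl); ener columns
C nthread+1..2*nthread carry a copy of ener0, and column
C 2*nthread+1 carries the max and average per-thread times.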
C Send the IPATT array
      write (iout,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
     & ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
C Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
     & EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      double precision buffer(20*maxthread+2),max_time_for_thread_t,
     & ave_time_for_thread_t
      logical flag
      ThreadType=444
      EnerType=555
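C The messages are unpacked according to the layout documented in
C send_thread_results: the last IPATT column received holds the
C sender's nthread and nexcl counts.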
C Receive the IPATT array
      call mpi_probe(iproc,ThreadType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
     & ThreadType,
     & mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
C Receive the energies.
      call mpi_probe(iproc,EnerType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,
     & MPI_DOUBLE_PRECISION,iproc,
     & EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread)
     & max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+
     & nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end
#else
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.SETUP'
      me=0
      myrank=0
      fg_rank=0
      fg_size=1
      nodes=1
      nprocs=1
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - serial job.'
      return
      end
#endif