[unres.git] / source / unres / src_MD-M / MP.F
#ifdef MPI
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      logical lprn /.false./
c      real*8 text1 /'group_i '/,text2/'group_f '/,
c     & text3/'initialb'/,text4/'initiale'/,
c     & text5/'openb'/,text6/'opene'/
      integer cgtasks(0:max_cg_procs)
      character*3 cfgprocs
      integer cg_size,fg_size,fg_size1
c  start parallel processing
c      print *,'Initializing MPI'
      call mpi_init(ierr)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
c  determine # of nodes and current node
      call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
C MPI_Finalize takes only the error-status argument.
        call MPI_Finalize( IERR )
        stop
      endif
      call MPI_Comm_size( MPI_COMM_WORLD, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
C Determine the number of "fine-grain" tasks
      call getenv_loc("FGPROCS",cfgprocs)
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      call getenv_loc("MAXGSPROCS",cfgprocs)
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
      if (lprn)
     &  print *,"Processor",me," nfgtasks",nfgtasks,
     & " max_gs_size",max_gs_size
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: Number of processors assigned',
     &     ' to coarse-grained tasks must be a divisor',
     &     ' of the total number of processors.'
          call MPI_Finalize( IERR )
          stop
        endif
C Put the ranks of coarse-grain processes in one table and create
C the respective communicator. The processes with ranks "in between"
C the ranks of CG processes will perform fine graining for the CG
C process with the next lower rank.
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
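C For example, with nprocs=8 and nfgtasks=4 the CG ranks collected in
C cgtasks are 0 and 4.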
        if (lprn) then
          print *,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
c          print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
c        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
c        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
c        call memmon_print_usage()
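C Processes outside cg_group receive CG_COMM=MPI_COMM_NULL and a group
C rank of MPI_UNDEFINED, hence the guard below.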
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
C Create communicators containing processes doing "fine grain" tasks.
C The processes within each FG_COMM should have fast communication.
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
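C With nprocs=8 and nfgtasks=4, ranks 0-3 get kolor=0 and ranks 4-7 get
C kolor=1, so the split below yields two FG_COMM groups of four.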
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "Oops... the number of fg tasks is",fg_size,
     &      " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
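C If FG_COMM exceeds max_gs_size, split it further into FG_COMM1
C communicators of at most max_gs_size processes; otherwise FG_COMM1
C is simply an alias of FG_COMM.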
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=nfgtasks
          fg_rank1=fg_rank
        endif
      endif
      if (lprn) then
        if (fg_rank.eq.0) then
          write (*,*) "Processor",MyRank," out of",nprocs,
     &     " rank in CG_COMM",me," size of CG_COMM",nodes,
     &     " size of FG_COMM",fg_size,
     &     " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        else
          write (*,*) "Processor",MyRank," out of",nprocs,
     &     " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
     &     " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        endif
      endif
C Initialize other variables.
c      print '(a)','Before initialize'
c      call memmon_print_usage()
      call initialize
c      print '(a,i5,a)','Processor',myrank,' After initialize'
c      call memmon_print_usage()
C Open task-dependent files.
c      print '(a,i5,a)','Processor',myrank,' Before openunits'
c      call memmon_print_usage()
      call openunits
c      print '(a,i5,a)','Processor',myrank,' After openunits'
c      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not. out1file))
     &  write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - parallel job.'
c      print *,"Processor",myrank," exited OPENUNITS"
      return
      end
C-----------------------------------------------------------------------------
      subroutine finish_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.REMD'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.TIME1'
      include 'COMMON.MD'
      integer ilen
      external ilen
c
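C Synchronize the coarse-grain processes before reporting timings.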
      call MPI_Barrier(CG_COMM,ierr)
C Use a variable, not a literal constant, as the MPI broadcast buffer.
      iend_sig=-1
      if (nfgtasks.gt.1)
     &    call MPI_Bcast(iend_sig,1,MPI_INTEGER,king,FG_COMM,IERROR)
      time1=MPI_WTIME()
c      if (me.eq.king .or. .not. out1file) then
       write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
       write (iout,*) 'Total wall clock time',time1-walltime,' sec'
       if (nfgtasks.gt.1) then
         write (iout,'(80(1h=)/a/(80(1h=)))')
     &    "Details of FG communication time"
          write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))')
     &    "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
     &    "GATHER:",time_gather,
     &    "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
     &    "BARRIER ene",time_barrier_e,
     &    "BARRIER grad",time_barrier_g,"TOTAL:",
     &    time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
     &    +time_barrier_e+time_barrier_g
          write (*,*) 'Total wall clock time',time1-walltime,' sec'
          write (*,*) "Processor",me," BROADCAST time",time_bcast,
     &      " REDUCE time",
     &      time_reduce," GATHER time",time_gather," SCATTER time",
     &      time_scatter," SENDRECV",time_sendrecv,
     &      " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
        endif
c      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,
     &   ": moving output files to the parent directory..."
        close(inp)
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12.or.
     &     (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14.and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        endif
        if (mdpdb) then
         close(ipdb,status='keep')
         call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
         close(icart,status='keep')
         call move_from_tmp(cartname)
        endif
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end
c-------------------------------------------------------------------------
      subroutine pattern_receive
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,ThreadType
      logical flag
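C ThreadType=45 is the message tag used for threading-pattern messages.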
      ThreadType=45
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      do while (flag)
        isource=status(mpi_source)
        write (iout,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',isource
        write (*,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',isource
        nexcl=nexcl+1
C Blocking receive: the pattern is printed immediately below.
        call mpi_recv(iexam(1,nexcl),2,mpi_integer,isource,
     &    ThreadType, CG_COMM,status,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
     &    iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      enddo
      return
      end
c----------------------------------------------------------------------------
      subroutine pattern_send
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer source,ThreadType,ireq
      ThreadType=45
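C Koniec(iproc) apparently flags processors that have already finished
C (see send_stop_sig); patterns are sent only to the active ones.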
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc) ) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
     &                  ThreadType, CG_COMM, ireq, ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
        endif
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer StopType,StopId,iproc,Kwita,NBytes
      StopType=66
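C StopType=66 is the message tag for STOP signals; Kwita apparently
C carries the stop code.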
c     Kwita=-1
C     print *,'CG processor',me,' StopType=',StopType
      Koniec(me)=.true.
      if (me.eq.king) then
C Master sends the STOP signal to everybody.
        write (iout,'(a)')
     &   'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
     &          mpi_comm_world,ierr)
            write (iout,*) 'Sent STOP signal to processor',iproc
            write (*,*) 'Sent STOP signal to processor',iproc
          endif
        enddo
      else
C Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
     &          mpi_comm_world,ierr)
        write (iout,*) 'CG processor',me,' has sent STOP to Master'
        write (*,*) 'CG processor',me,' has sent STOP to Master'
      endif
      return
      end
c-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer source,StopType,StopId,iproc,Kwita
      integer status(MPI_STATUS_SIZE)
      logical flag
      StopType=66
      Kwita=0
      source=mpi_any_source
c     print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      do while (flag)
        isource=status(mpi_source)
        Koniec(isource)=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',isource
        write (*,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',isource
C Blocking receive: Kwita is used by the caller right after return.
        call mpi_recv(Kwita,1,mpi_integer,isource,StopType,
     &           mpi_comm_world,status,ierr)
        call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
      common /aaaa/ isend,irecv
      integer nsend
      save nsend
      data nsend /0/
      nsend=nsend+1
      MCM_info_Type=77
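C MCM_info_Type=77 apparently tags Monte Carlo acceptance reports sent
C to the Master (cf. receive_MCM_info, which counts nacc_tot).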
cd    write (iout,'(a,i4,a)') 'CG Processor',me,
cd   & ' is sending MCM info to Master.'
      write (*,'(a,i4,a)') 'CG processor',me,
     & ' is sending MCM info to Master.'
      call mpi_isend(ione,1,mpi_integer,MasterID,
     &               MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
cd    write (iout,*) 'CG processor',me,' has sent info to the master;',
cd   &    ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',
     &    ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      isend=0
      irecv=0
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_MCM_info
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical flag
      MCM_info_Type=77
      source=mpi_any_source
c     print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
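C Map the sender's world rank to its coarse-grain task index, assuming
C fgProcs fine-grain processes per task.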
cd      write (iout,*) 'Master is receiving MCM info from processor ',
cd   &                 source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',
     &                 source,' itask',itask
C Blocking receive: IONE is examined immediately below.
        call mpi_recv(ione,1,mpi_integer,source,MCM_info_type,
     &                  mpi_comm_world,status,ierr)
cd      write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
cd      write (iout,*) 'Nacc_tot=',Nacc_tot
cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c---------------------------------------------------------------------------
      subroutine send_thread_results
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,msglen,nbytes
      double precision buffer(20*maxthread+2)
      ThreadType=444
      EnerType=555
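C Pack bookkeeping data for the master: the extra column of IPATT holds
C NTHREAD and NEXCL, ENER is extended with the ENER0 energies, and the
C thread timing statistics go into column 2*nthread+1.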
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
C Send the IPATT array
      write (iout,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
     & ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
C Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
     & EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      double precision buffer(20*maxthread+2),max_time_for_thread_t,
     & ave_time_for_thread_t
      logical flag
      ThreadType=444
      EnerType=555
C Receive the IPATT array
      call mpi_probe(iproc,ThreadType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
     & ThreadType,
     & mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
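C The trailing column of the received IPATT block carries the sender's
C NTHREAD and NEXCL (cf. send_thread_results).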
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
C Receive the energies.
      call mpi_probe(iproc,EnerType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,
     & MPI_DOUBLE_PRECISION,iproc,
     & EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread)
     & max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+
     & nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end
#else
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.SETUP'
      me=0
      myrank=0
      fg_rank=0
      fg_size=1
      nodes=1
      nprocs=1
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))')
     & 'United-residue force field calculation - serial job.'
      return
      end
#endif