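C MP.F: MPI setup and teardown plus interprocessor communication
C routines of UNRES; a serial stub of init_task for non-MPI builds
C is provided at the bottom of the file.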
#ifdef MPI
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.IOUNITS'
      logical lprn /.false./
c      real*8 text1 /'group_i '/,text2/'group_f '/,
c     & text3/'initialb'/,text4/'initiale'/,
c     & text5/'openb'/,text6/'opene'/
      integer cgtasks(0:max_cg_procs)
      character*3 cfgprocs
      integer cg_size,fg_size,fg_size1
c  start parallel processing
c      print *,'Initializing MPI'
      call mpi_init(ierr)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
c  determine # of nodes and current node
      call MPI_Comm_rank( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine the rank of this process'
        call MPI_Finalize( IERR )
        stop
      endif
      call MPI_Comm_size( MPI_Comm_world, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
      write (*,*) "nodes",nodes," rank",me
C Determine the number of "fine-grain" tasks
      call getenv_loc("FGPROCS",cfgprocs)
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      call getenv_loc("MAXGSPROCS",cfgprocs)
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
      if (lprn) 
     &  print *,"Processor",me," nfgtasks",nfgtasks,
     & " max_gs_size",max_gs_size
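C The parallel layout is two-level: every nfgtasks-th rank of
C MPI_COMM_WORLD is a coarse-grain (CG) master, collected in CG_COMM,
C and each master owns the nfgtasks-1 ranks following it as its
C fine-grain workers (FG_COMM). An FG group larger than MAXGSPROCS
C is split once more into FG_COMM1.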
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: The number of fine-grain processors',
     &     ' (FGPROCS) must be a divisor of the total number',
     &     ' of processors.'
          call MPI_Finalize( IERR )
          stop
        endif
C Put the ranks of coarse-grain processes in one table and create
C the respective communicator. The processes with ranks "in between" 
C the ranks of CG processes will perform fine graining for the CG
C process with the next lower rank.
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
        if (lprn) then
        print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
c        print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
c        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
c        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
c        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
C Create communicators containing processes doing "fine grain" tasks.
C The processes within each FG_COMM should have fast communication.
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
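C Ranks sharing the same color (MyRank/nfgtasks) end up in the same
C FG_COMM; the key preserves the world-rank order, so the CG master
C of each group gets fg_rank 0.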
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size,
     &      " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=nfgtasks
          fg_rank1=fg_rank
        endif
      endif
      if (fg_rank.eq.0) then
      write (*,*) "Processor",MyRank," out of",nprocs,
     & " rank in CG_COMM",me," size of CG_COMM",nodes,
     & " size of FG_COMM",fg_size,
     & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      else
      write (*,*) "Processor",MyRank," out of",nprocs,
     & " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,
     & " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
      endif
C Initialize other variables.
c      print '(a)','Before initialize'
c      call memmon_print_usage()
      call initialize
c      print '(a,i5,a)','Processor',myrank,' After initialize'
c      call memmon_print_usage()
C Open task-dependent files.
c      print '(a,i5,a)','Processor',myrank,' Before openunits'
c      call memmon_print_usage()
      call openunits
c      print '(a,i5,a)','Processor',myrank,' After openunits'
c      call memmon_print_usage()
      if (me.eq.king .or. fg_rank.eq.0 .and. .not. out1file) 
     &  write (iout,'(80(1h*)/a/80(1h*))') 
     & 'United-residue force field calculation - parallel job.'
c      print *,"Processor",myrank," exited OPENUNITS"
      return
      end
C-----------------------------------------------------------------------------
      subroutine finish_task
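C Synchronize at the end of a run, signal the fine-grain workers,
C report timing statistics, and move the output files out of the
C scratch directory if one was used.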
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
      include 'COMMON.REMD'
      include 'COMMON.IOUNITS'
      include 'COMMON.FFIELD'
      include 'COMMON.TIME1'
      include 'COMMON.MD'
      integer ilen
      external ilen
      integer ifinish
c
      call MPI_Barrier(CG_COMM,ierr)
c Pass the -1 termination flag through a variable; a literal constant
c must not be used as an MPI buffer.
      ifinish=-1
      if (nfgtasks.gt.1) 
     &    call MPI_Bcast(ifinish,1,MPI_INTEGER,king,FG_COMM,IERROR)
      time1=MPI_WTIME()
      if (me.eq.king .or. .not. out1file) then
       write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
       write (iout,*) 'Total wall clock time',time1-walltime,' sec'
       if (nfgtasks.gt.1) then
         write (iout,'(80(1h=)/a/(80(1h=)))') 
     &    "Details of FG communication time"
          write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))') 
     &    "BROADCAST:",time_bcast,"REDUCE:",time_reduce,
     &    "GATHER:",time_gather,
     &    "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,
     &    "BARRIER ene",time_barrier_e,
     &    "BARRIER grad",time_barrier_g,"TOTAL:",
     &    time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
     &    +time_barrier_e+time_barrier_g
          write (*,*) 'Total wall clock time',time1-walltime,' sec'
          write (*,*) "Processor",me," BROADCAST time",time_bcast,
     &      " REDUCE time",
     &      time_reduce," GATHER time",time_gather," SCATTER time",
     &      time_scatter," SENDRECV",time_sendrecv,
     &      " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
       endif
      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,
     &   ": moving output files to the parent directory..."
        close(inp)
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12.or.
     &     (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name) 
        else if (modecalc.eq.14.and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        endif
        if (mdpdb) then
         close(ipdb,status='keep')
         call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
         close(icart,status='keep')
         call move_from_tmp(cartname)
        endif
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end
c-------------------------------------------------------------------------
      subroutine pattern_receive
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,ThreadType
      logical flag
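C Drain all currently pending threading-pattern messages with a
C probe/receive loop and add each pattern to the exclusion list.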
      ThreadType=45
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      do while (flag)
        write (iout,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',status(mpi_source)
        write (*,*) 'Processor ',Me,' is receiving threading',
     & ' pattern from processor',status(mpi_source)
        nexcl=nexcl+1
        call mpi_irecv(iexam(1,nexcl),2,mpi_integer,status(mpi_source),
     &    ThreadType, CG_COMM,ireq,ierr)
c Complete the nonblocking receive before the pattern is used.
        call mpi_wait(ireq,status,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),
     &    iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,
     &                 CG_COMM,flag,status,ierr)
      enddo
      return
      end
c----------------------------------------------------------------------------
      subroutine pattern_send
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer source,ThreadType,ireq
      ThreadType=45
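C Send the newest excluded pattern to every other processor that has
C not yet finished its work (Koniec marks finished tasks).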
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc) ) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,
     &                  ThreadType, CG_COMM, ireq, ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',
     &    'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
        endif
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer StopType,StopId,iproc,Kwita,NBytes
      StopType=66
c     Kwita=-1
C     print *,'CG processor',me,' StopType=',StopType
      Koniec(me)=.true.
      if (me.eq.king) then
C Master sends the STOP signal to everybody.
        write (iout,'(a)') 
     &   'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,
     &          mpi_comm_world,ierr)
            write (iout,*) 'Sent STOP signal to processor',iproc
            write (*,*) 'Sent STOP signal to processor',iproc
          endif
        enddo
      else
C Otherwise send the STOP signal to the Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,
     &          mpi_comm_world,ierr)
        write (iout,*) 'CG processor',me,' sent STOP signal to Master'
        write (*,*) 'CG processor',me,' sent STOP signal to Master'
      endif
      return
      end
c-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.IOUNITS'
      integer source,StopType,StopId,iproc,Kwita,ireq
      integer status(MPI_STATUS_SIZE)
      logical flag
      StopType=66
      Kwita=0
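C Nonblocking check for STOP messages: every pending one is received
C into Kwita and its sender is marked as finished in Koniec.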
      source=mpi_any_source
c     print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      do while (flag)
        Koniec(status(mpi_source))=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',status(mpi_source)
        write (*,*) 'CG processor ',me,' is receiving STOP signal',
     & ' from processor',status(mpi_source)
        call mpi_irecv(Kwita,1,mpi_integer,status(mpi_source),StopType,
     &           mpi_comm_world,ireq,ierr)
c Complete the receive so that Kwita is valid before the next probe.
        call mpi_wait(ireq,status,ierr)
        call mpi_iprobe(source,StopType,
     &                 mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer MCM_info_Type,MCM_info_ID,iproc,one,NBytes
      common /aaaa/ isend,irecv
      integer nsend
      save nsend
      data nsend /0/
      nsend=nsend+1
      MCM_info_Type=77
cd    write (iout,'(a,i4,a)') 'CG Processor',me,
cd   & ' is sending MCM info to Master.'
c MCM_info_ID is defined only after mpi_isend returns, so it is
c reported in the message following the send.
      write (*,'(a,i4,a)') 'CG processor',me,
     & ' is sending MCM info to Master.'
      call mpi_isend(ione,1,mpi_integer,MasterID,
     &               MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
cd    write (iout,*) 'CG processor',me,' has sent info to the master;',
cd   &    ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',
     &    ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      isend=0
      irecv=0
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_MCM_info
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.MCM'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical flag
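C Master-side tally of Monte Carlo acceptance messages: each pending
C message increments nacc_tot, and ione=2 apparently marks a
C conformation saved by the sending task.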
      MCM_info_Type=77
      source=mpi_any_source
c     print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
cd      write (iout,*) 'Master is receiving MCM info from processor ',
cd   &                 source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',
     &                 source,' itask',itask
        call mpi_irecv(ione,1,mpi_integer,source,MCM_info_type,
     &                  mpi_comm_world,MCM_info_ID,ierr)
c Complete the receive before ione is inspected.
        call mpi_wait(MCM_info_ID,status,ierr)
cd      write (iout,*) 'Received from processor',source,' IONE=',ione 
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
cd      write (iout,*) 'Nacc_tot=',Nacc_tot
cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,
     &                mpi_comm_world,flag,status,ierr)
      enddo
      return
      end
c---------------------------------------------------------------------------
      subroutine send_thread_results
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer tag,status(MPI_STATUS_SIZE)
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,msglen,nbytes
      double precision buffer(20*maxthread+2)
      ThreadType=444
      EnerType=555
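C Packing scheme: column nthread+1 of IPATT carries the counts
C (nthread,nexcl), columns nthread+1..2*nthread of ENER carry ENER0,
C and column 2*nthread+1 carries the thread timing statistics.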
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
C Send the IPATT array
      write (iout,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,
     & ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,
     & ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,
     & ' has sent IPATT array to master MSGLEN',msglen
C Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,
     & EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end
c----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.INFO'
      include 'COMMON.THREAD'
      include 'COMMON.IOUNITS'
      integer ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,
     &   EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      integer status(MPI_STATUS_SIZE)
      double precision buffer(20*maxthread+2),max_time_for_thread_t,
     & ave_time_for_thread_t
      logical flag
      ThreadType=444
      EnerType=555
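C The worker's data land after the master's own nthread entries:
C first the IPATT patterns together with their counts, then the
C energies; finally the thread timing statistics are merged.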
C Receive the IPATT array
      call mpi_probe(iproc,ThreadType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',
     &    iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,
     & ThreadType,
     & mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',
     &    iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
C Receive the energies.
      call mpi_probe(iproc,EnerType,
     &                 mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',
     &    iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,
     & MPI_DOUBLE_PRECISION,iproc,
     & EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread)
     & max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+
     & nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end
#else
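C Serial version: a single task with all ranks fixed at zero.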
      subroutine init_task
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.SETUP'
      me=0
      myrank=0
      fg_rank=0
      fg_size=1
      nodes=1
      nprocs=1
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))') 
     & 'United-residue force field calculation - serial job.'
      return
      end
#endif