      module MPI_
!-----------------------------------------------------------------------------
      use io_units
      use MPI_data
      implicit none
!-----------------------------------------------------------------------------
!
!
!-----------------------------------------------------------------------------
      contains
!-----------------------------------------------------------------------------
! MP.F
!-----------------------------------------------------------------------------
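! This module collects the MPI plumbing of UNRES: job start-up and shutdown
! (init_task/finish_task) and the point-to-point exchange of threading
! patterns, stop signals, MCM bookkeeping, and threading results between the
! master process ("king") and the remaining coarse-grain processes.
!-----------------------------------------------------------------------------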
#ifdef MPI
      subroutine init_task

      use control, only: initialize,getenv_loc
      use io_config, only: openunits
      use control_data, only: out1file
      include 'mpif.h'
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
!      include 'COMMON.CONTROL'
!      include 'COMMON.IOUNITS'
      logical :: lprn=.false.
!      real*8 text1 /'group_i '/,text2/'group_f '/,
!     & text3/'initialb'/,text4/'initiale'/,
!     & text5/'openb'/,text6/'opene'/
      integer,dimension(0:max_cg_procs) :: cgtasks !(0:max_cg_procs)
      character(len=3) :: cfgprocs
      integer :: cg_size,fg_size,fg_size1
!el local variables
      integer :: i,ierr,key
      integer :: world_group,cg_group ! MPI group handles are integers

      allocate(status(MPI_STATUS_SIZE))

!  start parallel processing
      print *,'Initializing MPI'
      print *,'MPI_STATUS_SIZE',MPI_STATUS_SIZE
      call mpi_init(ierr)
      write (iout,*) "ierr",ierr
      call flush(iout)
      if (ierr.ne.0) then
        print *, ' cannot initialize MPI'
        stop
      endif
!  determine # of nodes and current node
      call MPI_COMM_RANK( MPI_COMM_WORLD, me, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine rank of all processes'
        call MPI_Finalize( IERR ) ! MPI_Finalize takes only the error flag
        stop
      endif
      call MPI_Comm_size( MPI_Comm_world, nodes, ierr )
      if (ierr.ne.0) then
        print *, ' cannot determine number of processes'
        stop
      endif
      Nprocs=nodes
      MyRank=me
! Determine the number of "fine-grain" tasks
      print *,"Before getenv FGPROCS"
      call getenv_loc("FGPROCS",cfgprocs)
      print *,cfgprocs
      read (cfgprocs,'(i3)') nfgtasks
      if (nfgtasks.eq.0) nfgtasks=1
      print *,nfgtasks
      print *,"Before getenv MAXGSPROCS"
      call getenv_loc("MAXGSPROCS",cfgprocs)
      print *,cfgprocs
      read (cfgprocs,'(i3)') max_gs_size
      if (max_gs_size.eq.0) max_gs_size=2
      if (lprn) &
        print *,"Processor",me," nfgtasks",nfgtasks,&
       " max_gs_size",max_gs_size
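! A minimal usage sketch (illustrative; the launcher and binary name are
! assumptions, not taken from this file):
!
!   export FGPROCS=4      # fine-grain processes per coarse-grain task
!   export MAXGSPROCS=2   # upper bound on the size of each FG_COMM1 subgroup
!   mpirun -np 16 unres   # 16 = 4 CG tasks x 4 FG processes each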
      if (nfgtasks.eq.1) then
        CG_COMM = MPI_COMM_WORLD
        fg_size=1
        fg_rank=0
        nfgtasks1=1
        fg_rank1=0
      else
        nodes=nprocs/nfgtasks
        if (nfgtasks*nodes.ne.nprocs) then
          write (*,'(a)') 'ERROR: Number of processors assigned',&
           ' to each coarse-grained task must be a divisor',&
           ' of the total number of processors.'
          call MPI_Finalize( IERR )
          stop
        endif
! Put the ranks of coarse-grain processes in one table and create
! the respective communicator. The processes with ranks "in between"
! the ranks of CG processes will perform fine graining for the CG
! process with the next lower rank.
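! Example (illustrative values): with nprocs=8 and nfgtasks=4, cgtasks=(0,4);
! ranks 0 and 4 form CG_COMM, while ranks 1-3 and 5-7 act as fine-grain
! helpers of CG ranks 0 and 4, respectively.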
!el        allocate(cgtasks(0:nodes))
        do i=0,nprocs-1,nfgtasks
          cgtasks(i/nfgtasks)=i
        enddo
        if (lprn) then
        print*,"Processor",me," cgtasks",(cgtasks(i),i=0,nodes-1)
!        print "(a,i5,a)","Processor",myrank," Before MPI_Comm_group"
        endif
!        call memmon_print_usage()
        call MPI_Comm_group(MPI_COMM_WORLD,world_group,IERR)
        call MPI_Group_incl(world_group,nodes,cgtasks,cg_group,IERR)
        call MPI_Comm_create(MPI_COMM_WORLD,cg_group,CG_COMM,IERR)
        call MPI_Group_rank(cg_group,me,ierr)
        call MPI_Group_free(world_group,ierr)
        call MPI_Group_free(cg_group,ierr)
!        print "(a,i5,a)","Processor",myrank," After MPI_Comm_group"
!        call memmon_print_usage()
        if (me.ne.MPI_UNDEFINED) call MPI_Comm_Rank(CG_COMM,me,ierr)
        if (lprn) print *," Processor",myrank," CG rank",me
! Create communicators containing processes doing "fine grain" tasks.
! The processes within each FG_COMM should have fast communication.
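! MPI_Comm_split groups ranks by "color" and orders them by "key": every
! block of nfgtasks consecutive world ranks shares a color, so each block
! becomes one FG_COMM with its CG process (key 0) as local root. E.g., with
! nfgtasks=4, world rank 6 gets kolor=1 and key=2 (illustrative values).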
        kolor=MyRank/nfgtasks
        key=mod(MyRank,nfgtasks)
        call MPI_Comm_split(MPI_COMM_WORLD,kolor,key,FG_COMM,ierr)
        call MPI_Comm_size(FG_COMM,fg_size,ierr)
        if (fg_size.ne.nfgtasks) then
          write (*,*) "OOOOps... the number of fg tasks is",fg_size,&
            " but",nfgtasks," was requested. MyRank=",MyRank
        endif
        call MPI_Comm_rank(FG_COMM,fg_rank,ierr)
        if (fg_size.gt.max_gs_size) then
          kolor1=fg_rank/max_gs_size
          key1=mod(fg_rank,max_gs_size)
          call MPI_Comm_split(FG_COMM,kolor1,key1,FG_COMM1,ierr)
          call MPI_Comm_size(FG_COMM1,nfgtasks1,ierr)
          call MPI_Comm_rank(FG_COMM1,fg_rank1,ierr)
        else
          FG_COMM1=FG_COMM
          nfgtasks1=nfgtasks
          fg_rank1=fg_rank
        endif
      endif
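! Resulting hierarchy: MPI_COMM_WORLD is partitioned into CG_COMM (one rank
! per coarse-grain task) and the per-task FG_COMM groups; when fg_size
! exceeds max_gs_size, each FG_COMM is further split into FG_COMM1 subgroups.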
      if (lprn) then
        if (fg_rank.eq.0) then
          write (*,*) "Processor",MyRank," out of",nprocs,&
           " rank in CG_COMM",me," size of CG_COMM",nodes,&
           " size of FG_COMM",fg_size,&
           " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        else
          write (*,*) "Processor",MyRank," out of",nprocs,&
           " rank in FG_COMM",fg_rank," size of FG_COMM",fg_size,&
           " rank in FG_COMM1",fg_rank1," size of FG_COMM1",nfgtasks1
        endif
      endif
! Initialize other variables.
!      print '(a)','Before initialize'
!      call memmon_print_usage()
      call initialize
!      print '(a,i5,a)','Processor',myrank,' After initialize'
!      call memmon_print_usage()
! Open task-dependent files.
!      print '(a,i5,a)','Processor',myrank,' Before openunits'
!      call memmon_print_usage()
      call openunits
!      print '(a,i5,a)','Processor',myrank,' After openunits'
!      call memmon_print_usage()
      if (me.eq.king .or. (fg_rank.eq.0 .and. .not.out1file)) &
        write (iout,'(80(1h*)/a/80(1h*))') &
       'United-residue force field calculation - parallel job.'
!      print *,"Processor",myrank," exited OPENUNITS"
!el      deallocate(cgtasks)
      return
      end subroutine init_task
!-----------------------------------------------------------------------------
      subroutine finish_task

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      use energy
      use io_base, only:ilen,move_from_tmp
      use MD_data, only: mdpdb,ntwe
      use REMD_data, only: restart1file,traj1file
      use control_data
      include 'mpif.h'
!      use MD
!      include 'COMMON.SETUP'
!      include 'COMMON.CONTROL'
!      include 'COMMON.REMD'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.TIME1'
!      include 'COMMON.MD'
!el      integer ilen
!el      external ilen
!el local variables
      integer :: IERROR,ierr
      integer :: itmp ! buffer variable; a literal is not a valid MPI buffer
      real(kind=8) :: time1
!
      call MPI_Barrier(CG_COMM,ierr)
      itmp=-1
      if (nfgtasks.gt.1) &
          call MPI_Bcast(itmp,1,MPI_INTEGER,king,FG_COMM,IERROR)
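! The broadcast value -1 presumably serves as the "quit" code for the
! fine-grain worker loop (workers blocking in a matching MPI_Bcast); this is
! inferred from the call pattern, not stated in this file.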
      time1=MPI_WTIME()
!      if (me.eq.king .or. .not. out1file) then
       write (iout,'(a,i4,a)') 'CG processor',me,' is finishing work.'
       write (iout,*) 'Total wall clock time',time1-walltime,' sec'
       if (nfgtasks.gt.1) then
         write (iout,'(80(1h=)/a/(80(1h=)))') &
          "Details of FG communication time"
         write (iout,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))') &
          "BROADCAST:",time_bcast,"REDUCE:",time_reduce,&
          "GATHER:",time_gather,&
          "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,&
          "BARRIER ene",time_barrier_e,&
          "BARRIER grad",time_barrier_g,"TOTAL:",&
          time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv &
          +time_barrier_e+time_barrier_g
         write (*,*) 'Total wall clock time',time1-walltime,' sec'
         write (*,*) "Processor",me," BROADCAST time",time_bcast,&
           " REDUCE time",&
           time_reduce," GATHER time",time_gather," SCATTER time",&
           time_scatter," SENDRECV",time_sendrecv,&
           " BARRIER ene",time_barrier_e," BARRIER grad",time_barrier_g
       endif
!      endif
      write (*,'(a,i4,a)') 'CG processor',me,' is finishing work.'
      if (ilen(tmpdir).gt.0) then
        write (*,*) "Processor",me,&
         ": moving output files to the parent directory..."
        close(inp)
        close(istat,status='keep')
        if (ntwe.gt.0) call move_from_tmp(statname)
        close(irest2,status='keep')
        if (modecalc.eq.12.or. &
           (modecalc.eq.14 .and. .not.restart1file)) then
          call move_from_tmp(rest2name)
        else if (modecalc.eq.14.and. me.eq.king) then
          call move_from_tmp(mremd_rst_name)
        endif
        if (mdpdb) then
         close(ipdb,status='keep')
         call move_from_tmp(pdbname)
        else if (me.eq.king .or. .not.traj1file) then
         close(icart,status='keep')
         call move_from_tmp(cartname)
        endif
        if (me.eq.king .or. .not. out1file) then
          close (iout,status='keep')
          call move_from_tmp(outname)
        endif
      endif
      return
      end subroutine finish_task
!-----------------------------------------------------------------------------
      subroutine pattern_receive

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use compare_data, only:nexcl,iexam
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: source,ThreadType
      logical :: flag
      integer :: ierr,ireq,iproc
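! Drain pattern: poll with a nonblocking probe and receive pending threading
! patterns one by one until none is left.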
      ThreadType=45
      source=mpi_any_source
      call mpi_iprobe(source,ThreadType,&
                       CG_COMM,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        write (iout,*) 'Processor ',Me,' is receiving threading',&
       ' pattern from processor',source
        write (*,*) 'Processor ',Me,' is receiving threading',&
       ' pattern from processor',source
        nexcl=nexcl+1
! Blocking receive: the probe above guarantees a pending message, and
! iexam(.,nexcl) is read immediately below.
        call mpi_recv(iexam(1,nexcl),2,mpi_integer,source,&
          ThreadType, CG_COMM,status,ierr)
        write (iout,*) 'Received pattern:',nexcl,iexam(1,nexcl),&
          iexam(2,nexcl)
        source=mpi_any_source
        call mpi_iprobe(source,ThreadType,&
                       CG_COMM,flag,status,ierr)
      enddo
      return
      end subroutine pattern_receive
!-----------------------------------------------------------------------------
      subroutine pattern_send

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use compare_data, only:nexcl,iexam
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: source,ThreadType,ireq,iproc,ierr
      ThreadType=45
      do iproc=0,nprocs-1
        if (iproc.ne.me .and. .not.Koniec(iproc) ) then
          call mpi_isend(iexam(1,nexcl),2,mpi_integer,iproc,&
                        ThreadType, CG_COMM, ireq, ierr)
          write (iout,*) 'CG processor ',me,' has sent pattern ',&
          'to processor',iproc
          write (*,*) 'CG processor ',me,' has sent pattern ',&
          'to processor',iproc
          write (iout,*) 'Pattern:',nexcl,iexam(1,nexcl),iexam(2,nexcl)
        endif
      enddo
      end subroutine pattern_send
!-----------------------------------------------------------------------------
      subroutine send_stop_sig(Kwita)

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.IOUNITS'
      integer :: StopType,StopId,iproc,Kwita,NBytes
      integer :: ierr
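! Stop protocol (as read from this file and recv_stop_sig): Kwita carries
! the stop code, tag StopType=66 marks stop messages, and Koniec(i) records
! that processor i has finished; the master fans the signal out to everybody,
! while slaves send it to the master only.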
      StopType=66
!     Kwita=-1
!     print *,'CG processor',me,' StopType=',StopType
      Koniec(me)=.true.
      if (me.eq.king) then
! Master sends the STOP signal to everybody.
        write (iout,'(a)') &
           'Master is sending STOP signal to other processors.'
        do iproc=1,nprocs-1
          print *,'Koniec(',iproc,')=',Koniec(iproc)
          if (.not. Koniec(iproc)) then
            call mpi_send(Kwita,1,mpi_integer,iproc,StopType,&
                mpi_comm_world,ierr)
! mpi_send returns no request handle, so only the destination is reported.
            write (iout,*) 'Sent STOP signal to iproc=',iproc
            write (*,*) 'Sent STOP signal to iproc=',iproc
          endif
        enddo
      else
! Else send the STOP signal to Master.
        call mpi_send(Kwita,1,mpi_integer,MasterID,StopType,&
                mpi_comm_world,ierr)
        write (iout,*) 'CG processor=',me,' sent STOP signal to Master'
        write (*,*) 'CG processor=',me,' sent STOP signal to Master'
      endif
      return
      end subroutine send_stop_sig
!-----------------------------------------------------------------------------
      subroutine recv_stop_sig(Kwita)
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.IOUNITS'
      integer :: source,StopType,StopId,iproc,Kwita,ireq,ierr
      logical :: flag
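! Same drain pattern as in pattern_receive: probe for pending STOP messages
! and receive them until none is left, marking each sender in Koniec.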

      StopType=66
      Kwita=0
      source=mpi_any_source
! Allocate Koniec only on the first call; an unconditional allocate would
! fail once the array already exists.
      if (.not.allocated(Koniec)) allocate(Koniec(0:nprocs)) !(0:maxprocs-1)

!     print *,'CG processor:',me,' StopType=',StopType
      call mpi_iprobe(source,StopType,&
                       mpi_comm_world,flag,status,ierr)
      do while (flag)
        iproc=status(mpi_source)
        Koniec(iproc)=.true.
        write (iout,*) 'CG processor ',me,' is receiving STOP signal',&
       ' from processor',iproc
        write (*,*) 'CG processor ',me,' is receiving STOP signal',&
       ' from processor',iproc
! Blocking receive: the probe above guarantees a pending message, and Kwita
! is inspected by the caller right after return.
        call mpi_recv(Kwita,1,mpi_integer,iproc,StopType,&
                 mpi_comm_world,status,ierr)
        call mpi_iprobe(source,StopType,&
                       mpi_comm_world,flag,status,ierr)
      enddo
      return
      end subroutine recv_stop_sig
!-----------------------------------------------------------------------------
      subroutine send_MCM_info(ione)

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.MCM'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: MCM_info_Type,MCM_info_ID,iproc,one,NBytes
!el      common /aaaa/ isend,irecv
      integer :: ione,ierr,isend,irecv
      integer,save :: nsend=0 ! initialized; a bare SAVE leaves the counter undefined
      nsend=nsend+1
      MCM_info_Type=77
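! ione encodes the reported Monte Carlo event: on the receiving side every
! message increments nacc_tot, and ione=2 additionally increments
! nsave_part(itask), i.e. it flags a saved conformation (see
! receive_MCM_info).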
!cd    write (iout,'(a,i4,a)') 'CG Processor',me,
!cd   & ' is sending MCM info to Master.'
! MCM_info_ID is defined only after mpi_isend returns; the send counter is
! reported here.
      write (*,'(a,i4,a,i8)') 'CG processor',me,&
       ' is sending MCM info to Master, nsend=',nsend
      call mpi_isend(ione,1,mpi_integer,MasterID,&
                     MCM_info_Type,mpi_comm_world, MCM_info_ID, ierr)
!cd    write (iout,*) 'CG processor',me,' has sent info to the master;',
!cd   &    ' MCM_info_ID=',MCM_info_ID
      write (*,*) 'CG processor',me,' has sent info to the master;',&
          ' MCM_info_ID=',MCM_info_ID,' ierr ',ierr
      isend=0
      irecv=0
      return
      end subroutine send_MCM_info
!-----------------------------------------------------------------------------
      subroutine receive_MCM_info

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use names
      use MCM_data, only:nacc_tot,nsave_part
      use compare_data, only:nthread,nexcl,ipatt
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.MCM'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer ::  source,MCM_info_Type,MCM_info_ID,iproc,ione
      logical :: flag
      integer :: itask,ierr
      MCM_info_Type=77
      source=mpi_any_source
!     print *,'source=',source,' dontcare=',dontcare
      call mpi_iprobe(source,MCM_info_Type,&
                      mpi_comm_world,flag,status,ierr)
      do while (flag)
        source=status(mpi_source)
        itask=source/fgProcs+1
!d      write (iout,*) 'Master is receiving MCM info from processor ',&
!d                     source,' itask',itask
        write (*,*) 'Master is receiving MCM info from processor ',&
                       source,' itask',itask
! Blocking receive: the probe above guarantees a pending message, and ione
! is used immediately below.
        call mpi_recv(ione,1,mpi_integer,source,MCM_info_type,&
                        mpi_comm_world,status,ierr)
!d      write (iout,*) 'Received from processor',source,' IONE=',ione
        write (*,*) 'Received from processor',source,' IONE=',ione
        nacc_tot=nacc_tot+1
        if (ione.eq.2) nsave_part(itask)=nsave_part(itask)+1
!cd      print *,'nsave_part(',itask,')=',nsave_part(itask)
!cd      write (iout,*) 'Nacc_tot=',Nacc_tot
!cd      write (*,*) 'Nacc_tot=',Nacc_tot
        source=mpi_any_source
        call mpi_iprobe(source,MCM_info_Type,&
                      mpi_comm_world,flag,status,ierr)
      enddo
      return
      end subroutine receive_MCM_info
!-----------------------------------------------------------------------------
      subroutine send_thread_results

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use names
      use compare_data
      include 'mpif.h'
!      include 'COMMON.SETUP'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: tag,status(MPI_STATUS_SIZE)
      integer :: ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,&
         EnerID,msglen,nbytes
      real(kind=8) :: buffer(20*maxthread+2)
      integer :: i,j,ierror
      ThreadType=444
      EnerType=555
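! Message layout: ipatt(:,1:nthread) holds the patterns and column nthread+1
! carries (nthread,nexcl); ener(:,1:nthread) holds ener, columns
! nthread+1:2*nthread a copy of ener0, and the first two elements of column
! 2*nthread+1 the max/ave threading times.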
      ipatt(1,nthread+1)=nthread
      ipatt(2,nthread+1)=nexcl
      do i=1,nthread
        do j=1,n_ene
          ener(j,i+nthread)=ener0(j,i)
        enddo
      enddo
      ener(1,2*nthread+1)=max_time_for_thread
      ener(2,2*nthread+1)=ave_time_for_thread
! Send the IPATT array
      write (iout,*) 'CG processor',me,&
       ' is sending IPATT array to master: NTHREAD=',nthread
      write (*,*) 'CG processor',me,&
       ' is sending IPATT array to master: NTHREAD=',nthread
      msglen=2*nthread+2
      call mpi_send(ipatt(1,1),msglen,MPI_INTEGER,MasterID,&
       ThreadType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,&
       ' has sent IPATT array to master MSGLEN',msglen
      write (*,*) 'CG processor',me,&
       ' has sent IPATT array to master MSGLEN',msglen
! Send the energies.
      msglen=n_ene2*nthread+2
      write (iout,*) 'CG processor',me,' is sending energies to master.'
      write (*,*) 'CG processor',me,' is sending energies to master.'
      call mpi_send(ener(1,1),msglen,MPI_DOUBLE_PRECISION,MasterID,&
       EnerType,mpi_comm_world,ierror)
      write (iout,*) 'CG processor',me,' has sent energies to master.'
      write (*,*) 'CG processor',me,' has sent energies to master.'
      return
      end subroutine send_thread_results
!-----------------------------------------------------------------------------
      subroutine receive_thread_results(iproc)

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use names
      use compare_data
      include 'mpif.h'
!      include 'COMMON.INFO'
!      include 'COMMON.THREAD'
!      include 'COMMON.IOUNITS'
      integer :: ibuffer(2*maxthread+2),ThreadType,ThreadID,EnerType,&
         EnerID,ReadyType,ReadyID,Ready,msglen,nbytes,nthread_temp
      real(kind=8) :: buffer(20*maxthread+2),max_time_for_thread_t,&
       ave_time_for_thread_t
      logical :: flag
      integer :: iproc,ierr,ierror,i,j
      integer :: nexcl_temp ! an integer count copied from ipatt
      ThreadType=444
      EnerType=555
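! Counterpart of send_thread_results: the trailing column of the received
! IPATT block carries (nthread_temp,nexcl_temp), and the trailing slot of
! the energy block the sender's max/ave threading times.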
! Receive the IPATT array
      call mpi_probe(iproc,ThreadType,&
                       mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_INTEGER, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving IPATT array from processor:',&
          iproc,' MSGLEN',msglen
      write (*,*) 'Master is receiving IPATT array from processor:',&
          iproc,' MSGLEN',msglen
      call mpi_recv(ipatt(1,nthread+1),msglen,mpi_integer,iproc,&
       ThreadType,&
       mpi_comm_world,status,ierror)
      write (iout,*) 'Master has received IPATT array from processor:',&
          iproc,' MSGLEN=',msglen
      write (*,*) 'Master has received IPATT array from processor:',&
          iproc,' MSGLEN=',msglen
      nthread_temp=ipatt(1,nthread+msglen/2)
      nexcl_temp=ipatt(2,nthread+msglen/2)
! Receive the energies.
      call mpi_probe(iproc,EnerType,&
                       mpi_comm_world,status,ierr)
      call MPI_GET_COUNT(STATUS, MPI_DOUBLE_PRECISION, MSGLEN, IERROR)
      write (iout,*) 'Master is receiving energies from processor:',&
          iproc,' MSGLEN=',MSGLEN
      write (*,*) 'Master is receiving energies from processor:',&
          iproc,' MSGLEN=',MSGLEN
      call mpi_recv(ener(1,nthread+1),msglen,&
       MPI_DOUBLE_PRECISION,iproc,&
       EnerType,MPI_COMM_WORLD,status,ierror)
      write (iout,*) 'Msglen=',Msglen
      write (*,*) 'Msglen=',Msglen
      write (iout,*) 'Master has received energies from processor',iproc
      write (*,*) 'Master has received energies from processor',iproc
      write (iout,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      write (*,*) 'NTHREAD_TEMP=',nthread_temp,' NEXCL=',nexcl_temp
      do i=1,nthread_temp
        do j=1,n_ene
          ener0(j,nthread+i)=ener(j,nthread+nthread_temp+i)
        enddo
      enddo
      max_time_for_thread_t=ener(1,nthread+2*nthread_temp+1)
      ave_time_for_thread_t=ener(2,nthread+2*nthread_temp+1)
      write (iout,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (iout,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      write (*,*) 'MAX_TIME_FOR_THREAD:',max_time_for_thread_t
      write (*,*) 'AVE_TIME_FOR_THREAD:',ave_time_for_thread_t
      if (max_time_for_thread_t.gt.max_time_for_thread) &
       max_time_for_thread=max_time_for_thread_t
      ave_time_for_thread=(nthread*ave_time_for_thread+ &
       nthread_temp*ave_time_for_thread_t)/(nthread+nthread_temp)
      nthread=nthread+nthread_temp
      return
      end subroutine receive_thread_results
!-----------------------------------------------------------------------------
#else
      subroutine init_task

      use control, only: initialize,getenv_loc
      use io_config, only: openunits
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
!      include 'COMMON.SETUP'
      integer :: fg_size

      me=0
      myrank=0
      fg_rank=0
      fg_size=1
      nodes=1
      nprocs=1
      call initialize
      call openunits
      write (iout,'(80(1h*)/a/80(1h*))') &
       'United-residue force field calculation - serial job.'
      return
      end subroutine init_task
#endif
!-----------------------------------------------------------------------------
!-----------------------------------------------------------------------------
      end module MPI_