update new files
[unres.git] / source / maxlik / src_MD_T_maxlik-NEWCORR-PMF-5 / zscorez.F
1       program ENE_OPT
2 c Optimize the UNRES energy function by minimization of a quartic target
3 c function or by the VMC method.
4       implicit none
5 #ifndef ISNAN
6       external proc_proc
7 #endif
8 #ifdef WINPGI
9 cMS$ATTRIBUTES C ::  proc_proc
10 #endif
11       include "DIMENSIONS.ZSCOPT"
12 #ifdef MPI
13       include "mpif.h"
14       integer IERROR,ERRCODE,kolor,key
15       include "COMMON.MPI"
16 #endif
17       include "COMMON.IOUNITS"
18       include "COMMON.OPTIM"
19       integer nvarr,iparm
20       double precision rr,x(max_paropt)
21       integer idumm
22       integer i
23 c      print *,"Starting..."
24 #ifdef MPI
25 c      print *,"Initializing MPI..."
26       call MPI_Init( IERROR )
27       call MPI_Comm_rank( MPI_COMM_WORLD, me, IERROR )
28       call MPI_Comm_size( MPI_COMM_WORLD, nprocs, IERROR )
29 c      print *,"Finished initializing MPI..."
30       Master = 0
31       Master1 = 1
32 c      print *,"Me",me," Master",master," Ierror",ierror
33       if (ierror.gt.0) then
34         write(iout,*) "SEVERE ERROR - Can't initialize MPI."
35         call mpi_finalize(ierror)
36         stop
37       endif
38 #endif
39 #ifndef ISNAN
40 c NaNQ initialization
41       i=-1
42       rr=dacos(100.0d0)
43 #ifdef WINPGI
44       idumm=proc_proc(rr,i)
45 #else
46       call proc_proc(rr,i)
47 #endif
48 #endif
49       call initialize
50 c      print *,"calling openunits"
51       call openunits
52 c      print *,"openunits called"
53       call read_general_data(*10)
54       write (iout,'(80(1h-)/10x,
55      & "Maximum likelihood optimization of UNRES energy function",
56      & " v. 05/10/16"/80(1h-))')
57       call flush(iout)
58       call cinfo
59 c      call promienie(*10)
60       write (iout,*) "Finished READ_GENERAL_DATA"
61       call flush(iout)
62       do iparm=1,nparmset
63         call parmread(iparm,*10)
64       enddo
65       call read_pmf_data(*10)
66       write (iout,*) "Finished parmread"
67       call flush(iout)
68       call read_optim_parm(*10)
69       call print_general_data(*10)
70       call read_protein_data(*10)
71       write (iout,*) "Finished READ_PROTEIN_DATA"
72       call flush(iout)
73       call read_database(*10)
74       write (iout,*) "Finished READ_DATABASE"
75       call flush(iout)
76 #ifdef MPI 
77 c      write (iout,*) Me,' calling PROC_GROUPS'
78       call proc_groups
79 c      write (iout,*) Me,' calling WORK_PARTITION_MAP'
80 c      call work_partition_map(nvarr)
81 #endif
82 #ifdef NEWCORR
83       call pmfread(*10)
84 #endif
85       call proc_data(nvarr,x,*10)
86       call read_thermal
87 #ifdef MPI
88       if (me.eq.Master) then
89 #endif
90       call maxlikopt(nvarr,x)
91 #ifdef MPI
92       call jebadelko(nvarr)
93       else
94         call jebadelko(nvarr)
95       endif
96       call bilans
97       call MPI_Finalize( IERROR )
98 #else
99       call bilans
100 #endif
101       stop
102    10 write (iout,*) "Error termination of the program"
103       call MPI_Finalize( IERROR )
104       stop
105       end
106 c------------------------------------------------------------------------------
      subroutine bilans
c Print per-task timing statistics (number of calls, total CPU time and
c average time per call) accumulated in common /timing/ by the compute
c routines.  In MPI builds the per-processor tables are additionally
c summed over all processors of WHAM_COMM and the grand totals are
c printed by the master processor.
      implicit none
      integer ntasks 
      parameter (ntasks=11)
      include "DIMENSIONS.ZSCOPT"
#ifdef MPI
      include "mpif.h"
      include "COMMON.MPI"
      integer IERROR
c Receive buffers for the across-processor reduction of the tables.
      double precision ttask_all(ntasks)
      integer nctask_all(ntasks)
#endif
      include "COMMON.IOUNITS"
      integer i
      double precision ttask
      integer nctask
      common /timing/ ttask(ntasks),nctask(ntasks)
c Task labels; only the first two of the ntasks slots are used so far.
      character*16 tname(ntasks) /"function","gradient",9*''/

      write (iout,*)
      write (iout,'(80(1h-))')
#ifdef MPI
      write (iout,*) "Routine call info from the processor ",me," ..."
#else
      write (iout,*) "Routine call info ..."
#endif
      write (iout,*)
      write (iout,'(65(1h-))')
      write (iout,100) "task","   # calls","     total time",
     &  "  ave.time/call"
      write (iout,'(65(1h-))')
      do i=1,ntasks
c The 1.0d-10 term guards against division by zero for unused tasks.
        write (iout,200) tname(i),nctask(i),ttask(i),
     &  ttask(i)/(nctask(i)+1.0d-10)
      enddo
      write (iout,*) 
#ifdef MPI
c Sum the timing tables on the master and print the overall totals.
      call MPI_Reduce(ttask(1),ttask_all(1),ntasks, 
     &  MPI_DOUBLE_PRECISION, MPI_SUM, Master, WHAM_COMM,IERROR)
      call MPI_Reduce(nctask(1),nctask_all(1),ntasks, 
     &  MPI_INTEGER, MPI_SUM, Master, WHAM_COMM,IERROR)
      if (Me.eq.Master) then
      write (iout,'(80(1h-))')
      write (iout,*) "Routine call info from all processors ..."
      write (iout,*)
      write (iout,'(65(1h-))')
      write (iout,100) "task","   # calls","     total time",
     &  "  ave.time/call"
      write (iout,'(65(1h-))')
      do i=1,ntasks
        write (iout,200) tname(i),nctask_all(i),ttask_all(i),
     &    ttask_all(i)/(nctask_all(i)+1.0d-10)
      enddo
      write (iout,*) 
      endif
#endif
      return
  100 format(a,t21,a,t31,a,t46,a)
  200 format(a,t21,i10,f15.2,f15.8)
      end
167 c------------------------------------------------------------------------------
168 #ifdef MPI
169       subroutine proc_groups
170 C Split the processors into the Master and Workers group, if needed.
171       implicit none
172       include "DIMENSIONS"
173       include "DIMENSIONS.ZSCOPT"
174       include "mpif.h"
175       include "COMMON.IOUNITS"
176       include "COMMON.MPI"
177       include "COMMON.VMCPAR"
178       integer n,chunk,iprot,i,j,ii,remainder
179       integer kolor,key,ierror,errcode
180       logical lprint
181       lprint=.true.
182 C No secondary structure constraints. 
183       Comm1 = WHAM_COMM
184       Me1 = Me
185       Nprocs1 = Nprocs
186       return
187       end
188 c-------------------------------------------------------------------------------
      subroutine work_partition(lprint)
c Split the conformations between processors
c
c For each protein iprot the ntot_work(iprot) conformations are divided
c into contiguous chunks, one chunk per processor of the Comm1 group
c (nprocs1 processors; this processor has rank me1).  Results are
c stored in COMMON.MPI arrays:
c   indstart(i,iprot)/indend(i,iprot) - first/last conformation
c       assigned to processor i,
c   scount(i,iprot) - chunk size of processor i (counts for the
c       MPI gather/scatter calls),
c   idispl(i,iprot) - 0-based displacement of processor i's chunk,
c   i2ii(j,iprot)   - local (1-based) index of global conformation j
c       on THIS processor, or 0 when j is handled elsewhere.
c lprint - when .true., the resulting partition is printed to iout.
c The run is aborted via MPI_Abort when the chunk sizes do not sum up
c to ntot_work(iprot).
      implicit none
      include "DIMENSIONS"
      include "DIMENSIONS.ZSCOPT"
      include "mpif.h"
      include "COMMON.CLASSES"
      include "COMMON.IOUNITS"
      include "COMMON.MPI"
      include "COMMON.VMCPAR"
      integer n,chunk,iprot,i,j,ii,remainder
      integer kolor,key,ierror,errcode
      logical lprint
C
C Divide conformations between processors; for each proteins the first and
C the last conformation to handle by ith processor is stored in 
C indstart(i,iprot) and indend(i,iprot), respectively.
C
C First try to assign equal number of conformations to each processor.
C
      do iprot=1,nprot
        n=ntot_work(iprot)
        write (iout,*) "Protein",iprot," n=",n
        indstart(0,iprot)=1
        chunk = N/nprocs1
        scount(0,iprot) = chunk
c        print *,"i",0," indstart",indstart(0,iprot)," scount",
c     &     scount(0,iprot)
        do i=1,nprocs1-1
          indstart(i,iprot)=chunk+indstart(i-1,iprot) 
          scount(i,iprot)=scount(i-1,iprot)
c          print *,"i",i," indstart",indstart(i,iprot)," scount",
c     &     scount(i,iprot)
        enddo 
C
C Determine how many conformations remained yet unassigned.
C
        remainder=N-(indstart(nprocs1-1,iprot)
     &    +scount(nprocs1-1,iprot)-1)
c        print *,"remainder",remainder
C
C Assign the remainder conformations to consecutive processors, starting
C from the lowest rank; this continues until the list is exhausted.
C
        if (remainder .gt. 0) then 
          do i=1,remainder
            scount(i-1,iprot) = scount(i-1,iprot) + 1
            indstart(i,iprot) = indstart(i,iprot) + i
          enddo
          do i=remainder+1,nprocs1-1
            indstart(i,iprot) = indstart(i,iprot) + remainder
          enddo
        endif

C Sentinel entry so that indend of the last processor is computed by
C the same formula as for all other ranks.
        indstart(nprocs1,iprot)=N+1
        scount(nprocs1,iprot)=0

        do i=0,NProcs1
          indend(i,iprot)=indstart(i,iprot)+scount(i,iprot)-1
          idispl(i,iprot)=indstart(i,iprot)-1
        enddo

C Consistency check: the chunk sizes must sum to the total number of
C conformations of this protein.
        N=0
        do i=0,Nprocs1-1
          N=N+indend(i,iprot)-indstart(i,iprot)+1
        enddo

c        print *,"N",n," NTOT",ntot_work(iprot)
        if (N.ne.ntot_work(iprot)) then
          write (iout,*) "!!! Checksum error on processor",me
          call flush(iout)
          call MPI_Abort( WHAM_COMM, Ierror, Errcode )
        endif

C Build the global-to-local conformation index map for this processor.
        ii=0
        do i=1,ntot_work(iprot)
          if (i.ge.indstart(me1,iprot) .and. i.le.indend(me1,iprot)) 
     &    then
            ii=ii+1
            i2ii(i,iprot)=ii
          else
            i2ii(i,iprot)=0
          endif
c          write (iout,*) "i",i," iprot",iprot," i2ii",i2ii(i,iprot)
        enddo

      enddo ! iprot
      if (lprint) then
        write (iout,*) "Partition of work between processors"
        do iprot=1,nprot
          write (iout,*) "Protein",iprot
#ifdef DEBUG
          write (iout,*) "The i2ii array"
          do j=1,ntot_work(iprot)
            write (iout,*) j,i2ii(j,iprot)
          enddo
#endif
          do i=0,nprocs1-1
            write (iout,'(a,i5,a,i7,a,i7,a,i7)')
     &        "Processor",i," indstart",indstart(i,iprot),
     &        " indend",indend(i,iprot)," count",scount(i,iprot)
          enddo
        enddo
      endif
      return
      end
295 c------------------------------------------------------------------------------
296       subroutine jebadelko(nvarr)
297       implicit none
298       include "DIMENSIONS"
299       include "DIMENSIONS.ZSCOPT"
300       include "mpif.h"
301       include "COMMON.IOUNITS"
302       include "COMMON.MPI"
303       include "COMMON.VMCPAR"
304       include "COMMON.CLASSES"
305       include "COMMON.OPTIM"
306       include "COMMON.WEIGHTS"
307       include "COMMON.WEIGHTDER"
308       include "COMMON.ENERGIES"
309       include "COMMON.TIME1"
310       include "COMMON.PROTNAME"
311       include "COMMON.PROTFILES"
312       include "COMMON.TORSION"
313       include "COMMON.COMPAR"
314       integer What, TAG, IERROR, status(MPI_STATUS_SIZE), istop, iprot, 
315      &  nvarr, nf, errcode, ider
316       integer count
317       double precision x(max_paropt), g(max_paropt), viol
318       integer uiparm,i,j
319       integer ibatch,ib
320       external fdum
321       double precision rdum,rdif
322       double precision tcpu,t1,t1w,t1_ini,t1w_ini
323       logical lprn,op
324       integer iun
325       lprn=.false.
326       write(iout,*) "Processor",me,me1," called JEBADELKO"
327       call flush(iout)
328       if (me.eq.Master) then
329         istop=0
330         call func1(nvarr,istop,x)
331       else
332 #ifdef DEBUG
333       write (iout,*) "ELOWEST at slave  starting JEBADELKO"
334       do iprot=1,nprot
335         do ibatch=1,natlike(iprot)+2
336           do ib=1,nbeta(ibatch,iprot)
337             write (iout,*) "iprot",iprot," ibatch",ibatch," ib",ib,
338      &         " elowest",elowest(ib,ibatch,iprot)
339           enddo
340         enddo
341       enddo
342 #endif
343         istop=1
344         t1w_ini = MPI_WTIME()
345         t1_ini = tcpu()
346         do while (istop.ne.0)
347 #ifdef DEBUG
348       write (iout,*) "ELOWEST at slave  calling FUNC1 from JBADELKO"
349       do iprot=1,nprot
350         do ibatch=1,natlike(iprot)+2
351           do ib=1,nbeta(ibatch,iprot)
352             write (iout,*) "iprot",iprot," ibatch",ibatch," ib",ib,
353      &         " elowest",elowest(ib,ibatch,iprot)
354           enddo
355         enddo
356       enddo
357 #endif
358           call func1(nvarr,istop,x)
359 c          write (iout,*) "slave: after func1"
360 c          call flush(iout)
361 #ifdef NEWCORR
362           if (istop.eq.1 .and. mod_fourier(nloctyp).gt.0) then
363             rdum = rdif(nvarr,x,g,ider)
364 c            write (iout,*) "slave: after rdif"
365 c            call flush(iout)
366           endif
367 #endif
368         enddo
369         t1w = mpi_wtime() - t1w_ini
370         t1 = tcpu() - t1_ini
371         write (iout,*)
372         write (iout,*) "CPU time",t1," wall clock time",t1w
373         call flush(iout)
374       endif
375 #ifdef DEBUG
376       write (iout,*)
377       write (iout,*) "Energies of processor",me
378       do j=1,nprot
379         write (iout,*)
380         write (iout,*) "Protein ",protname(iprot)
381         do i=1,ntot_work(j)
382           if (i.ge.indstart(me1,j).and.i.le.indend(me1,j)) then
383             write(iout,*)i,e_total(i,j),rmstb(i,j),iscore(i,0,j)
384           endif
385         enddo
386       enddo
387 #endif
388       do iprot=1,nprot
389         write (iout,*) "Deleting scratchfile",bprotfiles(iprot)
390         inquire (file=bprotfiles(iprot),number=iun,opened=op)
391         write (iout,*) "unit",iun," icbase",icbase
392         if (.not. op) then
393           open (icbase,file=bprotfiles(iprot),status="old",
394      &    form="unformatted",access="direct",recl=lenrec(iprot))
395           close(icbase,status="delete")
396         else
397           close(iun,status="delete")
398         endif
399         call flush(iout)
400         if (.not.mod_other_params) then
401         write (iout,*) "Deleting scratchfile",benefiles(iprot)
402         inquire (file=benefiles(iprot),number=iun,opened=op)
403         write (iout,*) "unit",iun," ientout",icbase
404         if (.not.op) then
405           open (ientout,file=benefiles(iprot),status="old",
406      &    form="unformatted",access="direct",recl=lenrec_ene(iprot))
407           close(ientout,status="delete")
408         else
409           close (iun,status="delete")
410         endif
411         endif
412         call flush(iout)
413       enddo
414       write (iout,*) "Processor",me,"leaves JEBADELKO"
415       call flush(iout)
416       return
417       end
418 #endif