new files for tmscore and reminimization
[unres.git] / source / unres / src_CSA / unres_csa.F
1 CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
2 C                                                                              C
3 C                                U N R E S                                     C
4 C                                                                              C
5 C Program to carry out conformational search of proteins in a united-residue   C
6 C approximation.                                                               C
7 C                                                                              C
8 CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
9       implicit real*8 (a-h,o-z)
10       include 'DIMENSIONS'
11
12
13 #ifdef MPI
14       include 'mpif.h'
15       include 'COMMON.SETUP'
16 #endif
17       include 'COMMON.TIME1'
18       include 'COMMON.INTERACT'
19       include 'COMMON.NAMES'
20       include 'COMMON.GEO'
21       include 'COMMON.HEADER'
22       include 'COMMON.CONTROL'
23       include 'COMMON.CONTACTS'
24       include 'COMMON.CHAIN'
25       include 'COMMON.VAR'
26       include 'COMMON.IOUNITS'
27       include 'COMMON.FFIELD'
28 c      include 'COMMON.REMD'
29 c      include 'COMMON.MD'
30       include 'COMMON.SBRIDGE'
31       double precision hrtime,mintime,sectime
32       character*64 text_mode_calc(-2:14) /'test',
33      & 'SC rotamer distribution',
34      & 'Energy evaluation or minimization',
35      & 'Regularization of PDB structure',
36      & 'Threading of a sequence on PDB structures',
37      & 'Monte Carlo (with minimization) ',
38      & 'Energy minimization of multiple conformations',
39      & 'Checking energy gradient',
40      & 'Entropic sampling Monte Carlo (with minimization)',
41      & 'Energy map',
42      & 'CSA calculations',
43      & 'Not used 9',
44      & 'Not used 10',
45      & 'Soft regularization of PDB structure',
46      & 'Mesoscopic molecular dynamics (MD) ',
47      & 'Not used 13',
48      & 'Replica exchange molecular dynamics (REMD)'/
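c The strings above label the calculation modes selected by MODECALC
c (array index -2..14); e.g. modecalc=0 requests a single energy
c evaluation or minimization and modecalc=8 runs the CSA search.
c The matching entry is echoed in the banner printed below.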
49       external ilen
50
51 c      call memmon_print_usage()
52
53       call init_task
54       if (me.eq.king)
55      & write(iout,*)'### LAST MODIFIED  11/03/09 1:19PM by czarek'  
56       if (me.eq.king) call cinfo
57 C Read force field parameters and job setup data
58       call readrtns
59       call flush(iout)
60 C
61       if (me.eq.king .or. .not. out1file) then
62        write (iout,'(2a/)') 
63      & text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))),
64      & ' calculation.' 
65        if (minim) write (iout,'(a)') 
66      &  'Conformations will be energy-minimized.'
67        write (iout,'(80(1h*)/)') 
68       endif
69       call flush(iout)
70 C
71 c      if (modecalc.eq.-2) then
72 c        call test
73 c        stop
74 c      else if (modecalc.eq.-1) then
75 c        write(iout,*) "call check_sc_map next"
76 c        call check_bond
77 c        stop
78 c      endif
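c Under MPI the fine-grained slaves (fg_rank>0) branch into
c ergastulum, where they serve energy and gradient requests from their
c coarse-grained master; only the coarse-grained processors execute
c the MODECALC dispatch below.  In this src_CSA build the active
c branches are 0 (exec_eeval_or_minim), 4 (exec_mult_eeval_or_minim),
c 5 (exec_checkgrad) and 8 (exec_CSA); any other value falls through
c to the 'not supported' message.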
79 #ifdef MPI
80       if (fg_rank.gt.0) then
81 C Fine-grain slaves just do energy and gradient components.
82         call ergastulum ! slave workhouse in Latin
83       else
84 #endif
85       if (modecalc.eq.0) then
86         call exec_eeval_or_minim
87 c      else if (modecalc.eq.1) then
88 c        call exec_regularize
89 c      else if (modecalc.eq.2) then
90 c        call exec_thread
91 c      else if (modecalc.eq.3 .or. modecalc .eq.6) then
92 c        call exec_MC
93       else if (modecalc.eq.4) then
94         call exec_mult_eeval_or_minim
95       else if (modecalc.eq.5) then
96          call exec_checkgrad
97 c      else if (ModeCalc.eq.7) then
98 c        call exec_map
99       else if (ModeCalc.eq.8) then
100         call exec_CSA
101 c      else if (modecalc.eq.11) then
102 c        call exec_softreg
103 c      else if (modecalc.eq.12) then
104 c        call exec_MD
105 c      else if (modecalc.eq.14) then
106 c        call exec_MREMD
107       else
108         write (iout,'(a,i5)') 'This calculation type is not supported: ',
109      &   ModeCalc
110       endif
111 #ifdef MPI
112       endif
113 C Finish task.
114       if (fg_rank.eq.0) call finish_task
115 c      call memmon_print_usage()
116 #ifdef TIMING
117        call print_detailed_timing
118 #endif
119       call MPI_Finalize(ierr)
120       stop 'Bye Bye...'
121 #else
122       call dajczas(tcpu(),hrtime,mintime,sectime)
123       stop '********** Program terminated normally.'
124 #endif
125       end
126 c---------------------------------------------------------------------------
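c Single-point energy evaluation and (optionally) local minimization
c of the current conformation (MODECALC=0).  The chain is rebuilt and
c ETOTAL is called; when MINIM is set the structure is additionally
c minimized (see the branch below).  In either case the result is
c compared with the reference structure if REFSTR is on and written
c out in PDB and/or MOL2 format according to OUTPDB/OUTMOL2.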
127       subroutine exec_eeval_or_minim
128       implicit real*8 (a-h,o-z)
129       include 'DIMENSIONS'
130 #ifdef MPI
131       include 'mpif.h'
132 #endif
133       include 'COMMON.SETUP'
134       include 'COMMON.TIME1'
135       include 'COMMON.INTERACT'
136       include 'COMMON.NAMES'
137       include 'COMMON.GEO'
138       include 'COMMON.HEADER'
139       include 'COMMON.CONTROL'
140       include 'COMMON.CONTACTS'
141       include 'COMMON.CHAIN'
142       include 'COMMON.VAR'
143       include 'COMMON.IOUNITS'
144       include 'COMMON.FFIELD'
145 c      include 'COMMON.REMD'
146 c      include 'COMMON.MD'
147       include 'COMMON.SBRIDGE'
148       common /srutu/ icall
149       double precision energy(0:n_ene)
150       double precision energy_long(0:n_ene),energy_short(0:n_ene)
151       if (indpdb.eq.0) call chainbuild
152 c      time00=MPI_Wtime()
153       call chainbuild_cart
154 c      if (split_ene) then
155 c       print *,"Processor",myrank," after chainbuild"
156 c       icall=1
157 c       call etotal_long(energy_long(0))
158 c       write (iout,*) "Printing long range energy"
159 c       call enerprint(energy_long(0))
160 c       call etotal_short(energy_short(0))
161 c       write (iout,*) "Printing short range energy"
162 c       call enerprint(energy_short(0))
163 c       do i=0,n_ene
164 c         energy(i)=energy_long(i)+energy_short(i)
165 c         write (iout,*) i,energy_long(i),energy_short(i),energy(i)
166 c       enddo
167 c       write (iout,*) "Printing long+short range energy"
168 c       call enerprint(energy(0))
169 c      endif
170       call etotal(energy(0))
171 c      time_ene=MPI_Wtime()-time00
172 c      write (iout,*) "Time for energy evaluation",time_ene
173       print *,"after etotal"
174       etota = energy(0)
175       etot = etota
176       call enerprint(energy(0))
177 c      call hairpin(.true.,nharp,iharp)
178 c      call secondary2(.true.)
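c Minimization branch: with DCCART the energy is minimized directly in
c Cartesian (virtual-bond vector) coordinates by minim_dc; otherwise
c the geometry is converted to the internal-variable vector VARIA and
c minimized by MINIMIZE (SUMSL).  When the start came from a PDB file
c (INDPDB.ne.0) the virtual-bond lengths are first regularized with
c bond_regular and the chain is rebuilt.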
179       if (minim) then
180
181         if (dccart) then
182           print *, 'Calling MINIM_DC'
183 c          time1=MPI_WTIME()
184           call minim_dc(etot,iretcode,nfun)
185         else
186           if (indpdb.ne.0) then 
187             call bond_regular
188             call chainbuild
189           endif
190           call geom_to_var(nvar,varia)
191           print *,'Calling MINIMIZE.'
192 c          time1=MPI_WTIME()
193           call minimize(etot,varia,iretcode,nfun)
194         endif
195         print *,'SUMSL return code is',iretcode,' eval ',nfun
196 c        evals=nfun/(MPI_WTIME()-time1)
197 c        print *,'# eval/s',evals
198         print *,'refstr=',refstr
199 c        call hairpin(.true.,nharp,iharp)
200 c        call secondary2(.true.)
201         call etotal(energy(0))
202         etot = energy(0)
203         call enerprint(energy(0))
204
205         call intout
206         call briefout(0,etot)
207         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
208         write (iout,'(a,i3)') 'SUMSL return code:',iretcode
209         write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
210 c       write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
211       else
212         print *,'refstr=',refstr
213         if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
214         call briefout(0,etot)
215       endif
216       if (outpdb) call pdbout(etot,titel(:32),ipdb)
217       if (outmol2) call mol2out(etot,titel(:32))
218       return
219       end
220
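c Numerical check of the energy gradient (MODECALC=5).  A bath
c temperature is read from the main input, the force-field weights are
c rescaled to it, the energy of the current conformation is evaluated
c and printed, and control branches on ICHECKGRAD to one of the three
c checking routines below.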
221       subroutine exec_checkgrad
222       implicit real*8 (a-h,o-z)
223       include 'DIMENSIONS'
224 #ifdef MPI
225       include 'mpif.h'
226 #endif
227       include 'COMMON.SETUP'
228       include 'COMMON.TIME1'
229       include 'COMMON.INTERACT'
230       include 'COMMON.NAMES'
231       include 'COMMON.GEO'
232       include 'COMMON.HEADER'
233       include 'COMMON.CONTROL'
234       include 'COMMON.CONTACTS'
235       include 'COMMON.CHAIN'
236       include 'COMMON.VAR'
237       include 'COMMON.IOUNITS'
238       include 'COMMON.FFIELD'
239 c      include 'COMMON.REMD'
240       include 'COMMON.MD_'
241       include 'COMMON.SBRIDGE'
242       common /srutu/ icall
243       double precision energy(0:max_ene)
244 c      do i=2,nres
245 c        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
246 c        if (itype(i).ne.10) 
247 c     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
248 c      enddo
249       if (indpdb.eq.0) call chainbuild
250 c      do i=0,nres
251 c        do j=1,3
252 c          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
253 c        enddo
254 c      enddo
255 c      do i=1,nres-1
256 c        if (itype(i).ne.10) then
257 c          do j=1,3
258 c            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
259 c          enddo
260 c        endif
261 c      enddo
262 c      do j=1,3
263 c        dc(j,0)=ran_number(-0.2d0,0.2d0)
264 c      enddo
265       usampl=.true.
266       totT=1.d0
267       eq_time=0.0d0
268 c      call read_fragments
269       read(inp,*) t_bath
270       call rescale_weights(t_bath)
271       call chainbuild_cart
272       call cartprint
273       call intout
274       icall=1
275       call etotal(energy(0))
276       etot = energy(0)
277       call enerprint(energy(0))
278       write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
279       print *,'icheckgrad=',icheckgrad
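c Computed GOTO: icheckgrad=1, 2 or 3 transfers control to label 10,
c 20 or 30, respectively; any other value falls through to the next
c statement (here label 10).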
280       goto (10,20,30) icheckgrad
281   10  call check_ecartint
282       return
283   20  call check_cartgrad
284       return
285   30  call check_eint
286       return
287       end
288 c---------------------------------------------------------------------------
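c Driver for the Conformational Space Annealing (CSA) search
c (MODECALC=8).  All the actual work is done in the parallel routine
c TOGETHER; without MPI only a message is printed, since this CSA
c implementation requires a parallel run.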
289       subroutine exec_CSA
290 #ifdef MPI
291       include "mpif.h"
292 #endif
293       include 'DIMENSIONS'
294       include 'COMMON.IOUNITS'
295 C Conformational Space Annealing programmed by Jooyoung Lee.
296 C This method works only with parallel machines!
297 #ifdef MPI
298       call together
299 #else
300       write (iout,*) "CSA works on parallel machines only"
301 #endif
302       return
303       end
304 c---------------------------------------------------------------------------
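c Energy evaluation or minimization of multiple conformations
c (MODECALC=4) read from the INTIN file, either as Cartesian
c coordinates (READ_CART) or as variable angles.  The worker
c processors branch immediately into minim_mcmf; the master (king)
c reads the conformations and, when MINIM is on, farms the
c minimization tasks out to them.  For every conformation a summary
c line (energy components, total energy and, with REFSTR, RMSD,
c native-contact fractions, contact order and TM-score) is written to
c the ISTAT file.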
305       subroutine exec_mult_eeval_or_minim
306       implicit real*8 (a-h,o-z)
307       include 'DIMENSIONS'
308       include 'mpif.h'
309       integer muster(mpi_status_size)
310       include 'COMMON.SETUP'
311       include 'COMMON.TIME1'
312       include 'COMMON.INTERACT'
313       include 'COMMON.NAMES'
314       include 'COMMON.GEO'
315       include 'COMMON.HEADER'
316       include 'COMMON.CONTROL'
317       include 'COMMON.CONTACTS'
318       include 'COMMON.CHAIN'
319       include 'COMMON.VAR'
320       include 'COMMON.IOUNITS'
321       include 'COMMON.FFIELD'
322       include 'COMMON.SBRIDGE'
323       double precision varia(maxvar)
324       integer ind(6)
325       double precision energy(0:n_ene)
326       logical eof
327       eof=.false.
328
329       if(me.ne.king) then
330         call minim_mcmf
331         return
332       endif
333
334       close (intin)
335       open(intin,file=intinname,status='old')
336       write (istat,'(a5,100a12)')"#    ",
337      &  (wname(print_order(i)),i=1,nprint_ene)
338       if (refstr) then
339         write (istat,'(a5,100a12)')"#    ",
340      &   (ename(print_order(i)),i=1,nprint_ene),
341      &   "ETOT total","RMSD","nat.contact","nnt.contact",
342      &   "cont.order","TMscore"
343       else
344         write (istat,'(a5,100a12)')"#    ",
345      &    (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
346       endif
347
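c Without minimization each conformation is read in, its energy is
c evaluated with ETOTAL and a summary line is written to ISTAT; the
c loop ends at label 1100 when INTIN is exhausted.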
348       if (.not.minim) then
349         do while (.not. eof)
350           if (read_cart) then
351             read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
352             call read_x(intin,*11)
353 c Broadcast the order to compute internal coordinates to the slaves.
354             if (nfgtasks.gt.1)
355      &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
356             call int_from_cart1(.false.)
357           else
358             read (intin,'(i5)',end=1100,err=1100) iconf
359             call read_angles(intin,*11)
360             call geom_to_var(nvar,varia)
361             call chainbuild
362           endif
363           write (iout,'(a,i7)') 'Conformation #',iconf
364           call etotal(energy(0))
365           call briefout(iconf,energy(0))
366           call enerprint(energy(0))
367           etot=energy(0)
368           if (refstr) then 
369             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
370             call calc_tmscore(tm,.true.)
371             write (istat,'(i5,100(f12.3))') iconf,
372      &      (energy(print_order(i)),i=1,nprint_ene),etot,
373      &       rms,frac,frac_nn,co,tm
374           else
375             write (istat,'(i5,100(f12.3))') iconf,
376      &     (energy(print_order(i)),i=1,nprint_ene),etot
377           endif
378         enddo
379 1100    continue
380         goto 1101
381       endif
382
383       mm=0
384       imm=0
385       nft=0
386       ene0=0.0d0
387       n=0
388       iconf=0
      nmin=0
      nf_mcmf=0
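c Master side of the task farm.  The first NODES-1 conformations are
c sent directly to the workers; after that, each received result is
c re-evaluated, reported, and a fresh task is sent back to the same
c worker (MAN).  The 6-element IND array is the job descriptor: IND(2)
c carries the conformation number and the remaining entries are filled
c in by the worker (IND(4) is accumulated into nf_mcmf, IND(5) is
c echoed together with the conformation number).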
389       do while (.not. eof)
390         mm=mm+1
391         if (mm.lt.nodes) then
392           if (read_cart) then
393             read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
394             call read_x(intin,*11)
395 c Broadcast the order to compute internal coordinates to the slaves.
396             if (nfgtasks.gt.1) 
397      &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
398             call int_from_cart1(.false.)
399           else
400             read (intin,'(i5)',end=11,err=11) iconf
401             call read_angles(intin,*11)
402             call geom_to_var(nvar,varia)
403             call chainbuild
404           endif
405
406           n=n+1
407           write (iout,*) 'Conformation #',iconf,' read'
408          imm=imm+1
409          ind(1)=1
410          ind(2)=n
411          ind(3)=0
412          ind(4)=0
413          ind(5)=0
414          ind(6)=0
415          ene0=0.0d0
416          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,
417      *                  ierr)
418          call mpi_send(varia,nvar,mpi_double_precision,mm,
419      *                  idreal,CG_COMM,ierr)
420          call mpi_send(ene0,1,mpi_double_precision,mm,
421      *                  idreal,CG_COMM,ierr)
422 c         print *,'task ',n,' sent to worker ',mm,nvar
423         else
424          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,
425      *                 CG_COMM,muster,ierr)
426          man=muster(mpi_source)
427 c         print *,'receiving result from worker ',man,' (',iii1,iii,')'
428          call mpi_recv(varia,nvar,mpi_double_precision, 
429      *               man,idreal,CG_COMM,muster,ierr)
430          call mpi_recv(ene,1,
431      *               mpi_double_precision,man,idreal,
432      *               CG_COMM,muster,ierr)
433          call mpi_recv(ene0,1,
434      *               mpi_double_precision,man,idreal,
435      *               CG_COMM,muster,ierr)
436 c         print *,'result received from worker ',man,' sending now'
437
438           call var_to_geom(nvar,varia)
439           call chainbuild
440           call etotal(energy(0))
441           iconf=ind(2)
442           write (iout,*)
443           write (iout,*)
444           write (iout,*) 'Conformation #',iconf,ind(5)
445
446           etot=energy(0)
447           call enerprint(energy(0))
448           call briefout(iconf,etot)
449           if (refstr) then 
450             call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
451             call calc_tmscore(tm,.true.)
452             write (istat,'(i5,100(f12.3))') iconf,
453      &     (energy(print_order(i)),i=1,nprint_ene),etot,
454      &     rms,frac,frac_nn,co,tm
455           else
456             write (istat,'(i5,100(f12.3))') iconf,
457      &     (energy(print_order(i)),i=1,nprint_ene),etot
458           endif
459
460           imm=imm-1
461           if (read_cart) then
462             read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
463             call read_x(intin,*11)
464 c Broadcast the order to compute internal coordinates to the slaves.
465             if (nfgtasks.gt.1)
466      &        call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
467             call int_from_cart1(.false.)
468           else
469             read (intin,'(i5)',end=1101,err=1101) iconf
470             call read_angles(intin,*11)
471             call geom_to_var(nvar,varia)
472             call chainbuild
473           endif
474           n=n+1
475           write (iout,*) 'Conformation #',iconf,' read'
476           imm=imm+1
477           ind(1)=1
478           ind(2)=n
479           ind(3)=0
480           ind(4)=0
481           ind(5)=0
482           ind(6)=0
483           call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,
484      *                  ierr)
485           call mpi_send(varia,nvar,mpi_double_precision,man, 
486      *                  idreal,CG_COMM,ierr)
487           call mpi_send(ene0,1,mpi_double_precision,man,
488      *                  idreal,CG_COMM,ierr)
489           nf_mcmf=nf_mcmf+ind(4)
490           nmin=nmin+1
491         endif
492       enddo
493 11    continue
494       do j=1,imm
495         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,
496      *               CG_COMM,muster,ierr)
497         man=muster(mpi_source)
498         call mpi_recv(varia,nvar,mpi_double_precision, 
499      *               man,idreal,CG_COMM,muster,ierr)
500         call mpi_recv(ene,1,
501      *               mpi_double_precision,man,idreal,
502      *               CG_COMM,muster,ierr)
503         call mpi_recv(ene0,1,
504      *               mpi_double_precision,man,idreal,
505      *               CG_COMM,muster,ierr)
506
507         call var_to_geom(nvar,varia)
508         call chainbuild
509         call etotal(energy(0))
510         iconf=ind(2)
511         write (iout,*)
512         write (iout,*)
513         write (iout,*) 'Conformation #',iconf,ind(5)
514
515         etot=energy(0)
516         call enerprint(energy(0))
517         call briefout(iconf,etot)
518         if (refstr) then 
519           call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
520           call calc_tmscore(tm,.true.)
521           write (istat,'(i5,100(f12.3))') iconf,
522      &   (energy(print_order(i)),i=1,nprint_ene),etot,
523      &   rms,frac,frac_nn,co,tm
524         else
525           write (istat,'(i5,100(f12.3))') iconf,
526      &    (energy(print_order(i)),i=1,nprint_ene),etot
527         endif
528         nmin=nmin+1
529       enddo
530 1101  continue
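c All conformations have been processed; send a zeroed job descriptor
c to every worker as the stop signal.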
531       do i=1, nodes-1
532          ind(1)=0
533          ind(2)=0
534          ind(3)=0
535          ind(4)=0
536          ind(5)=0
537          ind(6)=0
538          call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,
539      *                  ierr)
540       enddo
541       return
542       end
543 c---------------------------------------------------------------------------
544