source/unres/unres.F90 (unres4.git)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                                                                              !
!                                U N R E S                                     !
!                                                                              !
! Program to carry out conformational search of proteins in a united-residue   !
! approximation.                                                               !
!                                                                              !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      program unres

      use io_units
      use control_data
      use control, only:tcpu
      use io_base, only:ilen
      use geometry, only:chainbuild
      use control, only:dajczas
      use check_bond_
      use energy
      use compare, only: test
      use map_
      use MDyn
      use MPI_
      use io, only:readrtns

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use MPI_data ! include 'COMMON.SETUP'
      implicit none
      include 'mpif.h'
      integer :: ierr
#else
      use MPI_data, only: me,king
      implicit none
#endif
!      include 'COMMON.TIME1'
!      include 'COMMON.INTERACT'
!      include 'COMMON.NAMES'
!      include 'COMMON.GEO'
!      include 'COMMON.HEADER'
!      include 'COMMON.CONTROL'
!      include 'COMMON.CONTACTS'
!      include 'COMMON.CHAIN'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.REMD'
!      include 'COMMON.MD'
!      include 'COMMON.SBRIDGE'

      real(kind=8) :: hrtime,mintime,sectime
      character(len=64) :: text_mode_calc(-2:14)
      text_mode_calc(-2) = 'test'
      text_mode_calc(-1) = 'cos'
      text_mode_calc(0) = 'Energy evaluation or minimization'
      text_mode_calc(1) = 'Regularization of PDB structure'
      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
      text_mode_calc(3) = 'Monte Carlo (with minimization) '
      text_mode_calc(4) = 'Energy minimization of multiple conformations'
      text_mode_calc(5) = 'Checking energy gradient'
      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
      text_mode_calc(7) = 'Energy map'
      text_mode_calc(8) = 'CSA calculations'
      text_mode_calc(9) = 'Not used 9'
      text_mode_calc(10) = 'Not used 10'
      text_mode_calc(11) = 'Soft regularization of PDB structure'
      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
      text_mode_calc(13) = 'Not used 13'
      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
!      external ilen
!      call memmon_print_usage()

      call init_task
      if (me.eq.king) &
        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'
      if (me.eq.king) call cinfo
! Read force field parameters and job setup data
      call readrtns
      call flush(iout)
      write (iout,*) "After readrtns"
      call cartprint
!
      if (me.eq.king .or. .not. out1file) then
        write (iout,'(2a/)') &
        text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
        ' calculation.'
        if (minim) write (iout,'(a)') &
        'Conformations will be energy-minimized.'
        write (iout,'(80("*")/)')
      endif
      call flush(iout)
!
      if (modecalc.eq.-2) then
        call test
        stop
      else if (modecalc.eq.-1) then
        write(iout,*) "call check_bond next"
        call check_bond
        stop
      endif
!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"

#ifdef MPI
      if (fg_rank.gt.0) then
! Fine-grain slaves just do energy and gradient components.
        call ergastulum ! slave workhouse in Latin
      else
#endif
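! Dispatch on modecalc (see the text_mode_calc table above); each branch
! delegates to a dedicated exec_* driver defined later in this file.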
      if (modecalc.eq.0) then
        call exec_eeval_or_minim
      else if (modecalc.eq.1) then
        call exec_regularize
      else if (modecalc.eq.2) then
        call exec_thread
      else if (modecalc.eq.3 .or. modecalc.eq.6) then
        call exec_MC
      else if (modecalc.eq.4) then
        call exec_mult_eeval_or_minim
      else if (modecalc.eq.5) then
        call exec_checkgrad
      else if (modecalc.eq.7) then
        call exec_map
      else if (modecalc.eq.8) then
        call exec_CSA
      else if (modecalc.eq.11) then
        call exec_softreg
      else if (modecalc.eq.12) then
        call exec_MD
        call exec_checkgrad
      else if (modecalc.eq.14) then
        call exec_MREMD
      else
        write (iout,'(a,i5)') 'This calculation type is not supported: ',&
         modecalc
      endif

#ifdef MPI
      endif
! Finish task.
      if (fg_rank.eq.0) call finish_task
!      call memmon_print_usage()
#ifdef TIMING
      call print_detailed_timing
#endif
      call MPI_Finalize(ierr)
      stop 'Bye Bye...'
#else
      call dajczas(tcpu(),hrtime,mintime,sectime)
      stop '********** Program terminated normally.'
#endif

      end program !UNRES
!-----------------------------------------------------------------------------
!
!-----------------------------------------------------------------------------
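! exec_MD: driver for a single canonical MD run. Allocates the MD work
! arrays, builds the starting chain (from internal coordinates when
! extconf is set, from Cartesian coordinates otherwise) and calls MD.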
      subroutine exec_MD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry, only:chainbuild,chainbuild_cart
      use MDyn
      use io_units      !include 'COMMON.IOUNITS'
      use compare, only:alloc_compare_arrays
!      use io_common
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      print *,'Start MD'
      call alloc_MD_arrays
      call alloc_compare_arrays
      print *,'After MD alloc'
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling chainbuild"
      if (extconf) then
        call chainbuild
      else
        call chainbuild_cart
      endif
      call MD
      return
      end subroutine exec_MD
!---------------------------------------------------------------------------
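! exec_MREMD: driver for replica-exchange MD. When no replica
! multiplicity list was read (remd_mlist is .false.) each of the nrep
! replicas is given multiplicity 1 before MREMD is called.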
      subroutine exec_MREMD
      use MPI_data      !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use io_units      !include 'COMMON.IOUNITS'
!      use io_common
      use REMD_data     !include 'COMMON.REMD'
      use geometry, only:chainbuild
      use MREMDyn
      use compare, only:alloc_compare_arrays

      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif

      integer :: i
      call alloc_MD_arrays
      call alloc_MREMD_arrays
      call alloc_compare_arrays
!     if (me.eq.king .or. .not. out1file) &
!         write (iout,*) "Calling chainbuild"
!      call chainbuild
      if (me.eq.king .or. .not. out1file) &
         write (iout,*) "Calling REMD",remd_mlist,nrep
      if (remd_mlist) then
        call MREMD
      else
        do i=1,nrep
          remd_m(i)=1
        enddo
        call MREMD
      endif
      return
      end subroutine exec_MREMD
!-----------------------------------------------------------------------------
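! exec_eeval_or_minim: single-point energy evaluation and, when minim is
! set, energy minimization in either Cartesian (minim_dc) or internal
! (minimize) coordinates. With split_ene the long- and short-range
! contributions are also evaluated and printed separately.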
      subroutine exec_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
!      use energy       !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'

      use energy_data
      use MD_data, only: iset
      use io_base
      use geometry, only:chainbuild,chainbuild_cart
      use energy
      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
      use minimm, only:minimize,minim_dc,sc_move
      use compare_data !el
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      integer :: i
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
      integer :: nharp,nft_sc,iretcode,nfun
      integer,dimension(4,nres) :: iharp        !(4,nres/3)(4,maxres/3)
      logical :: fail
      real(kind=8) :: rms,frac,frac_nn,co

      integer :: j,k
      if (iset.eq.0) iset=1
      call alloc_compare_arrays
      if ((indpdb.eq.0).and.(.not.read_cart)) then
        call chainbuild
        write(iout,*) 'Warning: Calling chainbuild'
      endif
#ifdef MPI
      time00=MPI_Wtime()
#endif
      call chainbuild_cart
!      write(iout,*)"in exec_eeval or minimim",split_ene
!      do j=1,2*nres+2
!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
!      enddo
      if (split_ene) then
!      write(iout,*)"in exec_eeval or minimim"

       print *,"Processor",myrank," after chainbuild"
       icall=1

       call etotal_long(energy_long)
       write (iout,*) "Printing long range energy"
       call enerprint(energy_long)

       call etotal_short(energy_short)
       write (iout,*) "Printing short range energy"
       call enerprint(energy_short)
       do i=0,n_ene
         energy_(i)=energy_long(i)+energy_short(i)
         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
       enddo
       write (iout,*) "Printing long+short range energy"
       call enerprint(energy_)
      endif

      call etotal(energy_)
#ifdef MPI
      time_ene=MPI_Wtime()-time00
#endif
      write (iout,*) "Time for energy evaluation",time_ene
      print *,"after etotal"
      etota = energy_(0)
      etot = etota
      call enerprint(energy_)
      if (.false.) then
        call hairpin(.true.,nharp,iharp)
        call secondary2(.true.)
      endif
      if (minim) then
!rc overlap test
        print *,"overlap",searchsc,overlapsc
        if (overlapsc) then
          print *, 'Calling OVERLAP_SC'
          call overlap_sc(fail)
        endif

        if (searchsc) then
          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
          print *,'SC_move',nft_sc,etot
          write(iout,*) 'SC_move',nft_sc,etot
        endif

        if (dccart) then
          print *, 'Calling MINIM_DC'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_ecartint !el
          call minim_dc(etot,iretcode,nfun)
!    call check_ecartint !el
        else
          if (indpdb.ne.0) then
            call bond_regular
            write(iout,*) 'Warning: Calling chainbuild'
            call chainbuild
          endif
          call geom_to_var(nvar,varia)
          print *,'Calling MINIMIZE.'
#ifdef MPI
          time1=MPI_WTIME()
#endif
!    call check_eint
!        call exec_checkgrad   !el
          call minimize(etot,varia,iretcode,nfun)
!     call check_eint
!        call exec_checkgrad   !el
        endif
        print *,'SUMSL return code is',iretcode,' eval ',nfun
#ifdef MPI
        evals=nfun/(MPI_WTIME()-time1)
#endif
        print *,'# eval/s',evals
        print *,'refstr=',refstr
!        call hairpin(.true.,nharp,iharp)
!        call secondary2(.true.)
        call etotal(energy_)
        etot = energy_(0)
        call enerprint(energy_)

        call intout
        call briefout(0,etot)
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
        write (iout,'(a,f16.3)') '# of energy evaluations/sec:',evals
      else
        print *,'refstr=',refstr
        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
        print *,"after rms_nac_nnc"
        call briefout(0,etot)
      endif
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      write(iout,*) "after exec_eeval_or_minim"
      return
      end subroutine exec_eeval_or_minim
!-----------------------------------------------------------------------------
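! exec_regularize: regularizes the chain against the reference structure
! cref and reports the resulting energy; component 14 of the energy array
! (apparently the constraint term set up by gen_dist_constr) is
! subtracted from the printed total.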
      subroutine exec_regularize
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!     use REMD          !include 'COMMON.REMD'
!     use MD            !include 'COMMON.MD'
      use regularize_
      use compare
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      real(kind=8) :: energy_(0:n_ene)
      real(kind=8) :: etot
      real(kind=8) :: rms,frac,frac_nn,co
      integer :: iretcode

      call alloc_compare_arrays
      call gen_dist_constr
      call sc_conf
      call intout
      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
      call etotal(energy_)
      energy_(0)=energy_(0)-energy_(14)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      if (outpdb) call pdbout(etot,titel(:32),ipdb)
      if (outmol2) call mol2out(etot,titel(:32))
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
      return
      end subroutine exec_regularize
!-----------------------------------------------------------------------------
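! exec_thread: threads the sequence onto PDB template structures
! (modecalc 2); the actual work is done in thread_seq.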
      subroutine exec_thread
!      use MPI_data             !include 'COMMON.SETUP'
      use compare
      implicit none
!      include 'DIMENSIONS'
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_compare_arrays
      call thread_seq
      return
      end subroutine exec_thread
!-----------------------------------------------------------------------------
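! exec_MC: Monte Carlo driver (modecalc 3 or 6). With minim set it runs
! MCM (do_mcm) or entropic-sampling MCM (entmcm); otherwise plain
! Monte Carlo.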
      subroutine exec_MC
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL'
      use geometry_data
      use energy_data
      use mcm_md
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      character(len=10) :: nodeinfo
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer :: ipar
#ifdef MPI
      include "mpif.h"
#endif
      call alloc_MCM_arrays
      call mcm_setup
      if (minim) then
        if (modecalc.eq.3) then
          call do_mcm(ipar)
        else
          call entmcm
        endif
      else
        call monte_carlo
      endif
      return
      end subroutine exec_MC
!-----------------------------------------------------------------------------
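! exec_mult_eeval_or_minim: energy evaluation/minimization of multiple
! conformations read from intin. In the MPI version the master farms the
! conformations out to worker processes; without MPI the conformations
! are processed sequentially.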
      subroutine exec_mult_eeval_or_minim
      use MPI_data              !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
      use names
      use energy_data   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
!      use MD           !include 'COMMON.MD'
      use io_base
      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
      use energy, only:etotal,enerprint
      use compare, only:rms_nac_nnc
      use minimm, only:minimize!,minim_mcmf
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      use minimm, only:minim_mcmf
      implicit none
      include 'mpif.h'
      integer :: ierror,ierr
      integer :: man
      integer,dimension(mpi_status_size) :: muster
#else
      implicit none
#endif
      real(kind=8) :: varia(6*nres)     !(maxvar) (maxvar=6*maxres)
      integer,dimension(6) :: ind
      real(kind=8) :: energy_(0:n_ene)
      logical :: eof
      real(kind=8) :: etot,ene0
      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,&
         nf_mcmf,j
      real(kind=8) :: rms,frac,frac_nn,co,time,ene

      eof=.false.
#ifdef MPI
      if(me.ne.king) then
        call minim_mcmf
        return
      endif

      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
        (wname(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,20a12)')"#    ",&
          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif

      if (.not.minim) then
        do while (.not. eof)
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1100,err=1100) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild1'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          call etotal(energy_)
          call briefout(iconf,energy_(0))
          call enerprint(energy_)
          etot=energy_(0)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,20(f12.3))') iconf,&
            (energy_(print_order(i)),i=1,nprint_ene),etot,&
             rms,frac,frac_nn,co
!jlee end
          else
            write (istat,'(i5,16(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif
        enddo
1100    continue
        goto 1101
      endif

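! Master side of a simple task farm: conformations are read in and sent
! to workers 1..nodes-1; once all workers are busy, a result is received
! and the freed worker immediately gets the next conformation.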
      mm=0
      imm=0
      nft=0
      ene0=0.0d0
      n=0
      iconf=0
      nmin=0
      nf_mcmf=0
!      do n=1,nzsc
      do while (.not. eof)
        mm=mm+1
        if (mm.lt.nodes) then
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=11,err=11) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild2'
            call chainbuild
          endif
          write (iout,'(a,i7)') 'Conformation #',iconf
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          ene0=0.0d0
          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,mm,&
                        idreal,CG_COMM,ierr)
!         print *,'task ',n,' sent to worker ',mm,nvar
        else
          call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                       CG_COMM,muster,ierr)
          man=muster(mpi_source)
!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
          call mpi_recv(varia,nvar,mpi_double_precision,&
                      man,idreal,CG_COMM,muster,ierr)
          call mpi_recv(ene,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
          call mpi_recv(ene0,1,&
                      mpi_double_precision,man,idreal,&
                      CG_COMM,muster,ierr)
!         print *,'result received from worker ',man,' sending now'

          call var_to_geom(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild3'
          call chainbuild
          call etotal(energy_)
          iconf=ind(2)
          write (iout,*)
          write (iout,*)
          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

          etot=energy_(0)
          call enerprint(energy_)
          call briefout(iconf,etot)
!          if (minim) call briefout(it,etot)
          if (refstr) then
            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
            write (istat,'(i5,19(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot,&
           rms,frac,frac_nn,co
          else
            write (istat,'(i5,15(f12.3))') iconf,&
           (energy_(print_order(i)),i=1,nprint_ene),etot
          endif

          imm=imm-1
          if (read_cart) then
            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
            call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
            if (nfgtasks.gt.1) &
              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
            call int_from_cart1(.false.)
          else
            read (intin,'(i5)',end=1101,err=1101) iconf
            call read_angles(intin,*11)
            call geom_to_var(nvar,varia)
            write(iout,*) 'Warning: Calling chainbuild4'
            call chainbuild
          endif
          n=n+1
          imm=imm+1
          ind(1)=1
          ind(2)=n
          ind(3)=0
          ind(4)=0
          ind(5)=0
          ind(6)=0
          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
                        ierr)
          call mpi_send(varia,nvar,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          call mpi_send(ene0,1,mpi_double_precision,man,&
                        idreal,CG_COMM,ierr)
          nf_mcmf=nf_mcmf+ind(4)
          nmin=nmin+1
        endif
      enddo
11    continue
      do j=1,imm
        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
                     CG_COMM,muster,ierr)
        man=muster(mpi_source)
        call mpi_recv(varia,nvar,mpi_double_precision,&
                     man,idreal,CG_COMM,muster,ierr)
        call mpi_recv(ene,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)
        call mpi_recv(ene0,1,&
                     mpi_double_precision,man,idreal,&
                     CG_COMM,muster,ierr)

        call var_to_geom(nvar,varia)
        write(iout,*) 'Warning: Calling chainbuild5'
        call chainbuild
        call etotal(energy_)
        iconf=ind(2)
        write (iout,*)
        write (iout,*)
        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)

        etot=energy_(0)
        call enerprint(energy_)
        call briefout(iconf,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,19(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot,&
         rms,frac,frac_nn,co
        else
          write (istat,'(i5,15(f12.3))') iconf,&
          (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
        nmin=nmin+1
      enddo
1101  continue
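! A zeroed task descriptor (ind(1)=0) is the termination signal for the
! workers.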
      do i=1, nodes-1
         ind(1)=0
         ind(2)=0
         ind(3)=0
         ind(4)=0
         ind(5)=0
         ind(6)=0
         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
                        ierr)
      enddo
#else
      close (intin)
      open(intin,file=intinname,status='old')
      write (istat,'(a5,20a12)')"#    ",&
         (wname(print_order(i)),i=1,nprint_ene)
      write (istat,'("#    ",20(1pe12.4))') &
         (weights(print_order(i)),i=1,nprint_ene)
      if (refstr) then
        write (istat,'(a5,20a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),&
         "ETOT total","RMSD","nat.contact","nnt.contact"
      else
        write (istat,'(a5,14a12)')"#    ",&
         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
      endif
      do while (.not. eof)
        if (read_cart) then
          read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
          call read_x(intin,*11)
#ifdef MPI
! Broadcast the order to compute internal coordinates to the slaves.
          if (nfgtasks.gt.1) &
            call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
#endif
          call int_from_cart1(.false.)
        else
          read (intin,'(i5)',end=11,err=11) iconf
          call read_angles(intin,*11)
          call geom_to_var(nvar,varia)
          write(iout,*) 'Warning: Calling chainbuild5'
          call chainbuild
        endif
        write (iout,'(a,i7)') 'Conformation #',iconf
        if (minim) call minimize(etot,varia,iretcode,nfun)
        call etotal(energy_)

        etot=energy_(0)
        call enerprint(energy_)
        if (minim) call briefout(iconf,etot)
        if (refstr) then
          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
          write (istat,'(i5,18(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),&
         etot,rms,frac,frac_nn,co
!jlee end
        else
          write (istat,'(i5,14(f12.3))') iconf,&
         (energy_(print_order(i)),i=1,nprint_ene),etot
        endif
      enddo
   11 continue
#endif
      return
      end subroutine exec_mult_eeval_or_minim
!-----------------------------------------------------------------------------
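! exec_checkgrad: numerical check of the analytical energy gradient.
! icheckgrad selects the flavor: 1 - Cartesian check (check_ecartint),
! 2 - Cartesian gradient check (check_cartgrad), 3 - internal-coordinate
! check (check_eint).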
      subroutine exec_checkgrad
!      use MPI_data             !include 'COMMON.SETUP'
      use control_data  !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
      use io_units      !include 'COMMON.IOUNITS'
!el      use energy_data, only:icall    !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
      use geometry_data !include 'COMMON.GEO''COMMON.CHAIN'
!      use REMD         !include 'COMMON.REMD'
      use MD_data               !include 'COMMON.MD'
      use io_base, only:intout
      use io_config, only:read_fragments
      use geometry
      use energy
      use comm_srutu
      implicit none
!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
!el      integer :: icall
!el      common /srutu/ icall
      real(kind=8) :: energy_(0:max_ene)
      real(kind=8) :: etot
      integer :: i
!      do i=2,nres
!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
!        if (itype(i).ne.10)
!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
!      enddo
      if (indpdb.eq.0) then
        write(iout,*) 'Warning: Calling chainbuild'
        call chainbuild
      endif
!      do i=0,nres
!        do j=1,3
!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
!        enddo
!      enddo
!      do i=1,nres-1
!        if (itype(i).ne.10) then
!          do j=1,3
!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
!          enddo
!        endif
!      enddo
!      do j=1,3
!        dc(j,0)=ran_number(-0.2d0,0.2d0)
!      enddo
#ifdef UMB
      usampl=.true.
#endif
      totT=1.d0
      eq_time=0.0d0
      call read_fragments
      call chainbuild_cart
      call cartprint
      call intout
      icall=1
      write (iout,*) "before etotal"
      call etotal(energy_(0))
      etot = energy_(0)
      call enerprint(energy_(0))
      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
      print *,'icheckgrad=',icheckgrad
      select case (icheckgrad)
      case (2)
        call check_cartgrad
        call check_cartgrad
      case (3)
        call check_eint
      case default ! icheckgrad.eq.1, and the old computed-goto fall-through
        call check_ecartint
        call check_ecartint
      end select
      return
      end subroutine exec_checkgrad
!-----------------------------------------------------------------------------
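! exec_map: energy-map calculation (modecalc 7); allocates the map
! arrays, reads the map specification and calls map.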
      subroutine exec_map
!      use map_data
      use map_
      use io_config, only:map_read
      implicit none
! Energy maps
      call alloc_map_arrays
      call map_read
      call map
      return
      end subroutine exec_map
!-----------------------------------------------------------------------------
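! exec_CSA: Conformational Space Annealing (modecalc 8); available only
! in MPI builds.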
      subroutine exec_CSA

      use io_units      !include 'COMMON.IOUNITS'
      use CSA
      implicit none
#ifdef MPI
      include "mpif.h"
#endif
!      include 'DIMENSIONS'
! Conformational Space Annealing programmed by Jooyoung Lee.
! This method works only with parallel machines!
#ifdef MPI
      call alloc_CSA_arrays
      call together
#else
      write (iout,*) "CSA works on parallel machines only"
#endif
      return
      end subroutine exec_CSA
!-----------------------------------------------------------------------------
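! exec_softreg: soft regularization of a PDB structure (modecalc 11);
! recognizes the secondary structure (unless it was supplied in the PDB
! file), applies softreg and reports the resulting energy.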
      subroutine exec_softreg
      use io_units      !include 'COMMON.IOUNITS'
      use control_data  !include 'COMMON.CONTROL'
      use energy_data
      use io_base, only:intout,briefout
      use geometry, only:chainbuild
      use energy
      use compare
      implicit none
!      include 'DIMENSIONS'
      real(kind=8) :: energy_(0:n_ene)
!el local variables
      real(kind=8) :: rms,frac,frac_nn,co,etot
      logical :: debug = .false.

      call alloc_compare_arrays
      write(iout,*) 'Warning: Calling chainbuild'
      call chainbuild
      call etotal(energy_)
      call enerprint(energy_)
      if (.not.lsecondary) then
        write(iout,*) 'Calling secondary structure recognition'
        call secondary2(debug)
      else
        write(iout,*) 'Using secondary structure supplied in pdb'
      endif

      call softreg

      call etotal(energy_)
      etot=energy_(0)
      call enerprint(energy_)
      call intout
      call briefout(0,etot)
      call secondary2(.true.)
      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
      return
      end subroutine exec_softreg
!-----------------------------------------------------------------------------
! minimize_p.F
!-----------------------------------------------------------------------------
!el#ifdef MPI
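! ergastulum: event loop for the fine-grain slave processors. The master
! broadcasts an integer order code on FG_COMM and each slave executes the
! corresponding task: 0/2/3 - full/short-range/long-range energy,
! 1 - gradient summation, 4 - Ginv multiply, 5 - MD setup, 6 - internal
! coordinates from Cartesians, 7 - Cartesian chain rebuild,
! 8 - internal-coordinate derivatives, 9 - friction-matrix multiply,
! 10 - friction-matrix setup. A negative order code ends the loop.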
      subroutine ergastulum

!      implicit real*8 (a-h,o-z)
!      include 'DIMENSIONS'
      use MD_data
      use energy
      use MDyn, only:setup_fricmat
      use REMD, only:fricmat_mult,ginv_mult
      use MPI_data      ! fg_rank, king, kolor, myrank, FG_COMM, nfgtasks
#ifdef MPI
      include "mpif.h"
#endif
!      include 'COMMON.SETUP'
!      include 'COMMON.DERIV'
!      include 'COMMON.VAR'
!      include 'COMMON.IOUNITS'
!      include 'COMMON.FFIELD'
!      include 'COMMON.INTERACT'
!      include 'COMMON.MD'
!      include 'COMMON.TIME1'
      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
      real(kind=8) :: edum(0:n_ene),time_order(0:10)
!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
!el      common /przechowalnia/ Gcopy
      integer :: icall = 0

!el  local variables
      real(kind=8) :: time00
      integer :: iorder,i,j,nres2,ierr,ierror
      nres2=2*nres
#ifndef FIVEDIAG
      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
! common.MD
      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
#endif
!      common /mdpmpi/
      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
#ifndef FIVEDIAG
      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
#endif
! Workers wait for variables and NF, and NFL from the boss
      iorder=0
      time_order(:)=0.0d0
      do while (iorder.ge.0)
!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
!     & ' receives order from Master'
        time00=MPI_Wtime()
        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
        time_Bcast=time_Bcast+MPI_Wtime()-time00
        if (icall.gt.4 .and. iorder.ge.0) &
         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
        icall=icall+1
!      write (*,*)
!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
        if (iorder.eq.0) then
          call zerograd
          call etotal(edum)
!          write (2,*) "After etotal"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.2) then
          call zerograd
          call etotal_short(edum)
!          write (2,*) "After etotal_short"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.3) then
          call zerograd
          call etotal_long(edum)
!          write (2,*) "After etotal_long"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.1) then
          call sum_gradient
!          write (2,*) "After sum_gradient"
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
        else if (iorder.eq.4) then
          call ginv_mult(z,d_a_tmp)
        else if (iorder.eq.5) then
! Setup MD things for a slave
          dimen=(nct-nnt+1)+nside
          dimen1=(nct-nnt)+(nct-nnt+1)
          dimen3=dimen*3
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          call flush(2)
          call int_bounds(dimen,igmult_start,igmult_end)
          igmult_start=igmult_start-1
          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          my_ng_count=igmult_end-igmult_start
          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
           MPI_INTEGER,FG_COMM,IERROR)
          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
          myginv_ng_count=nres2*my_ng_count !el maxres2
!          write (2,*) "igmult_start",igmult_start," igmult_end",
!     &     igmult_end," my_ng_count",my_ng_count
!          call flush(2)
          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
!          call flush(2)
!          call MPI_Barrier(FG_COMM,IERROR)
          time00=MPI_Wtime()
          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
#ifdef TIMING
          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
#endif
          do i=1,dimen
            do j=1,2*my_ng_count
              ginv(j,i)=gcopy(i,j)
            enddo
          enddo
!          write (2,*) "dimen",dimen," dimen3",dimen3
!          write (2,*) "End MD setup"
!          call flush(2)
!           write (iout,*) "My chunk of ginv_block"
!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
        else if (iorder.eq.6) then
          call int_from_cart1(.false.)
        else if (iorder.eq.7) then
          call chainbuild_cart
        else if (iorder.eq.8) then
          call intcartderiv
        else if (iorder.eq.9) then
          call fricmat_mult(z,d_a_tmp)
        else if (iorder.eq.10) then
          call setup_fricmat
        endif
      enddo
      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
        ' absolute rank',myrank,' leaves ERGASTULUM.'
      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
       (' order[',i,']',time_order(i),i=0,10)
      return
      end subroutine ergastulum