diff --git a/source/unres/unres.f90 b/source/unres/unres.f90
deleted file mode 100644
index 89feccc..0000000
--- a/source/unres/unres.f90
+++ /dev/null
@@ -1,1055 +0,0 @@
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-!                                                                              !
-!                                U N R E S                                     !
-!                                                                              !
-! Program to carry out conformational search of proteins in a united-residue   !
-! approximation.                                                               !
-!                                                                              !
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-      program unres
-
-      use io_units
-      use control_data
-      use control, only:tcpu
-      use io_base, only:ilen
-      use geometry, only:chainbuild
-      use control, only:dajczas
-      use check_bond_
-      use energy
-      use compare, only: test
-      use map_
-      use MDyn
-      use MPI_
-      use io, only:readrtns
-
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-#ifdef MPI
-      use MPI_data ! include 'COMMON.SETUP'
-      implicit none
-      include 'mpif.h'
-      integer :: ierr
-#else
-      use MPI_data, only: me,king
-      implicit none
-#endif
-!      include 'COMMON.TIME1'
-!      include 'COMMON.INTERACT'
-!      include 'COMMON.NAMES'
-!      include 'COMMON.GEO'
-!      include 'COMMON.HEADER'
-!      include 'COMMON.CONTROL'
-!      include 'COMMON.CONTACTS'
-!      include 'COMMON.CHAIN'
-!      include 'COMMON.VAR'
-!      include 'COMMON.IOUNITS'
-!      include 'COMMON.FFIELD'
-!      include 'COMMON.REMD'
-!      include 'COMMON.MD'
-!      include 'COMMON.SBRIDGE'
-
-      real(kind=8) :: hrtime,mintime,sectime
-      character(len=64) ::  text_mode_calc(-2:14)
-      text_mode_calc(-2) = 'test'
-      text_mode_calc(-1) = 'cos'
-      text_mode_calc(0) = 'Energy evaluation or minimization'
-      text_mode_calc(1) = 'Regularization of PDB structure'
-      text_mode_calc(2) = 'Threading of a sequence on PDB structures'
-      text_mode_calc(3) = 'Monte Carlo (with minimization) '
-      text_mode_calc(4) = 'Energy minimization of multiple conformations'
-      text_mode_calc(5) = 'Checking energy gradient'
-      text_mode_calc(6) = 'Entropic sampling Monte Carlo (with minimization)'
-      text_mode_calc(7) = 'Energy map'
-      text_mode_calc(8) = 'CSA calculations'
-      text_mode_calc(9) = 'Not used 9'
-      text_mode_calc(10) = 'Not used 10'
-      text_mode_calc(11) = 'Soft regularization of PDB structure'
-      text_mode_calc(12) = 'Mesoscopic molecular dynamics (MD) '
-      text_mode_calc(13) = 'Not used 13'
-      text_mode_calc(14) = 'Replica exchange molecular dynamics (REMD)'
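-! modecalc, read by readrtns, indexes this table and selects the matching
-! exec_* routine in the dispatch below.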
-!      external ilen
-!      call memmon_print_usage()
-
-      call init_task
-      if (me.eq.king) &
-        write(iout,*)'### LAST MODIFIED  09/03/15 15:32PM by EL'  
-      if (me.eq.king) call cinfo
-! Read force field parameters and job setup data
-      call readrtns
-      call flush(iout)
-!
-      if (me.eq.king .or. .not. out1file) then
-       write (iout,'(2a/)') &
-       text_mode_calc(modecalc)(:ilen(text_mode_calc(modecalc))), &
-       ' calculation.' 
-       if (minim) write (iout,'(a)') &
-       'Conformations will be energy-minimized.'
-       write (iout,'(80("*")/)') 
-      endif
-      call flush(iout)
-!
-      if (modecalc.eq.-2) then
-        call test
-        stop
-      else if (modecalc.eq.-1) then
-        write(iout,*) "call check_sc_map next"
-        call check_bond
-        stop
-      endif
-!elwrite(iout,*)"!!!!!!!!!!!!!!!!! in unres"
-
-#ifdef MPI
-      if (fg_rank.gt.0) then
-! Fine-grain slaves just do energy and gradient components.
-        call ergastulum ! slave workhouse in Latin
-      else
-#endif
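-! Dispatch on the calculation type requested in the input.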
-      if (modecalc.eq.0) then
-!write(iout,*)"!!!!!!!!!!!!!!!!! in unres"
-
-        call exec_eeval_or_minim
-!write(iout,*)"!!!!!!!!!!!!!!!!! in unres"
-
-      else if (modecalc.eq.1) then
-        call exec_regularize
-      else if (modecalc.eq.2) then
-        call exec_thread
-      else if (modecalc.eq.3 .or. modecalc .eq.6) then
-        call exec_MC
-      else if (modecalc.eq.4) then
-        call exec_mult_eeval_or_minim
-      else if (modecalc.eq.5) then
-        call exec_checkgrad
-!write(iout,*) "check grad dwa razy"
-!el        call exec_checkgrad
-      else if (ModeCalc.eq.7) then
-        call exec_map
-      else if (ModeCalc.eq.8) then
-        call exec_CSA
-      else if (modecalc.eq.11) then
-        call exec_softreg
-      else if (modecalc.eq.12) then
-        call exec_MD
-      else if (modecalc.eq.14) then
-        call exec_MREMD
-      else
-        write (iout,'(a,i5)') 'This calculation type is not supported: ',&
-         modecalc
-      endif
-!elwrite(iout,*)"!!!!!!!!!!!!!!!!!"
-
-#ifdef MPI
-      endif
-! Finish task.
-      if (fg_rank.eq.0) call finish_task
-!      call memmon_print_usage()
-#ifdef TIMING
-       call print_detailed_timing
-#endif
-      call MPI_Finalize(ierr)
-      stop 'Bye Bye...'
-#else
-      call dajczas(tcpu(),hrtime,mintime,sectime)
-      stop '********** Program terminated normally.'
-#endif
-
-      end program !UNRES
-!-----------------------------------------------------------------------------
-!
-!-----------------------------------------------------------------------------
-      subroutine exec_MD
-      use MPI_data     !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL'
-      use geometry, only:chainbuild
-      use MDyn
-      use io_units     !include 'COMMON.IOUNITS'
-!      use io_common
-      implicit none
-!      include 'DIMENSIONS'
-#ifdef MPI
-      include "mpif.h"
-#endif
-      call alloc_MD_arrays
-      if (me.eq.king .or. .not. out1file) &
-         write (iout,*) "Calling chainbuild"
-      call chainbuild
-      call MD
-      return
-      end subroutine exec_MD
-!---------------------------------------------------------------------------
-      subroutine exec_MREMD
-      use MPI_data     !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL'
-      use io_units     !include 'COMMON.IOUNITS'
-!      use io_common
-      use REMD_data     !include 'COMMON.REMD'
-      use geometry, only:chainbuild
-      use MREMDyn
-
-      implicit none
-!      include 'DIMENSIONS'
-#ifdef MPI
-      include "mpif.h"
-#endif
-
-      integer :: i
-      call alloc_MD_arrays
-      call alloc_MREMD_arrays
-
-      if (me.eq.king .or. .not. out1file) &
-         write (iout,*) "Calling chainbuild"
-      call chainbuild
-      if (me.eq.king .or. .not. out1file) &
-         write (iout,*) "Calling REMD"
-      if (remd_mlist) then 
-        call MREMD
-      else
-        do i=1,nrep
-          remd_m(i)=1
-        enddo
-        call MREMD
-      endif
-      return
-      end subroutine exec_MREMD
-!-----------------------------------------------------------------------------
-      subroutine exec_eeval_or_minim
-      use MPI_data             !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
-      use io_units     !include 'COMMON.IOUNITS'
-      use names
-!      use energy      !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
-      use geometry_data        !include 'COMMON.GEO''COMMON.CHAIN'
-!      use REMD        !include 'COMMON.REMD'
-!      use MD          !include 'COMMON.MD'
-
-      use energy_data
-
-      use io_base
-      use geometry, only:chainbuild
-      use energy
-      use compare, only:alloc_compare_arrays,hairpin,secondary2,rms_nac_nnc
-      use minimm, only:minimize,minim_dc,sc_move
-      use compare_data !el
-      use comm_srutu
-      implicit none
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-#ifdef MPI
-      include 'mpif.h'
-#endif
-      integer :: i
-!el      common /srutu/ icall
-      real(kind=8) :: energy_(0:n_ene)
-      real(kind=8) :: energy_long(0:n_ene),energy_short(0:n_ene)
-      real(kind=8) :: varia(6*nres)    !(maxvar) (maxvar=6*maxres)
-      real(kind=8) :: time00, evals, etota, etot, time_ene, time1
-      integer :: nharp,nft_sc,iretcode,nfun
-      integer,dimension(4,nres/3) :: iharp     !(4,nres/3)(4,maxres/3)
-      logical :: fail
-      real(kind=8) :: rms,frac,frac_nn,co
-
-      integer :: j,k
-      call alloc_compare_arrays
-      if (indpdb.eq.0) call chainbuild
-#ifdef MPI
-      time00=MPI_Wtime()
-#endif
-      call chainbuild_cart
-!      write(iout,*)"in exec_eeval or minimim",split_ene
-!      do j=1,2*nres+2
-!        write(iout,*)"cccccc",j,(c(i,j),i=1,3)
-!        write(iout,*)"dcccccc",j,(dc(i,j),i=1,3)
-!      enddo
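-! With split_ene, evaluate and print the long- and short-range parts of
-! the energy separately before reporting their sum.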
-      if (split_ene) then
-!      write(iout,*)"in exec_eeval or minimim"
-
-       print *,"Processor",myrank," after chainbuild"
-       icall=1
-!elwrite(iout,*)"in exec_eeval or minimim"
-
-       call etotal_long(energy_long)
-       write (iout,*) "Printing long range energy"
-       call enerprint(energy_long)
-!elwrite(iout,*)"in exec_eeval or minimim"
-
-       call etotal_short(energy_short)
-       write (iout,*) "Printing short range energy"
-       call enerprint(energy_short)
-       do i=0,n_ene
-         energy_(i)=energy_long(i)+energy_short(i)
-         write (iout,*) i,energy_long(i),energy_short(i),energy_(i)
-       enddo
-       write (iout,*) "Printing long+short range energy"
-       call enerprint(energy_)
-      endif
-
-      call etotal(energy_)
-!elwrite(iout,*)"after etotal in exec_eev"
-#ifdef MPI
-      time_ene=MPI_Wtime()-time00
-      write (iout,*) "Time for energy evaluation",time_ene
-#endif
-      print *,"after etotal"
-      etota = energy_(0)
-      etot = etota
-      call enerprint(energy_)
-!write(iout,*)"after enerprint"
-      call hairpin(.true.,nharp,iharp)
-!write(iout,*) "after hairpin"!,hfrag(1,1)
-      call secondary2(.true.)
-!write(iout,*) "after secondary2"
-      if (minim) then
-!rc overlap test
-!elwrite(iout,*) "after secondary2 minim",minim
-        if (overlapsc) then 
-          print *, 'Calling OVERLAP_SC'
-!write(iout,*) 'Calling OVERLAP_SC'
-          call overlap_sc(fail)
-!write(iout,*) 'after Calling OVERLAP_SC'
-        endif 
-
-        if (searchsc) then 
-          call sc_move(2,nres-1,10,1d10,nft_sc,etot)
-          print *,'SC_move',nft_sc,etot
-          write(iout,*) 'SC_move',nft_sc,etot
-        endif 
-
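-! dccart: minimize directly in Cartesian (virtual-bond vector) space via
-! minim_dc; otherwise convert to internal variables and call minimize.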
-        if (dccart) then
-!write(iout,*) 'CART  calling minim_dc', nvar
-          print *, 'Calling MINIM_DC'
-#ifdef MPI
-          time1=MPI_WTIME()
-#endif
-!    call check_ecartint !el
-          call minim_dc(etot,iretcode,nfun)
-!    call check_ecartint !el
-        else 
-!write(iout,*) "indpdb",indpdb
-          if (indpdb.ne.0) then 
-!write(iout,*) 'if indpdb', indpdb
-            call bond_regular
-            call chainbuild
-          endif
-          call geom_to_var(nvar,varia)
-!write(iout,*) 'po geom to var; calling minimize', nvar
-          print *,'Calling MINIMIZE.'
-#ifdef MPI
-          time1=MPI_WTIME()
-#endif
-!    call check_eint
-!        call exec_checkgrad   !el
-          call minimize(etot,varia,iretcode,nfun)
-!     call check_eint
-!        call exec_checkgrad   !el
-        endif
-        print *,'SUMSL return code is',iretcode,' eval ',nfun
-#ifdef MPI
-        evals=nfun/(MPI_WTIME()-time1)
-        print *,'# eval/s',evals
-#endif
-        print *,'refstr=',refstr
-        call hairpin(.true.,nharp,iharp)
-        call secondary2(.true.)
-        call etotal(energy_)
-        etot = energy_(0)
-        call enerprint(energy_)
-
-        call intout
-        call briefout(0,etot)
-        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-        write (iout,'(a,i3)') 'SUMSL return code:',iretcode
-        write (iout,'(a,i20)') '# of energy evaluations:',nfun+1
-#ifdef MPI
-        write (iout,'(a,f16.3)')'# of energy evaluations/sec:',evals
-#endif
-      else
-!elwrite(iout,*) "after secondary2 minim",minim
-        print *,'refstr=',refstr
-        if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-!elwrite(iout,*) "rms_nac"
-!elwrite(iout,*) "before briefout"
-        call briefout(0,etot)
-!elwrite(iout,*) "after briefout"
-      endif
-      if (outpdb) call pdbout(etot,titel(:32),ipdb)
-      if (outmol2) call mol2out(etot,titel(:32))
-!elwrite(iout,*) "after exec_eeval_or_minim"
-      return
-      end subroutine exec_eeval_or_minim
-!-----------------------------------------------------------------------------
-      subroutine exec_regularize
-!      use MPI_data            !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
-      use io_units     !include 'COMMON.IOUNITS'
-      use names
-      use energy_data  !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
-      use geometry_data        !include 'COMMON.GEO''COMMON.CHAIN'
- !     use REMD        !include 'COMMON.REMD'
-!     use MD           !include 'COMMON.MD'
-      use regularize_
-      use compare
-      implicit none
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-#ifdef MPI
-      include 'mpif.h'
-#endif
-      real(kind=8) :: energy_(0:n_ene)
-      real(kind=8) :: etot
-      real(kind=8) :: rms,frac,frac_nn,co
-      integer :: iretcode
-
-      call alloc_compare_arrays
-      call gen_dist_constr
-      call sc_conf
-      call intout
-      call regularize(nct-nnt+1,etot,rms,cref(1,nnt,1),iretcode)
-      call etotal(energy_)
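-! energy_(14) appears to hold the constraint term introduced for
-! regularization; exclude it from the reported total.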
-      energy_(0)=energy_(0)-energy_(14)
-      etot=energy_(0)
-      call enerprint(energy_)
-      call intout
-      call briefout(0,etot)
-      if (outpdb) call pdbout(etot,titel(:32),ipdb)
-      if (outmol2) call mol2out(etot,titel(:32))
-      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-      write (iout,'(a,i3)') 'SUMSL return code:',iretcode
-      return
-      end subroutine exec_regularize
-!-----------------------------------------------------------------------------
-      subroutine exec_thread
-!      use MPI_data            !include 'COMMON.SETUP'
-      use compare
-      implicit none
-!      include 'DIMENSIONS'
-#ifdef MPI
-      include "mpif.h"
-#endif
-      call alloc_compare_arrays
-      call thread_seq
-      return
-      end subroutine exec_thread
-!-----------------------------------------------------------------------------
-      subroutine exec_MC
-!      use MPI_data            !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL'
-      use geometry_data
-      use energy_data
-      use mcm_md
-      implicit none
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-      character(len=10) :: nodeinfo
-      real(kind=8) :: varia(6*nres)    !(maxvar) (maxvar=6*maxres)
-      integer :: ipar
-#ifdef MPI
-      include "mpif.h"
-#endif
-      call alloc_MCM_arrays
-      call mcm_setup
-      if (minim) then
-        if (modecalc.eq.3) then
-          call do_mcm(ipar)
-        else
-          call entmcm
-        endif
-      else
-        call monte_carlo
-      endif
-      return
-      end subroutine exec_MC
-!-----------------------------------------------------------------------------
-      subroutine exec_mult_eeval_or_minim
-      use MPI_data             !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
-      use io_units     !include 'COMMON.IOUNITS'
-      use names
-      use energy_data  !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
-      use geometry_data        !include 'COMMON.GEO''COMMON.CHAIN'
-!      use REMD        !include 'COMMON.REMD'
-!      use MD          !include 'COMMON.MD'
-      use io_base
-      use geometry, only:chainbuild,geom_to_var,int_from_cart1,var_to_geom
-      use energy, only:etotal,enerprint
-      use compare, only:rms_nac_nnc
-      use minimm, only:minimize!,minim_mcmf
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-#ifdef MPI
-      use minimm, only:minim_mcmf
-      implicit none
-      include 'mpif.h'
-      integer :: ierror,ierr
-      integer :: man             ! rank of the worker a message came from
-      real(kind=8),dimension(mpi_status_size) :: muster
-#else
-      implicit none
-#endif
-      real(kind=8) :: varia(6*nres)    !(maxvar) (maxvar=6*maxres)
-      integer,dimension(6) :: ind
-      real(kind=8) :: energy_(0:n_ene)
-      logical :: eof
-      real(kind=8) :: etot,ene0
-      integer :: mm,imm,nft,n,iconf,nmin,i,iretcode,nfun,it,&
-         nf_mcmf,j
-      real(kind=8) :: rms,frac,frac_nn,co,time,ene
-
-      eof=.false.
-#ifdef MPI
-      if(me.ne.king) then
-        call minim_mcmf
-        return
-      endif
-
-      close (intin)
-      open(intin,file=intinname,status='old')
-      write (istat,'(a5,20a12)')"#    ",&
-        (wname(print_order(i)),i=1,nprint_ene)
-      if (refstr) then
-        write (istat,'(a5,20a12)')"#    ",&
-         (ename(print_order(i)),i=1,nprint_ene),&
-         "ETOT total","RMSD","nat.contact","nnt.contact"        
-      else
-        write (istat,'(a5,20a12)')"#    ",&
-          (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
-      endif
-
-      if (.not.minim) then
-        do while (.not. eof)
-          if (read_cart) then
-            read (intin,'(e15.10,e15.5)',end=1100,err=1100) time,ene
-            call read_x(intin,*11)
-#ifdef MPI
-! Broadcast the order to compute internal coordinates to the slaves.
-            if (nfgtasks.gt.1) &
-              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
-#endif
-            call int_from_cart1(.false.)
-          else
-            read (intin,'(i5)',end=1100,err=1100) iconf
-            call read_angles(intin,*11)
-            call geom_to_var(nvar,varia)
-            call chainbuild
-          endif
-          write (iout,'(a,i7)') 'Conformation #',iconf
-          call etotal(energy_)
-          call briefout(iconf,energy_(0))
-          call enerprint(energy_)
-          etot=energy_(0)
-          if (refstr) then 
-            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-            write (istat,'(i5,20(f12.3))') iconf,&
-            (energy_(print_order(i)),i=1,nprint_ene),etot,&
-             rms,frac,frac_nn,co
-!jlee end
-          else
-            write (istat,'(i5,16(f12.3))') iconf,&
-           (energy_(print_order(i)),i=1,nprint_ene),etot
-          endif
-        enddo
-1100    continue
-        goto 1101
-      endif
-
-      mm=0
-      imm=0
-      nft=0
-      ene0=0.0d0
-      n=0
-      iconf=0
-      nmin=0
-      nf_mcmf=0
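-! Task farm: the master hands each worker a conformation to minimize
-! (ind(2) = conformation number) and collects the results below.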
-!      do n=1,nzsc
-      do while (.not. eof)
-        mm=mm+1
-        if (mm.lt.nodes) then
-          if (read_cart) then
-            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
-            call read_x(intin,*11)
-#ifdef MPI
-! Broadcast the order to compute internal coordinates to the slaves.
-            if (nfgtasks.gt.1) &
-              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
-#endif
-            call int_from_cart1(.false.)
-          else
-            read (intin,'(i5)',end=11,err=11) iconf
-            call read_angles(intin,*11)
-            call geom_to_var(nvar,varia)
-            call chainbuild
-          endif
-          write (iout,'(a,i7)') 'Conformation #',iconf
-          n=n+1
-          imm=imm+1
-          ind(1)=1
-          ind(2)=n
-          ind(3)=0
-          ind(4)=0
-          ind(5)=0
-          ind(6)=0
-          ene0=0.0d0
-          call mpi_send(ind,6,mpi_integer,mm,idint,CG_COMM,&
-                        ierr)
-          call mpi_send(varia,nvar,mpi_double_precision,mm,&
-                        idreal,CG_COMM,ierr)
-          call mpi_send(ene0,1,mpi_double_precision,mm,&
-                        idreal,CG_COMM,ierr)
-!         print *,'task ',n,' sent to worker ',mm,nvar
-        else
-         call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
-                       CG_COMM,muster,ierr)
-         man=muster(mpi_source)
-!         print *,'receiving result from worker ',man,' (',iii1,iii,')'
-         call mpi_recv(varia,nvar,mpi_double_precision,&
-                     man,idreal,CG_COMM,muster,ierr)
-         call mpi_recv(ene,1,&
-                     mpi_double_precision,man,idreal,&
-                     CG_COMM,muster,ierr)
-         call mpi_recv(ene0,1,&
-                     mpi_double_precision,man,idreal,&
-                     CG_COMM,muster,ierr)
-!         print *,'result received from worker ',man,' sending now'
-
-          call var_to_geom(nvar,varia)
-          call chainbuild
-          call etotal(energy_)
-          iconf=ind(2)
-          write (iout,*)
-          write (iout,*)
-          write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
-
-          etot=energy_(0)
-          call enerprint(energy_)
-          call briefout(iconf,etot)
-!          if (minim) call briefout(it,etot)
-          if (refstr) then 
-            call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-            write (istat,'(i5,19(f12.3))') iconf,&
-           (energy_(print_order(i)),i=1,nprint_ene),etot,&
-           rms,frac,frac_nn,co
-          else
-            write (istat,'(i5,15(f12.3))') iconf,&
-           (energy_(print_order(i)),i=1,nprint_ene),etot
-          endif
-
-          imm=imm-1
-          if (read_cart) then
-            read (intin,'(e15.10,e15.5)',end=1101,err=1101) time,ene
-            call read_x(intin,*11)
-#ifdef MPI
-! Broadcast the order to compute internal coordinates to the slaves.
-            if (nfgtasks.gt.1) &
-              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
-#endif
-            call int_from_cart1(.false.)
-          else
-            read (intin,'(i5)',end=1101,err=1101) iconf
-            call read_angles(intin,*11)
-            call geom_to_var(nvar,varia)
-            call chainbuild
-          endif
-          n=n+1
-          imm=imm+1
-          ind(1)=1
-          ind(2)=n
-          ind(3)=0
-          ind(4)=0
-          ind(5)=0
-          ind(6)=0
-          call mpi_send(ind,6,mpi_integer,man,idint,CG_COMM,&
-                        ierr)
-          call mpi_send(varia,nvar,mpi_double_precision,man,&
-                        idreal,CG_COMM,ierr)
-          call mpi_send(ene0,1,mpi_double_precision,man,&
-                        idreal,CG_COMM,ierr)
-          nf_mcmf=nf_mcmf+ind(4)
-          nmin=nmin+1
-        endif
-      enddo
-11    continue
-      do j=1,imm
-        call mpi_recv(ind,6,mpi_integer,mpi_any_source,idint,&
-                     CG_COMM,muster,ierr)
-        man=muster(mpi_source)
-        call mpi_recv(varia,nvar,mpi_double_precision,&
-                     man,idreal,CG_COMM,muster,ierr)
-        call mpi_recv(ene,1,&
-                     mpi_double_precision,man,idreal,&
-                     CG_COMM,muster,ierr)
-        call mpi_recv(ene0,1,&
-                     mpi_double_precision,man,idreal,&
-                     CG_COMM,muster,ierr)
-
-        call var_to_geom(nvar,varia)
-        call chainbuild
-        call etotal(energy_)
-        iconf=ind(2)
-        write (iout,*)
-        write (iout,*)
-        write (iout,'(a,2i7)') 'Conformation #',iconf,ind(5)
-
-        etot=energy_(0)
-        call enerprint(energy_)
-        call briefout(iconf,etot)
-        if (refstr) then 
-          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-          write (istat,'(i5,19(f12.3))') iconf,&
-         (energy_(print_order(i)),i=1,nprint_ene),etot,&
-         rms,frac,frac_nn,co
-        else
-          write (istat,'(i5,15(f12.3))') iconf,&
-          (energy_(print_order(i)),i=1,nprint_ene),etot
-        endif
-        nmin=nmin+1
-      enddo
-1101  continue
-      do i=1, nodes-1
-         ind(1)=0
-         ind(2)=0
-         ind(3)=0
-         ind(4)=0
-         ind(5)=0
-         ind(6)=0
-         call mpi_send(ind,6,mpi_integer,i,idint,CG_COMM,&
-                        ierr)
-      enddo
-#else
-      close (intin)
-      open(intin,file=intinname,status='old')
-      write (istat,'(a5,20a12)')"#    ",&
-         (wname(print_order(i)),i=1,nprint_ene)
-      write (istat,'("#    ",20(1pe12.4))') &
-         (weights(print_order(i)),i=1,nprint_ene)
-      if (refstr) then
-        write (istat,'(a5,20a12)')"#    ",&
-         (ename(print_order(i)),i=1,nprint_ene),&
-         "ETOT total","RMSD","nat.contact","nnt.contact"
-      else
-        write (istat,'(a5,14a12)')"#    ",&
-         (ename(print_order(i)),i=1,nprint_ene),"ETOT total"
-      endif
-      do while (.not. eof)
-          if (read_cart) then
-            read (intin,'(e15.10,e15.5)',end=11,err=11) time,ene
-            call read_x(intin,*11)
-#ifdef MPI
-! Broadcast the order to compute internal coordinates to the slaves.
-            if (nfgtasks.gt.1) &
-              call MPI_Bcast(6,1,MPI_INTEGER,king,FG_COMM,IERROR)
-#endif
-            call int_from_cart1(.false.)
-          else
-            read (intin,'(i5)',end=11,err=11) iconf
-            call read_angles(intin,*11)
-            call geom_to_var(nvar,varia)
-            call chainbuild
-          endif
-        write (iout,'(a,i7)') 'Conformation #',iconf
-        if (minim) call minimize(etot,varia,iretcode,nfun)
-        call etotal(energy_)
-
-        etot=energy_(0)
-        call enerprint(energy_)
-        if (minim) call briefout(iconf,etot) 
-        if (refstr) then 
-          call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-          write (istat,'(i5,18(f12.3))') iconf,&
-         (energy_(print_order(i)),i=1,nprint_ene),&
-         etot,rms,frac,frac_nn,co
-!jlee end
-        else
-          write (istat,'(i5,14(f12.3))') iconf,&
-         (energy_(print_order(i)),i=1,nprint_ene),etot
-        endif
-      enddo
-   11 continue
-#endif
-      return
-      end subroutine exec_mult_eeval_or_minim
-!-----------------------------------------------------------------------------
-      subroutine exec_checkgrad
-!      use MPI_data            !include 'COMMON.SETUP'
-      use control_data !include 'COMMON.CONTROL''COMMON.TIME1''COMMON.NAMES''COMMON.HEADER'
-      use io_units     !include 'COMMON.IOUNITS'
-!el      use energy_data, only:icall   !include 'COMMON.INTERACT''COMMON.CONTACTS''COMMON.VAR''COMMON.FFIELD' 'COMMON.SBRIDGE'
-      use geometry_data        !include 'COMMON.GEO''COMMON.CHAIN'
-!      use REMD        !include 'COMMON.REMD'
-      use MD_data              !include 'COMMON.MD'
-      use io_base, only:intout
-      use io_config, only:read_fragments
-      use geometry
-      use energy
-      use comm_srutu
-      implicit none
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-#ifdef MPI
-      include 'mpif.h'
-#endif
-!el      integer :: icall
-!el      common /srutu/ icall
-      real(kind=8) :: energy_(0:max_ene)
-      real(kind=8) :: etot
-      integer :: i
-!      do i=2,nres
-!        vbld(i)=vbld(i)+ran_number(-0.1d0,0.1d0)
-!        if (itype(i).ne.10) 
-!     &      vbld(i+nres)=vbld(i+nres)+ran_number(-0.001d0,0.001d0)
-!      enddo
-      if (indpdb.eq.0) call chainbuild
-!      do i=0,nres
-!        do j=1,3
-!          dc(j,i)=dc(j,i)+ran_number(-0.2d0,0.2d0)
-!        enddo
-!      enddo
-!      do i=1,nres-1
-!        if (itype(i).ne.10) then
-!          do j=1,3
-!            dc(j,i+nres)=dc(j,i+nres)+ran_number(-0.2d0,0.2d0)
-!          enddo
-!        endif
-!      enddo
-!      do j=1,3
-!        dc(j,0)=ran_number(-0.2d0,0.2d0)
-!      enddo
-      usampl=.true.
-      totT=1.d0
-      eq_time=0.0d0
-      call read_fragments
-      call chainbuild_cart
-      call cartprint
-      call intout
-      icall=1
-      call etotal(energy_(0))
-      etot = energy_(0)
-      call enerprint(energy_(0))
-      write (iout,*) "Uconst",Uconst," Uconst_back",uconst_back
-      print *,'icheckgrad=',icheckgrad
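-! icheckgrad selects the test: 1 - check_ecartint, 2 - check_cartgrad,
-! 3 - check_eint.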
-      select case (icheckgrad)
-      case (2)
-        call check_cartgrad
-      case (3)
-        call check_eint
-      case default   ! icheckgrad=1 or any other value
-        call check_ecartint
-      end select
-      return
-      end subroutine exec_checkgrad
-!-----------------------------------------------------------------------------
-      subroutine exec_map
-!      use map_data
-      use map_
-      use io_config, only:map_read
-      implicit none
-! Energy maps
-      call alloc_map_arrays
-      call map_read
-      call map
-      return
-      end subroutine exec_map
-!-----------------------------------------------------------------------------
-      subroutine exec_CSA
-
-      use io_units     !include 'COMMON.IOUNITS'
-      use CSA
-
-      implicit none
-#ifdef MPI
-      include "mpif.h"
-#endif
-!      include 'DIMENSIONS'
-! Conformational Space Annealing programmed by Jooyoung Lee.
-! This method works only with parallel machines!
-#ifdef MPI
-      call alloc_CSA_arrays
-      call together
-#else
-      write (iout,*) "CSA works on parallel machines only"
-#endif
-      return
-      end subroutine exec_CSA
-!-----------------------------------------------------------------------------
-      subroutine exec_softreg
-      use io_units     !include 'COMMON.IOUNITS'
-      use control_data !include 'COMMON.CONTROL'
-      use energy_data
-      use io_base, only:intout,briefout
-      use geometry, only:chainbuild
-      use energy
-      use compare
-      implicit none
-!      include 'DIMENSIONS'
-      real(kind=8) :: energy_(0:n_ene)
-!el local variables
-      real(kind=8) :: rms,frac,frac_nn,co,etot
-      logical :: debug = .false.  ! controls debug printout in secondary2
-
-      call alloc_compare_arrays
-      call chainbuild
-      call etotal(energy_)
-      call enerprint(energy_)
-      if (.not.lsecondary) then
-        write(iout,*) 'Calling secondary structure recognition'
-        call secondary2(debug)
-      else
-        write(iout,*) 'Using secondary structure supplied in pdb'
-      endif
-
-      call softreg
-
-      call etotal(energy_)
-      etot=energy_(0)
-      call enerprint(energy_)
-      call intout
-      call briefout(0,etot)
-      call secondary2(.true.)
-      if (refstr) call rms_nac_nnc(rms,frac,frac_nn,co,.true.)
-      return
-      end subroutine exec_softreg
-!-----------------------------------------------------------------------------
-! minimize_p.F
-!-----------------------------------------------------------------------------
-!el#ifdef MPI
-      subroutine ergastulum
-
-!      implicit real*8 (a-h,o-z)
-!      include 'DIMENSIONS'
-      use MPI_data     ! include 'COMMON.SETUP' (fg_rank, king, FG_COMM, nfgtasks)
-      use MD_data
-      use energy
-      use MDyn, only:setup_fricmat
-      use REMD, only:fricmat_mult,ginv_mult
-#ifdef MPI
-      include "mpif.h"
-#endif
-!      include 'COMMON.SETUP'
-!      include 'COMMON.DERIV'
-!      include 'COMMON.VAR'
-!      include 'COMMON.IOUNITS'
-!      include 'COMMON.FFIELD'
-!      include 'COMMON.INTERACT'
-!      include 'COMMON.MD'
-!      include 'COMMON.TIME1'
-      real(kind=8),dimension(6*nres) :: z,d_a_tmp       !(maxres6) maxres6=6*maxres
-      real(kind=8) :: edum(0:n_ene)
-      real(kind=8),save :: time_order(0:10) = 0.0d0 ! accumulated wait time per order code
-!el      real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2) maxres2=2*maxres
-!el      common /przechowalnia/ Gcopy
-      integer :: icall = 0
-
-!el  local variables
-      real(kind=8) :: time00
-      integer :: iorder,i,j,nres2,ierr,ierror
-      nres2=2*nres
-      if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2))
-! common.MD
-      if(.not.allocated(Ginv)) allocate(Ginv(nres2,nres2)) !(maxres2,maxres2)
-!      common /mdpmpi/
-      if(.not.allocated(ng_start)) allocate(ng_start(0:nfgtasks-1))
-      if(.not.allocated(ng_counts)) allocate(ng_counts(0:nfgtasks-1))
-      if(.not.allocated(nginv_counts)) allocate(nginv_counts(0:nfgtasks-1)) !(0:MaxProcs-1)
-      if(.not.allocated(nginv_start)) allocate(nginv_start(0:nfgtasks)) !(0:MaxProcs)
-
-      if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2)) !maxres2=2*maxres
-
-! Workers wait for order codes broadcast by the master (king) and execute
-! them until a negative order arrives.
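-! Order codes: 0 etotal, 1 sum_gradient, 2 etotal_short, 3 etotal_long,
-! 4 ginv_mult, 5 MD setup (scatter Ginv), 6 int_from_cart1,
-! 7 chainbuild_cart, 8 intcartderiv, 9 fricmat_mult, 10 setup_fricmat;
-! a negative order ends the loop.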
-      iorder=0
-      do while (iorder.ge.0)
-!      write (*,*) 'Processor',fg_rank,' CG group',kolor,
-!     & ' receives order from Master'
-        time00=MPI_Wtime()
-        call MPI_Bcast(iorder,1,MPI_INTEGER,king,FG_COMM,IERR)
-        time_Bcast=time_Bcast+MPI_Wtime()-time00
-        if (icall.gt.4 .and. iorder.ge.0) &
-         time_order(iorder)=time_order(iorder)+MPI_Wtime()-time00
-       icall=icall+1
-!      write (*,*)
-!     & 'Processor',fg_rank,' completed receive MPI_BCAST order',iorder
-        if (iorder.eq.0) then
-          call zerograd
-          call etotal(edum)
-!          write (2,*) "After etotal"
-!          write (2,*) "dimen",dimen," dimen3",dimen3
-!          call flush(2)
-        else if (iorder.eq.2) then
-          call zerograd
-          call etotal_short(edum)
-!          write (2,*) "After etotal_short"
-!          write (2,*) "dimen",dimen," dimen3",dimen3
-!          call flush(2)
-        else if (iorder.eq.3) then
-          call zerograd
-          call etotal_long(edum)
-!          write (2,*) "After etotal_long"
-!          write (2,*) "dimen",dimen," dimen3",dimen3
-!          call flush(2)
-        else if (iorder.eq.1) then
-          call sum_gradient
-!          write (2,*) "After sum_gradient"
-!          write (2,*) "dimen",dimen," dimen3",dimen3
-!          call flush(2)
-        else if (iorder.eq.4) then
-          call ginv_mult(z,d_a_tmp)
-        else if (iorder.eq.5) then
-! Setup MD things for a slave
-          dimen=(nct-nnt+1)+nside
-          dimen1=(nct-nnt)+(nct-nnt+1)
-          dimen3=dimen*3
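-! dimen = backbone units (nct-nnt+1) plus side chains (nside);
-! dimen3 = 3*dimen Cartesian components.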
-!          write (2,*) "dimen",dimen," dimen3",dimen3
-!          call flush(2)
-          call int_bounds(dimen,igmult_start,igmult_end)
-          igmult_start=igmult_start-1
-          call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
-           ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
-           my_ng_count=igmult_end-igmult_start
-          call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
-           MPI_INTEGER,FG_COMM,IERROR)
-          write (2,*) "ng_start",(ng_start(i),i=0,nfgtasks-1) !sp
-!          write (2,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
-          myginv_ng_count=nres2*my_ng_count !el maxres2
-!          write (2,*) "igmult_start",igmult_start," igmult_end",
-!     &     igmult_end," my_ng_count",my_ng_count
-!          call flush(2)
-          call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,& !el maxres2
-           nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
-          call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
-           nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
-!          write (2,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
-!          write (2,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
-!          call flush(2)
-!          call MPI_Barrier(FG_COMM,IERROR)
-          time00=MPI_Wtime()
-          call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
-           nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
-           myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
-#ifdef TIMING
-          time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
-#endif
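-! The received chunk arrives transposed in gcopy; copy it into ginv.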
-          do i=1,dimen
-            do j=1,2*my_ng_count
-              ginv(j,i)=gcopy(i,j)
-            enddo
-          enddo
-!          write (2,*) "dimen",dimen," dimen3",dimen3
-!          write (2,*) "End MD setup"
-!          call flush(2)
-!           write (iout,*) "My chunk of ginv_block"
-!           call MATOUT2(my_ng_count,dimen3,maxres2,maxers2,ginv_block)
-        else if (iorder.eq.6) then
-          call int_from_cart1(.false.)
-        else if (iorder.eq.7) then
-          call chainbuild_cart
-        else if (iorder.eq.8) then
-          call intcartderiv
-        else if (iorder.eq.9) then
-          call fricmat_mult(z,d_a_tmp)
-        else if (iorder.eq.10) then
-          call setup_fricmat
-        endif
-      enddo
-      write (*,*) 'Processor',fg_rank,' CG group',kolor,&
-        ' absolute rank',myrank,' leaves ERGASTULUM.'
-      write(*,*)'Processor',fg_rank,' wait times for respective orders',&
-       (' order[',i,']',time_order(i),i=0,10)
-      return
-      end subroutine ergastulum