cmake for unres4 wham
diff --git a/source/unres/control.F90 b/source/unres/control.F90
new file mode 100644 (file)
index 0000000..8d22bf0
--- /dev/null
@@ -0,0 +1,2160 @@
+      module control
+!-----------------------------------------------------------------------------
+      use io_units
+      use names
+      use MPI_data
+      use geometry_data
+      use energy_data
+      use control_data
+      use minim_data
+      use geometry, only:int_bounds
+#ifndef CLUSTER
+      use csa_data
+#ifdef WHAM_RUN
+      use wham_data
+#endif
+#endif
+      implicit none
+!-----------------------------------------------------------------------------
+! common.control
+!      common /cntrl/
+!      integer :: modecalc,iscode,indpdb,indback,indphi,iranconf,&
+!       icheckgrad,iprint,i2ndstr,mucadyn,constr_dist,symetr
+!      logical :: minim,refstr,pdbref,outpdb,outmol2,overlapsc,&
+!       energy_dec,sideadd,lsecondary,read_cart,unres_pdb,&
+!       vdisulf,searchsc,lmuca,dccart,extconf,out1file,&
+!       gnorm_check,gradout,split_ene
+!... minim = .true. means DO minimization.
+!... energy_dec = .true. means print energy decomposition matrix
+!-----------------------------------------------------------------------------
+! common.time1
+!     FOUND_NAN - set by calcf to stop sumsl via stopx
+!      COMMON/TIME1/
+      real(kind=8) :: STIME,BATIME,PREVTIM,RSTIME
+!el      real(kind=8) :: TIMLIM,SAFETY
+!el      real(kind=8) :: WALLTIME
+!      COMMON/STOPTIM/
+      integer :: ISTOP
+!      common /sumsl_flag/
+      logical :: FOUND_NAN
+!      common /timing/
+      real(kind=8) :: t_init
+!       time_bcast,time_reduce,time_gather,&
+!       time_sendrecv,time_barrier_e,time_barrier_g,time_scatter,&
+       !t_eelecij,
+!       time_allreduce,&
+!       time_lagrangian,time_cartgrad,&
+!       time_sumgradient,time_intcartderiv,time_inttocart,time_intfcart,&
+!       time_mat,time_fricmatmult,&
+!       time_scatter_fmat,time_scatter_ginv,&
+!       time_scatter_fmatmult,time_scatter_ginvmult,&
+!       t_eshort,t_elong,t_etotal
+!-----------------------------------------------------------------------------
+! initialize_p.F
+!-----------------------------------------------------------------------------
+!      block data
+!      integer,parameter :: MaxMoveType = 4
+!      character(len=14),dimension(-1:MaxMoveType+1) :: MovTypID=(/'pool','chain regrow',&
+!      character :: MovTypID(-1:MaxMoveType+1)=(/'pool','chain regrow',&
+!       'multi-bond','phi','theta','side chain','total'/)
+! Conversion from poises to molecular unit and the gas constant
+!el      real(kind=8) :: cPoise=2.9361d0, Rb=0.001986d0
+!-----------------------------------------------------------------------------
+!      common /przechowalnia/ subroutines: init_int_table,add_int,add_int_from
+      integer,dimension(:),allocatable :: iturn3_start_all,&
+        iturn3_end_all,iturn4_start_all,iturn4_end_all,iatel_s_all,&
+        iatel_e_all !(0:max_fg_procs)
+      integer,dimension(:,:),allocatable :: ielstart_all,&
+        ielend_all !(maxres,0:max_fg_procs-1)
+
+!      common /przechowalnia/ subroutine: init_int_table
+      integer,dimension(:),allocatable :: ntask_cont_from_all,&
+        ntask_cont_to_all !(0:max_fg_procs-1)
+      integer,dimension(:,:),allocatable :: itask_cont_from_all,&
+        itask_cont_to_all !(0:max_fg_procs-1,0:max_fg_procs-1)
+!-----------------------------------------------------------------------------
+!
+!
+!-----------------------------------------------------------------------------
+      contains
+!-----------------------------------------------------------------------------
+! initialize_p.F
+!-----------------------------------------------------------------------------
+      subroutine initialize
+!
+! Define constants and zero out tables.
+!
+      use comm_iofile
+      use comm_machsw
+      use MCM_data, only: MovTypID
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+#endif
+#ifndef ISNAN
+      external proc_proc
+#ifdef WINPGI
+!MS$ATTRIBUTES C ::  proc_proc
+#endif
+#endif
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.CHAIN'
+!      include 'COMMON.INTERACT'
+!      include 'COMMON.GEO'
+!      include 'COMMON.LOCAL'
+!      include 'COMMON.TORSION'
+!      include 'COMMON.FFIELD'
+!      include 'COMMON.SBRIDGE'
+!      include 'COMMON.MCM'
+!      include 'COMMON.MINIM' 
+!      include 'COMMON.DERIV'
+!      include 'COMMON.SPLITELE'
+!      implicit none
+! Common blocks from the diagonalization routines
+!el      integer :: IR,IW,IP,IJK,IPK,IDAF,NAV,IODA(400)
+!el      integer :: KDIAG,ICORFL,IXDR
+!el      COMMON /IOFILE/ IR,IW,IP,IJK,IPK,IDAF,NAV,IODA
+!el      COMMON /MACHSW/ KDIAG,ICORFL,IXDR
+      logical :: mask_r
+!      real*8 text1 /'initial_i'/
+      real(kind=4) :: rr
+
+!local variables el
+      integer :: i,j,k,l,ichir1,ichir2,iblock,m,maxit,idumm
+
+#if .not. defined(WHAM_RUN) && .not. defined(CLUSTER)
+      mask_r=.false.
+#ifndef ISNAN
+! NaNQ initialization
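+! (dacos of an argument outside [-1,1] yields a quiet NaN; the NaN in
+! rr is then handed to the external proc_proc, presumably so that the
+! FOUND_NAN machinery described near the top of this module can stop
+! the minimizer. This reading of proc_proc is an assumption from usage.)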
+      i=-1
+      rr=dacos(100.0d0)
+#ifdef WINPGI
+      idumm=proc_proc(rr,i)
+#elif defined(WHAM_RUN)
+      call proc_proc(rr,i)
+#endif
+#endif
+
+      kdiag=0
+      icorfl=0
+      iw=2
+      
+      allocate(MovTypID(-1:MaxMoveType+1))
+      MovTypID=(/'pool          ','chain regrow  ',&
+       'multi-bond    ','phi           ','theta         ',&
+       'side chain    ','total         '/)
+#endif
+!
+! The following is just to define auxiliary variables used in angle conversion
+!
+      pi=4.0D0*datan(1.0D0)
+      dwapi=2.0D0*pi
+      dwapi3=dwapi/3.0D0
+      pipol=0.5D0*pi
+      deg2rad=pi/180.0D0
+      rad2deg=1.0D0/deg2rad
+      angmin=10.0D0*deg2rad
+!el#ifdef CLUSTER
+!el      Rgas = 1.987D-3
+!el#endif
+!
+! Define I/O units.
+!
+      inp=    1
+      iout=   2
+      ipdbin= 3
+      ipdb=   7
+#ifdef CLUSTER
+      imol2= 18
+      jplot= 19
+!el      jstatin=10
+      imol2=  4
+      jrms=30
+#else
+      icart = 30
+      imol2=  4
+      ithep_pdb=51
+      irotam_pdb=52
+      irest1=55
+      irest2=56
+      iifrag=57
+      ientin=18
+      ientout=19
+!rc for write_rmsbank1  
+      izs1=21
+!dr  include secondary structure prediction bias
+      isecpred=27
+#endif
+      igeom=  8
+      intin=  9
+      ithep= 11
+      irotam=12
+      itorp= 13
+      itordp= 23
+      ielep= 14
+      isidep=15
+#if defined(WHAM_RUN) || defined(CLUSTER)
+      isidep1=22 !wham
+#else
+!
+! CSA I/O units (separated from others especially for Jooyoung)
+!
+      icsa_rbank=30
+      icsa_seed=31
+      icsa_history=32
+      icsa_bank=33
+      icsa_bank1=34
+      icsa_alpha=35
+      icsa_alpha1=36
+      icsa_bankt=37
+      icsa_int=39
+      icsa_bank_reminimized=38
+      icsa_native_int=41
+      icsa_in=40
+!rc for ifc error 118
+      icsa_pdb=42
+#endif
+      iscpp=25
+      icbase=16
+      ifourier=20
+      istat= 17
+      ibond = 28
+      isccor = 29
+#ifdef WHAM_RUN
+!
+! WHAM files
+!
+      ihist=30
+      iweight=31
+      izsc=32
+#endif
+#if defined(WHAM_RUN) || defined(CLUSTER)
+!
+! Set the MPI task counters for the serial WHAM and cluster runs
+!
+      fgprocs=1
+      nfgtasks=1
+      nfgtasks1=1
+#endif
+!
+! Set default weights of the energy terms.
+!
+      wsc=1.0D0 ! in wham:  wlong=1.0D0
+      welec=1.0D0
+      wtor =1.0D0
+      wang =1.0D0
+      wscloc=1.0D0
+      wstrain=1.0D0
+!
+! Zero out tables.
+!
+!      print '(a,$)','Inside initialize'
+!      call memmon_print_usage()
+      
+!      do i=1,maxres2
+!      do j=1,3
+!        c(j,i)=0.0D0
+!        dc(j,i)=0.0D0
+!       enddo
+!      enddo
+!      do i=1,maxres
+!      do j=1,3
+!        xloc(j,i)=0.0D0
+!        enddo
+!      enddo
+!      do i=1,ntyp
+!      do j=1,ntyp
+!        aa(i,j)=0.0D0
+!        bb(i,j)=0.0D0
+!        augm(i,j)=0.0D0
+!        sigma(i,j)=0.0D0
+!        r0(i,j)=0.0D0
+!        chi(i,j)=0.0D0
+!        enddo
+!      do j=1,2
+!        bad(i,j)=0.0D0
+!        enddo
+!      chip(i)=0.0D0
+!      alp(i)=0.0D0
+!      sigma0(i)=0.0D0
+!      sigii(i)=0.0D0
+!      rr0(i)=0.0D0
+!      a0thet(i)=0.0D0
+!      do j=1,2
+!         do ichir1=-1,1
+!          do ichir2=-1,1
+!          athet(j,i,ichir1,ichir2)=0.0D0
+!          bthet(j,i,ichir1,ichir2)=0.0D0
+!          enddo
+!         enddo
+!        enddo
+!        do j=0,3
+!        polthet(j,i)=0.0D0
+!        enddo
+!      do j=1,3
+!        gthet(j,i)=0.0D0
+!        enddo
+!      theta0(i)=0.0D0
+!      sig0(i)=0.0D0
+!      sigc0(i)=0.0D0
+!      do j=1,maxlob
+!        bsc(j,i)=0.0D0
+!        do k=1,3
+!          censc(k,j,i)=0.0D0
+!          enddo
+!          do k=1,3
+!          do l=1,3
+!            gaussc(l,k,j,i)=0.0D0
+!            enddo
+!          enddo
+!        nlob(i)=0
+!        enddo
+!      enddo
+!      nlob(ntyp1)=0
+!      dsc(ntyp1)=0.0D0
+!      do i=-maxtor,maxtor
+!        itortyp(i)=0
+!c      write (iout,*) "TU DOCHODZE",i,itortyp(i)
+!       do iblock=1,2
+!        do j=-maxtor,maxtor
+!          do k=1,maxterm
+!            v1(k,j,i,iblock)=0.0D0
+!            v2(k,j,i,iblock)=0.0D0
+!          enddo
+!        enddo
+!        enddo
+!      enddo
+!      do iblock=1,2
+!       do i=-maxtor,maxtor
+!        do j=-maxtor,maxtor
+!         do k=-maxtor,maxtor
+!          do l=1,maxtermd_1
+!            v1c(1,l,i,j,k,iblock)=0.0D0
+!            v1s(1,l,i,j,k,iblock)=0.0D0
+!            v1c(2,l,i,j,k,iblock)=0.0D0
+!            v1s(2,l,i,j,k,iblock)=0.0D0
+!          enddo !l
+!          do l=1,maxtermd_2
+!           do m=1,maxtermd_2
+!            v2c(m,l,i,j,k,iblock)=0.0D0
+!            v2s(m,l,i,j,k,iblock)=0.0D0
+!           enddo !m
+!          enddo !l
+!        enddo !k
+!       enddo !j
+!      enddo !i
+!      enddo !iblock
+
+!      do i=1,maxres
+!      itype(i)=0
+!      itel(i)=0
+!      enddo
+! Initialize the bridge arrays
+      ns=0
+      nss=0 
+      nhpb=0
+!      do i=1,maxss
+!      iss(i)=0
+!      enddo
+!      do i=1,maxdim
+!      dhpb(i)=0.0D0
+!      enddo
+!      do i=1,maxres
+!      ihpb(i)=0
+!      jhpb(i)=0
+!      enddo
+!
+! Initialize timing.
+!
+      call set_timers
+!
+! Initialize variables used in minimization.
+!   
+!c     maxfun=5000
+!c     maxit=2000
+      maxfun=500
+      maxit=200
+      tolf=1.0D-2
+      rtolf=5.0D-4
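+! tolf and rtolf are presumably the absolute and relative
+! function-value tolerances of the SUMSL minimizer referenced near the
+! top of this module; the interpretation follows the names and is an
+! assumption, not documented here.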
+! 
+! Initialize the variables responsible for the mode of gradient storage.
+!
+      nfl=0
+      icg=1
+      
+#ifdef WHAM_RUN
+      allocate(iww(max_eneW))
+      do i=1,14
+        do j=1,14
+          if (print_order(i).eq.j) then
+            iww(print_order(i))=j
+            goto 1121
+          endif
+        enddo
+1121    continue
+      enddo
+#endif
+#if defined(WHAM_RUN) || defined(CLUSTER)
+      ndih_constr=0
+
+!      allocate(ww0(max_eneW))
+!      ww0 = reshape((/1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,&
+!          1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,0.4d0,1.0d0,&
+!          1.0d0,0.0d0,0.0/), shape(ww0))
+!
+      calc_grad=.false.
+! Set timers and counters for the respective routines
+      t_func = 0.0d0
+      t_grad = 0.0d0
+      t_fhel = 0.0d0
+      t_fbet = 0.0d0
+      t_ghel = 0.0d0
+      t_gbet = 0.0d0
+      t_viol = 0.0d0
+      t_gviol = 0.0d0
+      n_func = 0
+      n_grad = 0
+      n_fhel = 0
+      n_fbet = 0
+      n_ghel = 0
+      n_gbet = 0
+      n_viol = 0
+      n_gviol = 0
+      n_map = 0
+#endif
+!
+! Initialize constants used to split the energy into long- and short-range
+! components
+!
+      r_cut=2.0d0
+      rlamb=0.3d0
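+! Presumably r_cut is the distance at which an interaction is switched
+! from the short-range to the long-range class and rlamb the width of
+! the switching region; this reading follows the comment above and the
+! variable names and is an assumption.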
+#ifndef SPLITELE
+      nprint_ene=nprint_ene-1
+#endif
+      return
+      end subroutine initialize
+!-----------------------------------------------------------------------------
+      subroutine init_int_table
+
+      use geometry, only:int_bounds1
+!el      use MPI_data
+!el      implicit none
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+      integer,dimension(15) :: blocklengths,displs
+#endif
+!      include 'COMMON.CONTROL'
+!      include 'COMMON.SETUP'
+!      include 'COMMON.CHAIN'
+!      include 'COMMON.INTERACT'
+!      include 'COMMON.LOCAL'
+!      include 'COMMON.SBRIDGE'
+!      include 'COMMON.TORCNSTR'
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.DERIV'
+!      include 'COMMON.CONTACTS'
+!el      integer,dimension(0:nfgtasks) :: iturn3_start_all,iturn3_end_all,&
+!el        iturn4_start_all,iturn4_end_all,iatel_s_all,iatel_e_all  !(0:max_fg_procs)
+!el      integer,dimension(nres,0:nfgtasks) :: ielstart_all,&
+!el        ielend_all !(maxres,0:max_fg_procs-1)
+!el      integer,dimension(0:nfgtasks-1) :: ntask_cont_from_all,&
+!el        ntask_cont_to_all !(0:max_fg_procs-1),
+!el      integer,dimension(0:nfgtasks-1,0:nfgtasks-1) :: itask_cont_from_all,&
+!el        itask_cont_to_all !(0:max_fg_procs-1,0:max_fg_procs-1)
+
+!el      common /przechowalnia/ iturn3_start_all,iturn3_end_all,&
+!el        iturn4_start_all,iturn4_end_all,iatel_s_all,iatel_e_all,&
+!el        ielstart_all,ielend_all,ntask_cont_from_all,itask_cont_from_all,&
+!el        ntask_cont_to_all,itask_cont_to_all
+
+      integer :: FG_GROUP,CONT_FROM_GROUP,CONT_TO_GROUP
+      logical :: scheck,lprint,flag
+
+!el local variables
+      integer :: ind_scint=0,ind_scint_old,ii,jj,i,j,iint
+
+#ifdef MPI
+      integer :: my_sc_int(0:nfgtasks-1),my_ele_int(0:nfgtasks-1)
+      integer :: my_sc_intt(0:nfgtasks),my_ele_intt(0:nfgtasks)
+      integer :: n_sc_int_tot,my_sc_inde,my_sc_inds,ind_sctint,npept
+      integer :: nele_int_tot,my_ele_inds,my_ele_inde,ind_eleint_old,&
+            ind_eleint,ijunk,nele_int_tot_vdw,my_ele_inds_vdw,&
+            my_ele_inde_vdw,ind_eleint_vdw,ind_eleint_vdw_old,&
+            nscp_int_tot,my_scp_inds,my_scp_inde,ind_scpint,&
+            ind_scpint_old,nsumgrad,nlen,ngrad_start,ngrad_end,&
+            ierror,k,ierr,iaux,ncheck_to,ncheck_from,ind_typ,&
+            ichunk,int_index_old
+
+!el      allocate(itask_cont_from(0:nfgtasks-1)) !(0:max_fg_procs-1)
+!el      allocate(itask_cont_to(0:nfgtasks-1)) !(0:max_fg_procs-1)
+
+!... Determine the first and last SC-SC interactions to be handled
+!... by the current processor.
+!write (iout,*) '******INIT_INT_TABLE nres=',nres,' nnt=',nnt,' nct=',nct
+      do i=0,nfgtasks-1
+        itask_cont_from(i)=fg_rank
+        itask_cont_to(i)=fg_rank
+      enddo
+      lprint=energy_dec
+!      lprint=.true.
+      if (lprint) &
+       write (iout,*)'INIT_INT_TABLE nres=',nres,' nnt=',nnt,' nct=',nct
+      n_sc_int_tot=(nct-nnt+1)*(nct-nnt)/2-nss
+      call int_bounds(n_sc_int_tot,my_sc_inds,my_sc_inde)
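+! The (nct-nnt+1) side chains form (nct-nnt+1)*(nct-nnt)/2 unordered
+! pairs; the nss disulfide-bonded pairs are excluded here and handled
+! separately. int_bounds presumably divides 1..n_sc_int_tot evenly
+! among the fine-grain tasks and returns this task's first and last
+! global interaction index (assumed from how int_partition uses them).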
+!write (iout,*) 'INIT_INT_TABLE nres=',nres,' nnt=',nnt,' nct=',nct
+      if (lprint) &
+        write (iout,*) 'Processor',fg_rank,' CG group',kolor,&
+        ' absolute rank',MyRank,&
+        ' n_sc_int_tot',n_sc_int_tot,' my_sc_inds=',my_sc_inds,&
+        ' my_sc_inde',my_sc_inde
+      ind_sctint=0
+      iatsc_s=0
+      iatsc_e=0
+#endif
+!el       common /przechowalnia/
+      allocate(iturn3_start_all(0:nfgtasks))
+      allocate(iturn3_end_all(0:nfgtasks))
+      allocate(iturn4_start_all(0:nfgtasks))
+      allocate(iturn4_end_all(0:nfgtasks))
+      allocate(iatel_s_all(0:nfgtasks))
+      allocate(iatel_e_all(0:nfgtasks))
+      allocate(ielstart_all(nres,0:nfgtasks-1))
+      allocate(ielend_all(nres,0:nfgtasks-1))
+
+      allocate(ntask_cont_from_all(0:nfgtasks-1))
+      allocate(ntask_cont_to_all(0:nfgtasks-1))
+      allocate(itask_cont_from_all(0:nfgtasks-1,0:nfgtasks-1))
+      allocate(itask_cont_to_all(0:nfgtasks-1,0:nfgtasks-1))
+!el----------
+!      lprint=.false.
+      do i=1,nres !el  !maxres
+        nint_gr(i)=0
+        nscp_gr(i)=0
+        ielstart(i)=0
+        ielend(i)=0
+        do j=1,maxint_gr
+          istart(i,j)=0
+          iend(i,j)=0
+          iscpstart(i,j)=0
+          iscpend(i,j)=0    
+        enddo
+      enddo
+      ind_scint=0
+      ind_scint_old=0
+!d    write (iout,*) 'ns=',ns,' nss=',nss,' ihpb,jhpb',
+!d   &   (ihpb(i),jhpb(i),i=1,nss)
+      do i=nnt,nct-1
+        scheck=.false.
+        if (dyn_ss) goto 10
+        do ii=1,nss
+          if (ihpb(ii).eq.i+nres) then
+            scheck=.true.
+            jj=jhpb(ii)-nres
+            goto 10
+          endif
+        enddo
+   10   continue
+!d      write (iout,*) 'i=',i,' scheck=',scheck,' jj=',jj
+        if (scheck) then
+          if (jj.eq.i+1) then
+#ifdef MPI
+!            write (iout,*) 'jj=i+1'
+            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
+       iatsc_s,iatsc_e,i+2,nct,nint_gr(i),istart(i,1),iend(i,1),*12)
+#else
+            nint_gr(i)=1
+            istart(i,1)=i+2
+            iend(i,1)=nct
+#endif
+          else if (jj.eq.nct) then
+#ifdef MPI
+!            write (iout,*) 'jj=nct'
+            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
+        iatsc_s,iatsc_e,i+1,nct-1,nint_gr(i),istart(i,1),iend(i,1),*12)
+#else
+            nint_gr(i)=1
+            istart(i,1)=i+1
+            iend(i,1)=nct-1
+#endif
+          else
+#ifdef MPI
+            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
+       iatsc_s,iatsc_e,i+1,jj-1,nint_gr(i),istart(i,1),iend(i,1),*12)
+            ii=nint_gr(i)+1
+            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
+       iatsc_s,iatsc_e,jj+1,nct,nint_gr(i),istart(i,ii),iend(i,ii),*12)
+#else
+            nint_gr(i)=2
+            istart(i,1)=i+1
+            iend(i,1)=jj-1
+            istart(i,2)=jj+1
+            iend(i,2)=nct
+#endif
+          endif
+        else
+#ifdef MPI
+          call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
+          iatsc_s,iatsc_e,i+1,nct,nint_gr(i),istart(i,1),iend(i,1),*12)
+#else
+          nint_gr(i)=1
+          istart(i,1)=i+1
+          iend(i,1)=nct
+          ind_scint=ind_scint+nct-i
+#endif
+        endif
+#ifdef MPI
+        ind_scint_old=ind_scint
+#endif
+      enddo
+   12 continue
+#ifndef MPI
+      iatsc_s=nnt
+      iatsc_e=nct-1
+#endif
+      if (iatsc_s.eq.0) iatsc_s=1
+#ifdef MPI
+      if (lprint) write (*,*) 'Processor',fg_rank,' CG Group',kolor,&
+         ' absolute rank',myrank,' iatsc_s=',iatsc_s,' iatsc_e=',iatsc_e
+#endif
+      if (lprint) then
+      write (iout,'(a)') 'Interaction array:'
+      do i=iatsc_s,iatsc_e
+        write (iout,'(i3,2(2x,2i3))') &
+       i,(istart(i,iint),iend(i,iint),iint=1,nint_gr(i))
+      enddo
+      endif
+      ispp=4 !?? wham ispp=2
+#ifdef MPI
+! Now partition the electrostatic-interaction array
+      npept=nct-nnt
+      nele_int_tot=(npept-ispp)*(npept-ispp+1)/2
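+! Peptide groups i and j interact electrostatically only when
+! j >= i+ispp, which among npept groups leaves
+! (npept-ispp)*(npept-ispp+1)/2 pairs, a triangular number.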
+      call int_bounds(nele_int_tot,my_ele_inds,my_ele_inde)
+      if (lprint) &
+       write (*,*) 'Processor',fg_rank,' CG group',kolor,&
+        ' absolute rank',MyRank,&
+        ' nele_int_tot',nele_int_tot,' my_ele_inds=',my_ele_inds,&
+                    ' my_ele_inde',my_ele_inde
+      iatel_s=0
+      iatel_e=0
+      ind_eleint=0
+      ind_eleint_old=0
+      do i=nnt,nct-3
+        ijunk=0
+        call int_partition(ind_eleint,my_ele_inds,my_ele_inde,i,&
+          iatel_s,iatel_e,i+ispp,nct-1,ijunk,ielstart(i),ielend(i),*13)
+      enddo ! i 
+   13 continue
+      if (iatel_s.eq.0) iatel_s=1
+      nele_int_tot_vdw=(npept-2)*(npept-2+1)/2
+!      write (iout,*) "nele_int_tot_vdw",nele_int_tot_vdw
+      call int_bounds(nele_int_tot_vdw,my_ele_inds_vdw,my_ele_inde_vdw)
+!      write (iout,*) "my_ele_inds_vdw",my_ele_inds_vdw,
+!     & " my_ele_inde_vdw",my_ele_inde_vdw
+      ind_eleint_vdw=0
+      ind_eleint_vdw_old=0
+      iatel_s_vdw=0
+      iatel_e_vdw=0
+      do i=nnt,nct-3
+        ijunk=0
+        call int_partition(ind_eleint_vdw,my_ele_inds_vdw,&
+          my_ele_inde_vdw,i,&
+          iatel_s_vdw,iatel_e_vdw,i+2,nct-1,ijunk,ielstart_vdw(i),&
+          ielend_vdw(i),*15)
+!        write (iout,*) i," ielstart_vdw",ielstart_vdw(i),
+!     &   " ielend_vdw",ielend_vdw(i)
+      enddo ! i 
+   15 continue
+      if (iatel_s_vdw.eq.0) iatel_s_vdw=1
+#else
+      iatel_s=nnt
+      iatel_e=nct-5 ! ?? wham iatel_e=nct-3
+      do i=iatel_s,iatel_e
+        ielstart(i)=i+4 ! ?? wham +2
+        ielend(i)=nct-1
+      enddo
+      iatel_s_vdw=nnt
+      iatel_e_vdw=nct-3
+      do i=iatel_s_vdw,iatel_e_vdw
+        ielstart_vdw(i)=i+2
+        ielend_vdw(i)=nct-1
+      enddo
+#endif
+      if (lprint) then
+        write (*,*) 'Processor',fg_rank,' CG group',kolor,&
+        ' absolute rank',MyRank
+        write (iout,*) 'Electrostatic interaction array:'
+        do i=iatel_s,iatel_e
+          write (iout,'(i3,2(2x,2i3))') i,ielstart(i),ielend(i)
+        enddo
+      endif ! lprint
+!     iscp=3
+      iscp=2
+! Partition the SC-p interaction array
+#ifdef MPI
+      nscp_int_tot=(npept-iscp+1)*(npept-iscp+1)
+      call int_bounds(nscp_int_tot,my_scp_inds,my_scp_inde)
+      if (lprint) write (iout,*) 'Processor',fg_rank,' CG group',kolor,&
+        ' absolute rank',myrank,&
+        ' nscp_int_tot',nscp_int_tot,' my_scp_inds=',my_scp_inds,&
+                    ' my_scp_inde',my_scp_inde
+      iatscp_s=0
+      iatscp_e=0
+      ind_scpint=0
+      ind_scpint_old=0
+      do i=nnt,nct-1
+        if (i.lt.nnt+iscp) then
+!d        write (iout,*) 'i.le.nnt+iscp'
+          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
+            iatscp_s,iatscp_e,i+iscp,nct,nscp_gr(i),iscpstart(i,1),&
+            iscpend(i,1),*14)
+        else if (i.gt.nct-iscp) then
+!d        write (iout,*) 'i.gt.nct-iscp'
+          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
+            iatscp_s,iatscp_e,nnt,i-iscp,nscp_gr(i),iscpstart(i,1),&
+            iscpend(i,1),*14)
+        else
+          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
+            iatscp_s,iatscp_e,nnt,i-iscp,nscp_gr(i),iscpstart(i,1),&
+           iscpend(i,1),*14)
+          ii=nscp_gr(i)+1
+          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
+            iatscp_s,iatscp_e,i+iscp,nct,nscp_gr(i),iscpstart(i,ii),&
+            iscpend(i,ii),*14)
+        endif
+      enddo ! i
+   14 continue
+#else
+      iatscp_s=nnt
+      iatscp_e=nct-1
+      do i=nnt,nct-1
+        if (i.lt.nnt+iscp) then
+          nscp_gr(i)=1
+          iscpstart(i,1)=i+iscp
+          iscpend(i,1)=nct
+        elseif (i.gt.nct-iscp) then
+          nscp_gr(i)=1
+          iscpstart(i,1)=nnt
+          iscpend(i,1)=i-iscp
+        else
+          nscp_gr(i)=2
+          iscpstart(i,1)=nnt
+          iscpend(i,1)=i-iscp
+          iscpstart(i,2)=i+iscp
+          iscpend(i,2)=nct
+        endif 
+      enddo ! i
+#endif
+      if (iatscp_s.eq.0) iatscp_s=1
+      if (lprint) then
+        write (iout,'(a)') 'SC-p interaction array:'
+        do i=iatscp_s,iatscp_e
+          write (iout,'(i3,2(2x,2i3))') &
+              i,(iscpstart(i,j),iscpend(i,j),j=1,nscp_gr(i))
+        enddo
+      endif ! lprint
+! Partition local interactions
+#ifdef MPI
+      call int_bounds(nres-2,loc_start,loc_end)
+      loc_start=loc_start+1
+      loc_end=loc_end+1
+      call int_bounds(nres-2,ithet_start,ithet_end)
+      ithet_start=ithet_start+2
+      ithet_end=ithet_end+2
+      call int_bounds(nct-nnt-2,iturn3_start,iturn3_end) 
+      iturn3_start=iturn3_start+nnt
+      iphi_start=iturn3_start+2
+      iturn3_end=iturn3_end+nnt
+      iphi_end=iturn3_end+2
+      iturn3_start=iturn3_start-1
+      iturn3_end=iturn3_end-1
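+! The net shift is +nnt-1: int_bounds partitions 1..nct-nnt-2, so the
+! combined turn3 range becomes nnt..nct-3, matching the serial bounds
+! in the #else branch below; the iphi bounds are taken before the -1
+! correction and thus span nnt+3..nct, again as in the serial case.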
+      call int_bounds(nres-3,itau_start,itau_end)
+      itau_start=itau_start+3
+      itau_end=itau_end+3
+      call int_bounds(nres-3,iphi1_start,iphi1_end)
+      iphi1_start=iphi1_start+3
+      iphi1_end=iphi1_end+3
+      call int_bounds(nct-nnt-3,iturn4_start,iturn4_end) 
+      iturn4_start=iturn4_start+nnt
+      iphid_start=iturn4_start+2
+      iturn4_end=iturn4_end+nnt
+      iphid_end=iturn4_end+2
+      iturn4_start=iturn4_start-1
+      iturn4_end=iturn4_end-1
+      call int_bounds(nres-2,ibond_start,ibond_end) 
+      ibond_start=ibond_start+1
+      ibond_end=ibond_end+1
+      call int_bounds(nct-nnt,ibondp_start,ibondp_end) 
+      ibondp_start=ibondp_start+nnt
+      ibondp_end=ibondp_end+nnt
+      call int_bounds1(nres-1,ivec_start,ivec_end) 
+!      print *,"Processor",myrank,fg_rank,fg_rank1,
+!     &  " ivec_start",ivec_start," ivec_end",ivec_end
+      iset_start=loc_start+2
+      iset_end=loc_end+2
+      if (ndih_constr.eq.0) then
+        idihconstr_start=1
+        idihconstr_end=0
+      else
+        call int_bounds(ndih_constr,idihconstr_start,idihconstr_end)
+      endif
+!      nsumgrad=(nres-nnt)*(nres-nnt+1)/2
+!      nlen=nres-nnt+1
+      nsumgrad=(nres-nnt)*(nres-nnt+1)/2
+      nlen=nres-nnt+1
+      call int_bounds(nsumgrad,ngrad_start,ngrad_end)
+      igrad_start=((2*nlen+1) &
+         -sqrt(float((2*nlen-1)**2-8*(ngrad_start-1))))/2
+      igrad_end=((2*nlen+1) &
+         -sqrt(float((2*nlen-1)**2-8*(ngrad_end-1))))/2
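+! The gradient sum runs over the upper triangle (i<j) stored row-wise:
+! rows 1..i-1 hold (i-1)*(2*nlen-i)/2 entries, so the row containing
+! global entry n satisfies (i-1)*(2*nlen-i)/2 = n-1; solving the
+! quadratic and noting (2*nlen+1)**2-8*nlen = (2*nlen-1)**2 gives
+! i = ((2*nlen+1)-sqrt((2*nlen-1)**2-8*(n-1)))/2, the expression
+! evaluated above for ngrad_start and ngrad_end.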
+!el      allocate(jgrad_start(igrad_start:igrad_end))
+!el      allocate(jgrad_end(igrad_start:igrad_end)) !(maxres)
+      jgrad_start(igrad_start)= &
+         ngrad_start-(2*nlen-igrad_start)*(igrad_start-1)/2 &
+         +igrad_start
+      jgrad_end(igrad_start)=nres
+      if (igrad_end.gt.igrad_start) jgrad_start(igrad_end)=igrad_end+1
+      jgrad_end(igrad_end)=ngrad_end-(2*nlen-igrad_end)*(igrad_end-1)/2 &
+          +igrad_end
+      do i=igrad_start+1,igrad_end-1
+        jgrad_start(i)=i+1
+        jgrad_end(i)=nres
+      enddo
+      if (lprint) then 
+        write (*,*) 'Processor:',fg_rank,' CG group',kolor,&
+       ' absolute rank',myrank,&
+       ' loc_start',loc_start,' loc_end',loc_end,&
+       ' ithet_start',ithet_start,' ithet_end',ithet_end,&
+       ' iphi_start',iphi_start,' iphi_end',iphi_end,&
+       ' iphid_start',iphid_start,' iphid_end',iphid_end,&
+       ' ibond_start',ibond_start,' ibond_end',ibond_end,&
+       ' ibondp_start',ibondp_start,' ibondp_end',ibondp_end,&
+       ' iturn3_start',iturn3_start,' iturn3_end',iturn3_end,&
+       ' iturn4_start',iturn4_start,' iturn4_end',iturn4_end,&
+       ' ivec_start',ivec_start,' ivec_end',ivec_end,&
+       ' iset_start',iset_start,' iset_end',iset_end,&
+       ' idihconstr_start',idihconstr_start,' idihconstr_end',&
+         idihconstr_end
+       write (*,*) 'Processor:',fg_rank,myrank,' igrad_start',&
+         igrad_start,' igrad_end',igrad_end,' ngrad_start',ngrad_start,&
+         ' ngrad_end',ngrad_end
+       do i=igrad_start,igrad_end
+         write(*,*) 'Processor:',fg_rank,myrank,i,&
+          jgrad_start(i),jgrad_end(i)
+       enddo
+      endif
+      if (nfgtasks.gt.1) then
+        call MPI_Allgather(ivec_start,1,MPI_INTEGER,ivec_displ(0),1,&
+          MPI_INTEGER,FG_COMM1,IERROR)
+        iaux=ivec_end-ivec_start+1
+        call MPI_Allgather(iaux,1,MPI_INTEGER,ivec_count(0),1,&
+          MPI_INTEGER,FG_COMM1,IERROR)
+        call MPI_Allgather(iset_start-2,1,MPI_INTEGER,iset_displ(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        iaux=iset_end-iset_start+1
+        call MPI_Allgather(iaux,1,MPI_INTEGER,iset_count(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(ibond_start,1,MPI_INTEGER,ibond_displ(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        iaux=ibond_end-ibond_start+1
+        call MPI_Allgather(iaux,1,MPI_INTEGER,ibond_count(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(ithet_start,1,MPI_INTEGER,ithet_displ(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        iaux=ithet_end-ithet_start+1
+        call MPI_Allgather(iaux,1,MPI_INTEGER,ithet_count(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iphi_start,1,MPI_INTEGER,iphi_displ(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        iaux=iphi_end-iphi_start+1
+        call MPI_Allgather(iaux,1,MPI_INTEGER,iphi_count(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iphi1_start,1,MPI_INTEGER,iphi1_displ(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        iaux=iphi1_end-iphi1_start+1
+        call MPI_Allgather(iaux,1,MPI_INTEGER,iphi1_count(0),1,&
+          MPI_INTEGER,FG_COMM,IERROR)
+        do i=0,nfgtasks-1
+          do j=1,nres
+            ielstart_all(j,i)=0
+            ielend_all(j,i)=0
+          enddo
+        enddo
+        call MPI_Allgather(iturn3_start,1,MPI_INTEGER,&
+          iturn3_start_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iturn4_start,1,MPI_INTEGER,&
+          iturn4_start_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iturn3_end,1,MPI_INTEGER,&
+          iturn3_end_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iturn4_end,1,MPI_INTEGER,&
+          iturn4_end_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iatel_s,1,MPI_INTEGER,&
+          iatel_s_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(iatel_e,1,MPI_INTEGER,&
+          iatel_e_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(ielstart(1),nres,MPI_INTEGER,&
+          ielstart_all(1,0),nres,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(ielend(1),nres,MPI_INTEGER,&
+          ielend_all(1,0),nres,MPI_INTEGER,FG_COMM,IERROR)
+        if (lprint) then
+        write (iout,*) "iatel_s_all",(iatel_s_all(i),i=0,nfgtasks)
+        write (iout,*) "iatel_e_all",(iatel_e_all(i),i=0,nfgtasks)
+        write (iout,*) "iturn3_start_all",&
+          (iturn3_start_all(i),i=0,nfgtasks-1)
+        write (iout,*) "iturn3_end_all",&
+          (iturn3_end_all(i),i=0,nfgtasks-1)
+        write (iout,*) "iturn4_start_all",&
+          (iturn4_start_all(i),i=0,nfgtasks-1)
+        write (iout,*) "iturn4_end_all",&
+          (iturn4_end_all(i),i=0,nfgtasks-1)
+        write (iout,*) "The ielstart_all array"
+        do i=nnt,nct
+          write (iout,'(20i4)') i,(ielstart_all(i,j),j=0,nfgtasks-1)
+        enddo
+        write (iout,*) "The ielend_all array"
+        do i=nnt,nct
+          write (iout,'(20i4)') i,(ielend_all(i,j),j=0,nfgtasks-1)
+        enddo
+        call flush(iout)
+        endif
+        ntask_cont_from=0
+        ntask_cont_to=0
+        itask_cont_from(0)=fg_rank
+        itask_cont_to(0)=fg_rank
+        flag=.false.
+!el        allocate(iturn3_sent(4,iturn3_start:iturn3_end))
+!el        allocate(iturn4_sent(4,iturn4_start:iturn4_end)) !(4,maxres)
+        do ii=iturn3_start,iturn3_end
+          call add_int(ii,ii+2,iturn3_sent(1,ii),&
+                      ntask_cont_to,itask_cont_to,flag)
+        enddo
+        do ii=iturn4_start,iturn4_end
+          call add_int(ii,ii+3,iturn4_sent(1,ii),&
+                      ntask_cont_to,itask_cont_to,flag)
+        enddo
+        do ii=iturn3_start,iturn3_end
+          call add_int_from(ii,ii+2,ntask_cont_from,itask_cont_from)
+        enddo
+        do ii=iturn4_start,iturn4_end
+          call add_int_from(ii,ii+3,ntask_cont_from,itask_cont_from)
+        enddo
+        if (lprint) then
+        write (iout,*) "After turn3 ntask_cont_from",ntask_cont_from,&
+         " ntask_cont_to",ntask_cont_to
+        write (iout,*) "itask_cont_from",&
+          (itask_cont_from(i),i=1,ntask_cont_from)
+        write (iout,*) "itask_cont_to",&
+          (itask_cont_to(i),i=1,ntask_cont_to)
+        call flush(iout)
+        endif
+!        write (iout,*) "Loop forward"
+!        call flush(iout)
+        do i=iatel_s,iatel_e
+!          write (iout,*) "from loop i=",i
+!          call flush(iout)
+          do j=ielstart(i),ielend(i)
+            call add_int_from(i,j,ntask_cont_from,itask_cont_from)
+          enddo
+        enddo
+!        write (iout,*) "Loop backward iatel_e-1",iatel_e-1,
+!     &     " iatel_e",iatel_e
+!        call flush(iout)
+        nat_sent=0
+        do i=iatel_s,iatel_e
+!          write (iout,*) "i",i," ielstart",ielstart(i),
+!     &      " ielend",ielend(i)
+!          call flush(iout)
+          flag=.false.
+          do j=ielstart(i),ielend(i)
+            call add_int(i,j,iint_sent(1,j,nat_sent+1),ntask_cont_to,&
+                        itask_cont_to,flag)
+          enddo
+          if (flag) then
+            nat_sent=nat_sent+1
+            iat_sent(nat_sent)=i
+          endif
+        enddo
+        if (lprint) then
+        write (iout,*)"After longrange ntask_cont_from",ntask_cont_from,&
+         " ntask_cont_to",ntask_cont_to
+        write (iout,*) "itask_cont_from",&
+          (itask_cont_from(i),i=1,ntask_cont_from)
+        write (iout,*) "itask_cont_to",&
+          (itask_cont_to(i),i=1,ntask_cont_to)
+        call flush(iout)
+        write (iout,*) "iint_sent"
+        do i=1,nat_sent
+          ii=iat_sent(i)
+          write (iout,'(20i4)') ii,(j,(iint_sent(k,j,i),k=1,4),&
+            j=ielstart(ii),ielend(ii))
+        enddo
+        write (iout,*) "iturn3_sent iturn3_start",iturn3_start,&
+          " iturn3_end",iturn3_end
+        write (iout,'(20i4)') (i,(iturn3_sent(j,i),j=1,4),&
+           i=iturn3_start,iturn3_end)
+        write (iout,*) "iturn4_sent iturn4_start",iturn4_start,&
+          " iturn4_end",iturn4_end
+        write (iout,'(20i4)') (i,(iturn4_sent(j,i),j=1,4),&
+           i=iturn4_start,iturn4_end)
+        call flush(iout)
+        endif
+        call MPI_Gather(ntask_cont_from,1,MPI_INTEGER,&
+         ntask_cont_from_all,1,MPI_INTEGER,king,FG_COMM,IERR)
+!        write (iout,*) "Gather ntask_cont_from ended"
+!        call flush(iout)
+        call MPI_Gather(itask_cont_from(0),nfgtasks,MPI_INTEGER,&
+         itask_cont_from_all(0,0),nfgtasks,MPI_INTEGER,king,&
+         FG_COMM,IERR)
+!        write (iout,*) "Gather itask_cont_from ended"
+!        call flush(iout)
+        call MPI_Gather(ntask_cont_to,1,MPI_INTEGER,ntask_cont_to_all,&
+         1,MPI_INTEGER,king,FG_COMM,IERR)
+!        write (iout,*) "Gather ntask_cont_to ended"
+!        call flush(iout)
+        call MPI_Gather(itask_cont_to,nfgtasks,MPI_INTEGER,&
+         itask_cont_to_all,nfgtasks,MPI_INTEGER,king,FG_COMM,IERR)
+!        write (iout,*) "Gather itask_cont_to ended"
+!        call flush(iout)
+        if (fg_rank.eq.king) then
+          write (iout,*)"Contact receive task map (proc, #tasks, tasks)"
+          do i=0,nfgtasks-1
+            write (iout,'(20i4)') i,ntask_cont_from_all(i),&
+              (itask_cont_from_all(j,i),j=1,ntask_cont_from_all(i)) 
+          enddo
+          write (iout,*)
+          call flush(iout)
+          write (iout,*) "Contact send task map (proc, #tasks, tasks)"
+          do i=0,nfgtasks-1
+            write (iout,'(20i4)') i,ntask_cont_to_all(i),&
+             (itask_cont_to_all(j,i),j=1,ntask_cont_to_all(i)) 
+          enddo
+          write (iout,*)
+          call flush(iout)
+! Check if every send will have a matching receive
+          ncheck_to=0
+          ncheck_from=0
+          do i=0,nfgtasks-1
+            ncheck_to=ncheck_to+ntask_cont_to_all(i)
+            ncheck_from=ncheck_from+ntask_cont_from_all(i)
+          enddo
+          write (iout,*) "Control sums",ncheck_from,ncheck_to
+          if (ncheck_from.ne.ncheck_to) then
+            write (iout,*) "Error: #receive differs from #send."
+            write (iout,*) "Terminating program...!"
+            call flush(iout)
+            flag=.false.
+          else
+            flag=.true.
+            do i=0,nfgtasks-1
+              do j=1,ntask_cont_to_all(i)
+                ii=itask_cont_to_all(j,i)
+                do k=1,ntask_cont_from_all(ii)
+                  if (itask_cont_from_all(k,ii).eq.i) then
+                    if(lprint)write(iout,*)"Matching send/receive",i,ii
+                    exit
+                  endif
+                enddo
+                if (k.eq.ntask_cont_from_all(ii)+1) then
+                  flag=.false.
+                  write (iout,*) "Error: send by",i," to",ii,&
+                    " would have no matching receive"
+                endif
+              enddo
+            enddo
+          endif
+          if (.not.flag) then
+            write (iout,*) "Unmatched sends; terminating program"
+            call flush(iout)
+          endif
+        endif
+        call MPI_Bcast(flag,1,MPI_LOGICAL,king,FG_COMM,IERROR)
+!        write (iout,*) "flag broadcast ended flag=",flag
+!        call flush(iout)
+        if (.not.flag) then
+          call MPI_Finalize(IERROR)
+          stop "Error in INIT_INT_TABLE: unmatched send/receive."
+        endif
+        call MPI_Comm_group(FG_COMM,fg_group,IERR)
+!        write (iout,*) "MPI_Comm_group ended"
+!        call flush(iout)
+        call MPI_Group_incl(fg_group,ntask_cont_from+1,&
+          itask_cont_from(0),CONT_FROM_GROUP,IERR)
+        call MPI_Group_incl(fg_group,ntask_cont_to+1,itask_cont_to(0),&
+          CONT_TO_GROUP,IERR)
+        do i=1,nat_sent
+          ii=iat_sent(i)
+          iaux=4*(ielend(ii)-ielstart(ii)+1)
+          call MPI_Group_translate_ranks(fg_group,iaux,&
+            iint_sent(1,ielstart(ii),i),CONT_TO_GROUP,&
+            iint_sent_local(1,ielstart(ii),i),IERR )
+!          write (iout,*) "Ranks translated i=",i
+!          call flush(iout)
+        enddo
+        iaux=4*(iturn3_end-iturn3_start+1)
+        call MPI_Group_translate_ranks(fg_group,iaux,&
+           iturn3_sent(1,iturn3_start),CONT_TO_GROUP,&
+           iturn3_sent_local(1,iturn3_start),IERR)
+        iaux=4*(iturn4_end-iturn4_start+1)
+        call MPI_Group_translate_ranks(fg_group,iaux,&
+           iturn4_sent(1,iturn4_start),CONT_TO_GROUP,&
+           iturn4_sent_local(1,iturn4_start),IERR)
+        if (lprint) then
+        write (iout,*) "iint_sent_local"
+        do i=1,nat_sent
+          ii=iat_sent(i)
+          write (iout,'(20i4)') ii,(j,(iint_sent_local(k,j,i),k=1,4),&
+            j=ielstart(ii),ielend(ii))
+          call flush(iout)
+        enddo
+        write (iout,*) "iturn3_sent_local iturn3_start",iturn3_start,&
+          " iturn3_end",iturn3_end
+        write (iout,'(20i4)') (i,(iturn3_sent_local(j,i),j=1,4),&
+           i=iturn3_start,iturn3_end)
+        write (iout,*) "iturn4_sent_local iturn4_start",iturn4_start,&
+          " iturn4_end",iturn4_end
+        write (iout,'(20i4)') (i,(iturn4_sent_local(j,i),j=1,4),&
+           i=iturn4_start,iturn4_end)
+        call flush(iout)
+        endif
+        call MPI_Group_free(fg_group,ierr)
+        call MPI_Group_free(cont_from_group,ierr)
+        call MPI_Group_free(cont_to_group,ierr)
+        call MPI_Type_contiguous(3,MPI_DOUBLE_PRECISION,MPI_UYZ,IERROR)
+        call MPI_Type_commit(MPI_UYZ,IERROR)
+        call MPI_Type_contiguous(18,MPI_DOUBLE_PRECISION,MPI_UYZGRAD,&
+          IERROR)
+        call MPI_Type_commit(MPI_UYZGRAD,IERROR)
+        call MPI_Type_contiguous(2,MPI_DOUBLE_PRECISION,MPI_MU,IERROR)
+        call MPI_Type_commit(MPI_MU,IERROR)
+        call MPI_Type_contiguous(4,MPI_DOUBLE_PRECISION,MPI_MAT1,IERROR)
+        call MPI_Type_commit(MPI_MAT1,IERROR)
+        call MPI_Type_contiguous(8,MPI_DOUBLE_PRECISION,MPI_MAT2,IERROR)
+        call MPI_Type_commit(MPI_MAT2,IERROR)
+        call MPI_Type_contiguous(6,MPI_DOUBLE_PRECISION,MPI_THET,IERROR)
+        call MPI_Type_commit(MPI_THET,IERROR)
+        call MPI_Type_contiguous(9,MPI_DOUBLE_PRECISION,MPI_GAM,IERROR)
+        call MPI_Type_commit(MPI_GAM,IERROR)
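+! Each contiguous type bundles doubles that travel as one element in
+! the communication calls elsewhere in the code (e.g. 3 for a Cartesian
+! vector); the exact correspondence is inferred from the names
+! (MPI_UYZ, MPI_MU, MPI_GAM, ...) and should be treated as an
+! assumption rather than documentation.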
+
+!el        allocate(lentyp(0:nfgtasks-1))
+#ifndef MATGATHER
+! 9/22/08 Derived types to send matrices which appear in correlation terms
+        do i=0,nfgtasks-1
+          if (ivec_count(i).eq.ivec_count(0)) then
+            lentyp(i)=0
+          else
+            lentyp(i)=1
+          endif
+        enddo
+        do ind_typ=lentyp(0),lentyp(nfgtasks-1)
+        if (ind_typ.eq.0) then
+          ichunk=ivec_count(0)
+        else
+          ichunk=ivec_count(1)
+        endif
+!        do i=1,4
+!          blocklengths(i)=4
+!        enddo
+!        displs(1)=0
+!        do i=2,4
+!          displs(i)=displs(i-1)+blocklengths(i-1)*maxres
+!        enddo
+!        do i=1,4
+!          blocklengths(i)=blocklengths(i)*ichunk
+!        enddo
+!        write (iout,*) "blocklengths and displs"
+!        do i=1,4
+!          write (iout,*) i,blocklengths(i),displs(i)
+!        enddo
+!        call flush(iout)
+!        call MPI_Type_indexed(4,blocklengths(1),displs(1),
+!     &    MPI_DOUBLE_PRECISION,MPI_ROTAT1(ind_typ),IERROR)
+!        call MPI_Type_commit(MPI_ROTAT1(ind_typ),IERROR)
+!        write (iout,*) "MPI_ROTAT1",MPI_ROTAT1 
+!        do i=1,4
+!          blocklengths(i)=2
+!        enddo
+!        displs(1)=0
+!        do i=2,4
+!          displs(i)=displs(i-1)+blocklengths(i-1)*maxres
+!        enddo
+!        do i=1,4
+!          blocklengths(i)=blocklengths(i)*ichunk
+!        enddo
+!        write (iout,*) "blocklengths and displs"
+!        do i=1,4
+!          write (iout,*) i,blocklengths(i),displs(i)
+!        enddo
+!        call flush(iout)
+!        call MPI_Type_indexed(4,blocklengths(1),displs(1),
+!     &    MPI_DOUBLE_PRECISION,MPI_ROTAT2(ind_typ),IERROR)
+!        call MPI_Type_commit(MPI_ROTAT2(ind_typ),IERROR)
+!        write (iout,*) "MPI_ROTAT2",MPI_ROTAT2 
+        do i=1,8
+          blocklengths(i)=2
+        enddo
+        displs(1)=0
+        do i=2,8
+          displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
+        enddo
+        do i=1,8
+          blocklengths(i)=blocklengths(i)*ichunk
+        enddo
+        call MPI_Type_indexed(8,blocklengths,displs,&
+          MPI_DOUBLE_PRECISION,MPI_PRECOMP11(ind_typ),IERROR)
+        call MPI_Type_commit(MPI_PRECOMP11(ind_typ),IERROR)
+        do i=1,8
+          blocklengths(i)=4
+        enddo
+        displs(1)=0
+        do i=2,8
+          displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
+        enddo
+        do i=1,8
+          blocklengths(i)=blocklengths(i)*ichunk
+        enddo
+        call MPI_Type_indexed(8,blocklengths,displs,&
+          MPI_DOUBLE_PRECISION,MPI_PRECOMP12(ind_typ),IERROR)
+        call MPI_Type_commit(MPI_PRECOMP12(ind_typ),IERROR)
+        do i=1,6
+          blocklengths(i)=4
+        enddo
+        displs(1)=0
+        do i=2,6
+          displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
+        enddo
+        do i=1,6
+          blocklengths(i)=blocklengths(i)*ichunk
+        enddo
+        call MPI_Type_indexed(6,blocklengths,displs,&
+          MPI_DOUBLE_PRECISION,MPI_PRECOMP22(ind_typ),IERROR)
+        call MPI_Type_commit(MPI_PRECOMP22(ind_typ),IERROR)
+        do i=1,2
+          blocklengths(i)=8
+        enddo
+        displs(1)=0
+        do i=2,2
+          displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
+        enddo
+        do i=1,2
+          blocklengths(i)=blocklengths(i)*ichunk
+        enddo
+        call MPI_Type_indexed(2,blocklengths,displs,&
+          MPI_DOUBLE_PRECISION,MPI_PRECOMP23(ind_typ),IERROR)
+        call MPI_Type_commit(MPI_PRECOMP23(ind_typ),IERROR)
+        do i=1,4
+          blocklengths(i)=1
+        enddo
+        displs(1)=0
+        do i=2,4
+          displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
+        enddo
+        do i=1,4
+          blocklengths(i)=blocklengths(i)*ichunk
+        enddo
+        call MPI_Type_indexed(4,blocklengths,displs,&
+          MPI_DOUBLE_PRECISION,MPI_ROTAT_OLD(ind_typ),IERROR)
+        call MPI_Type_commit(MPI_ROTAT_OLD(ind_typ),IERROR)
+        enddo
+#endif
+      endif
+      iint_start=ivec_start+1
+      iint_end=ivec_end+1
+      do i=0,nfgtasks-1
+          iint_count(i)=ivec_count(i)
+          iint_displ(i)=ivec_displ(i)
+          ivec_displ(i)=ivec_displ(i)-1
+          iset_displ(i)=iset_displ(i)-1
+          ithet_displ(i)=ithet_displ(i)-1
+          iphi_displ(i)=iphi_displ(i)-1
+          iphi1_displ(i)=iphi1_displ(i)-1
+          ibond_displ(i)=ibond_displ(i)-1
+      enddo
+      if (nfgtasks.gt.1 .and. fg_rank.eq.king &
+          .and. (me.eq.0 .or. .not. out1file)) then
+        write (iout,*) "IVEC_DISPL, IVEC_COUNT, ISET_START, ISET_COUNT"
+        do i=0,nfgtasks-1
+          write (iout,*) i,ivec_displ(i),ivec_count(i),iset_displ(i),&
+            iset_count(i)
+        enddo
+        write (iout,*) "iphi_start",iphi_start," iphi_end",iphi_end,&
+          " iphi1_start",iphi1_start," iphi1_end",iphi1_end
+        write (iout,*)"IPHI_COUNT, IPHI_DISPL, IPHI1_COUNT, IPHI1_DISPL"
+        do i=0,nfgtasks-1
+          write (iout,*) i,iphi_count(i),iphi_displ(i),iphi1_count(i),&
+            iphi1_displ(i)
+        enddo
+        write(iout,'(i10,a,i10,a,i10,a/a,i3,a)') n_sc_int_tot,' SC-SC ',&
+          nele_int_tot,' electrostatic and ',nscp_int_tot,&
+          ' SC-p interactions','were distributed among',nfgtasks,&
+          ' fine-grain processors.'
+      endif
+#else
+      loc_start=2
+      loc_end=nres-1
+      ithet_start=3 
+      ithet_end=nres
+      iturn3_start=nnt
+      iturn3_end=nct-3
+      iturn4_start=nnt
+      iturn4_end=nct-4
+      iphi_start=nnt+3
+      iphi_end=nct
+      iphi1_start=4
+      iphi1_end=nres
+      idihconstr_start=1
+      idihconstr_end=ndih_constr
+      iphid_start=iphi_start
+      iphid_end=iphi_end-1
+      itau_start=4
+      itau_end=nres
+      ibond_start=2
+      ibond_end=nres-1
+      ibondp_start=nnt
+      ibondp_end=nct-1
+      ivec_start=1
+      ivec_end=nres-1
+      iset_start=3
+      iset_end=nres+1
+      iint_start=2
+      iint_end=nres-1
+#endif
+!el       common /przechowalnia/
+!      deallocate(iturn3_start_all)
+!      deallocate(iturn3_end_all)
+!      deallocate(iturn4_start_all)
+!      deallocate(iturn4_end_all)
+!      deallocate(iatel_s_all)
+!      deallocate(iatel_e_all)
+!      deallocate(ielstart_all)
+!      deallocate(ielend_all)
+
+!      deallocate(ntask_cont_from_all)
+!      deallocate(ntask_cont_to_all)
+!      deallocate(itask_cont_from_all)
+!      deallocate(itask_cont_to_all)
+!el----------
+      return
+      end subroutine init_int_table
+#ifdef MPI
+!-----------------------------------------------------------------------------
+      subroutine add_int(ii,jj,itask,ntask_cont_to,itask_cont_to,flag)
+
+!el      implicit none
+!      include "DIMENSIONS"
+!      include "COMMON.INTERACT"
+!      include "COMMON.SETUP"
+!      include "COMMON.IOUNITS"
+      integer :: ii,jj,ntask_cont_to
+      integer,dimension(4) :: itask
+      integer :: itask_cont_to(0:nfgtasks-1)   !(0:max_fg_procs-1)
+      logical :: flag
+!el      integer,dimension(0:nfgtasks) :: iturn3_start_all,iturn3_end_all,iturn4_start_all,&
+!el       iturn4_end_all,iatel_s_all,iatel_e_all       !(0:max_fg_procs)
+!el      integer,dimension(nres,0:nfgtasks-1) :: ielstart_all,ielend_all       !(maxres,0:max_fg_procs-1)
+!el      common /przechowalnia/ iturn3_start_all,iturn3_end_all,iturn4_start_all,&
+!el       iturn4_end_all,iatel_s_all,iatel_e_all,ielstart_all,ielend_all
+      integer :: iproc,isent,k,l
+! Determines whether to send interaction ii,jj to other processors; a given
+! interaction can be sent to at most 2 processors.
+! Sets flag=.true. if interaction ii,jj needs to be sent to at least 
+! one processor, otherwise flag is unchanged from the input value.
+      isent=0
+      itask(1)=fg_rank
+      itask(2)=fg_rank
+      itask(3)=fg_rank
+      itask(4)=fg_rank
+!      write (iout,*) "ii",ii," jj",jj
+! Loop over processors to check if anybody could need interaction ii,jj
+      do iproc=0,fg_rank-1
+! Check if the interaction matches any turn3 at iproc
+        do k=iturn3_start_all(iproc),iturn3_end_all(iproc)
+          l=k+2
+          if (k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1 &
+         .or. k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1 .and. l.eq.jj-1) &
+          then 
+!            write (iout,*) "turn3 to iproc",iproc," ij",ii,jj,"kl",k,l
+!            call flush(iout)
+            flag=.true.
+            if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
+              .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
+              isent=isent+1
+              itask(isent)=iproc
+              call add_task(iproc,ntask_cont_to,itask_cont_to)
+            endif
+          endif
+        enddo
+! Check if the interaction matches any turn4 at iproc
+        do k=iturn4_start_all(iproc),iturn4_end_all(iproc)
+          l=k+3
+          if (k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1 &
+         .or. k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1 .and. l.eq.jj-1) &
+          then 
+!            write (iout,*) "turn3 to iproc",iproc," ij",ii,jj," kl",k,l
+!            call flush(iout)
+            flag=.true.
+            if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
+              .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
+              isent=isent+1
+              itask(isent)=iproc
+              call add_task(iproc,ntask_cont_to,itask_cont_to)
+            endif
+          endif
+        enddo
+        if (iatel_s_all(iproc).gt.0 .and. iatel_e_all(iproc).gt.0 .and. &
+        iatel_s_all(iproc).le.ii-1 .and. iatel_e_all(iproc).ge.ii-1)then
+          if (ielstart_all(ii-1,iproc).le.jj-1.and. &
+              ielend_all(ii-1,iproc).ge.jj-1) then
+            flag=.true.
+            if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
+              .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
+              isent=isent+1
+              itask(isent)=iproc
+              call add_task(iproc,ntask_cont_to,itask_cont_to)
+            endif
+          endif
+          if (ielstart_all(ii-1,iproc).le.jj+1.and. &
+              ielend_all(ii-1,iproc).ge.jj+1) then
+            flag=.true.
+            if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
+              .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
+              isent=isent+1
+              itask(isent)=iproc
+              call add_task(iproc,ntask_cont_to,itask_cont_to)
+            endif
+          endif
+        endif
+      enddo
+      return
+      end subroutine add_int
+!-----------------------------------------------------------------------------
+      subroutine add_int_from(ii,jj,ntask_cont_from,itask_cont_from)
+
+!el      use MPI_data
+!el      implicit none
+!      include "DIMENSIONS"
+!      include "COMMON.INTERACT"
+!      include "COMMON.SETUP"
+!      include "COMMON.IOUNITS"
+      integer :: ii,jj,itask(2),ntask_cont_from,&
+       itask_cont_from(0:nfgtasks-1)   !(0:max_fg_procs)
+      logical :: flag
+!el      integer,dimension(0:nfgtasks) :: iturn3_start_all,iturn3_end_all,&
+!el       iturn4_start_all,iturn4_end_all,iatel_s_all,iatel_e_all      !(0:max_fg_procs)
+!el      integer,dimension(nres,0:nfgtasks-1) :: ielstart_all,ielend_all       !(maxres,0:max_fg_procs-1)
+!el      common /przechowalnia/ iturn3_start_all,iturn3_end_all,iturn4_start_all,&
+!el       iturn4_end_all,iatel_s_all,iatel_e_all,ielstart_all,ielend_all
+      integer :: iproc,k,l
+      do iproc=fg_rank+1,nfgtasks-1
+        do k=iturn3_start_all(iproc),iturn3_end_all(iproc)
+          l=k+2
+          if (k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1.and.l.eq.jj-1 &
+         .or. k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1) &
+          then
+!            write (iout,*)"turn3 from iproc",iproc," ij",ii,jj," kl",k,l
+            call add_task(iproc,ntask_cont_from,itask_cont_from)
+          endif
+        enddo 
+        do k=iturn4_start_all(iproc),iturn4_end_all(iproc)
+          l=k+3
+          if (k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1.and.l.eq.jj-1 &
+         .or. k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1) &
+          then
+!            write (iout,*)"turn4 from iproc",iproc," ij",ii,jj," kl",k,l
+            call add_task(iproc,ntask_cont_from,itask_cont_from)
+          endif
+        enddo 
+        if (iatel_s_all(iproc).gt.0 .and. iatel_e_all(iproc).gt.0) then
+          if (ii+1.ge.iatel_s_all(iproc).and.ii+1.le.iatel_e_all(iproc)) &
+          then
+            if (jj+1.ge.ielstart_all(ii+1,iproc).and. &
+                jj+1.le.ielend_all(ii+1,iproc)) then
+              call add_task(iproc,ntask_cont_from,itask_cont_from)
+            endif            
+            if (jj-1.ge.ielstart_all(ii+1,iproc).and. &
+                jj-1.le.ielend_all(ii+1,iproc)) then
+              call add_task(iproc,ntask_cont_from,itask_cont_from)
+            endif
+          endif
+          if (ii-1.ge.iatel_s_all(iproc).and.ii-1.le.iatel_e_all(iproc)) &
+          then
+            if (jj-1.ge.ielstart_all(ii-1,iproc).and. &
+                jj-1.le.ielend_all(ii-1,iproc)) then
+              call add_task(iproc,ntask_cont_from,itask_cont_from)
+            endif
+            if (jj+1.ge.ielstart_all(ii-1,iproc).and. &
+                jj+1.le.ielend_all(ii-1,iproc)) then
+               call add_task(iproc,ntask_cont_from,itask_cont_from)
+            endif
+          endif
+        endif
+      enddo
+      return
+      end subroutine add_int_from
+!-----------------------------------------------------------------------------
+      subroutine add_task(iproc,ntask_cont,itask_cont)
+
+!el      use MPI_data
+!el      implicit none
+!      include "DIMENSIONS"
+      integer :: iproc,ntask_cont,itask_cont(0:nfgtasks-1)     !(0:max_fg_procs-1)
+      integer :: ii
+      do ii=1,ntask_cont
+        if (itask_cont(ii).eq.iproc) return
+      enddo
+      ntask_cont=ntask_cont+1
+      itask_cont(ntask_cont)=iproc
+      return
+      end subroutine add_task
+#endif
+!-----------------------------------------------------------------------------
+#if defined MPI || defined WHAM_RUN
+      subroutine int_partition(int_index,lower_index,upper_index,atom,&
+       at_start,at_end,first_atom,last_atom,int_gr,jat_start,jat_end,*)
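+! The trailing * is an F77-style alternate-return argument: when this
+! task's share of the index range is exhausted, "return 1" below jumps
+! to the label supplied by the caller (e.g. *12, *13, *14, *15 in
+! init_int_table) instead of returning normally.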
+
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+!      include 'COMMON.IOUNITS'
+      integer :: int_index,lower_index,upper_index,atom,at_start,at_end,&
+       first_atom,last_atom,int_gr,jat_start,jat_end,int_index_old
+      logical :: lprn
+      lprn=.false.
+      if (lprn) write (iout,*) 'int_index=',int_index
+      int_index_old=int_index
+      int_index=int_index+last_atom-first_atom+1
+      if (lprn) &
+         write (iout,*) 'int_index=',int_index,&
+                     ' int_index_old',int_index_old,&
+                     ' lower_index=',lower_index,&
+                     ' upper_index=',upper_index,&
+                     ' atom=',atom,' first_atom=',first_atom,&
+                     ' last_atom=',last_atom
+      if (int_index.ge.lower_index) then
+        int_gr=int_gr+1
+        if (at_start.eq.0) then
+          at_start=atom
+          jat_start=first_atom-1+lower_index-int_index_old
+        else
+          jat_start=first_atom
+        endif
+        if (lprn) write (iout,*) 'jat_start',jat_start
+        if (int_index.ge.upper_index) then
+          at_end=atom
+          jat_end=first_atom-1+upper_index-int_index_old
+          return 1
+        else
+          jat_end=last_atom
+        endif
+        if (lprn) write (iout,*) 'jat_end',jat_end
+      endif
+      return
+      end subroutine int_partition
+#endif
+!-----------------------------------------------------------------------------
+#ifndef CLUSTER
+      subroutine hpb_partition
+
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+#endif
+!      include 'COMMON.SBRIDGE'
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.SETUP'
+#ifdef MPI
+      call int_bounds(nhpb,link_start,link_end)
+      write (iout,*) 'Processor',fg_rank,' CG group',kolor,&
+        ' absolute rank',MyRank,&
+        ' nhpb',nhpb,' link_start=',link_start,&
+        ' link_end',link_end
+#else
+      link_start=1
+      link_end=nhpb
+#endif
+      return
+      end subroutine hpb_partition
+#endif
+!-----------------------------------------------------------------------------
+! misc.f in module io_base
+!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------
+! parmread.F
+!-----------------------------------------------------------------------------
+      subroutine getenv_loc(var, val)
+
+      character(*) :: var, val
+
+#ifdef WINIFL
+      character(len=2000) :: line
+      integer :: iread
+      integer,external :: ilen ! helper returning the significant length of a string
+
+      open (196,file='env',status='old',readonly,shared)
+      iread=0
+!      write(*,*)'looking for ',var
+10    read(196,*,err=11,end=11)line
+      iread=index(line,var)
+!      write(*,*)iread,' ',var,' ',line
+      if (iread.eq.0) go to 10 
+!      write(*,*)'---> ',line
+11    continue
+      if(iread.eq.0) then
+!       write(*,*)'variable not found'
+       val=''
+      else
+       iread=iread+ilen(var)+1
+       read (line(iread:),*,err=12,end=12) val
+!       write(*,*)'OK: ',var,' = ',val
+      endif
+      close(196)
+      return
+12    val=''
+      close(196)
+#elif (defined CRAY)
+      integer :: lennam,lenval,ierror
+!
+!        getenv using a POSIX call, useful on the T3D
+!        Sept 1996, comment out error check on advice of H. Pritchard
+!
+      lennam = len(var)
+      if(lennam.le.0) stop '--error calling getenv--'
+      call pxfgetenv(var,lennam,val,lenval,ierror)
+!-HP- if(ierror.ne.0) stop '--error returned by pxfgetenv--'
+#else
+      call getenv(var,val)
+#endif
+
+      return
+      end subroutine getenv_loc
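+!
+! Usage sketch (illustrative; the variable name parname is an assumption):
+!      character(len=256) :: parname
+!      call getenv_loc('BONDPAR',parname)
+! On most platforms this reduces to the GETENV intrinsic; the WINIFL
+! branch instead scans a local file named 'env' for a line containing the
+! variable name followed by its value.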
+!-----------------------------------------------------------------------------
+! readrtns_CSA.F
+!-----------------------------------------------------------------------------
+      subroutine setup_var
+
+      integer :: i
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.GEO'
+!      include 'COMMON.VAR'
+!      include 'COMMON.INTERACT'
+!      include 'COMMON.LOCAL'
+!      include 'COMMON.NAMES'
+!      include 'COMMON.CHAIN'
+!      include 'COMMON.FFIELD'
+!      include 'COMMON.SBRIDGE'
+!      include 'COMMON.HEADER'
+!      include 'COMMON.CONTROL'
+!      include 'COMMON.DBASE'
+!      include 'COMMON.THREAD'
+!      include 'COMMON.TIME1'
+! Set up the variable list: backbone angles (theta, phi) plus, for each
+! residue with a side chain, the two side-chain angles (alpha, omega).
+      ntheta=nres-2
+      nphi=nres-3
+      nvar=ntheta+nphi
+      nside=0
+      do i=2,nres-1
+#ifdef WHAM_RUN
+        if (itype(i).ne.10) then
+#else
+        if (itype(i).ne.10 .and. itype(i).ne.ntyp1) then
+#endif
+          nside=nside+1
+          ialph(i,1)=nvar+nside
+          ialph(nside,2)=i
+        endif
+      enddo
+      if (indphi.gt.0) then
+        nvar=nphi
+      else if (indback.gt.0) then
+        nvar=nphi+ntheta
+      else
+        nvar=nvar+2*nside
+      endif
+!d    write (iout,'(3i4)') (i,ialph(i,1),ialph(i,2),i=2,nres-1)
+      return
+      end subroutine setup_var
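+!
+! Worked example (illustrative): for nres=10 with all residues 2..9
+! bearing side chains (itype.ne.10), ntheta=8, nphi=7 and nside=8, so
+! with indphi=0 and indback=0 the full count is nvar = 8+7+2*8 = 31
+! (backbone angles plus alpha and omega for each side chain).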
+!-----------------------------------------------------------------------------
+! rescode.f
+!-----------------------------------------------------------------------------
+      integer function rescode(iseq,nam,itype)
+
+      use io_base, only: ucase
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+!      include 'COMMON.NAMES'
+!      include 'COMMON.IOUNITS'
+      character(len=3) :: nam  !,ucase
+      integer :: iseq,itype,i
+
+      if (itype.eq.0) then
+! three-letter residue names: compare against restyp (case-insensitive)
+        do i=-ntyp1,ntyp1
+          if (ucase(nam).eq.restyp(i)) then
+            rescode=i
+            return
+          endif
+        enddo
+      else
+! one-letter residue codes: compare against onelet
+        do i=-ntyp1,ntyp1
+          if (nam(1:1).eq.onelet(i)) then
+            rescode=i
+            return
+          endif
+        enddo
+      endif
+      write (iout,10) iseq,nam
+      stop
+   10 format ('**** Error - residue',i4,' has an unresolved name ',a3)
+      end function rescode
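+!
+! Usage sketch (illustrative): when reading a sequence,
+!      itype(i)=rescode(i,'ALA',0)
+! matches the three-letter name against restyp, while a nonzero last
+! argument, e.g. rescode(i,'A  ',1), matches the first character against
+! the one-letter codes in onelet; an unresolved name stops the run.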
+!-----------------------------------------------------------------------------
+! timing.F
+!-----------------------------------------------------------------------------
+! $Date: 1994/10/05 16:41:52 $
+! $Revision: 2.2 $
+!
+      subroutine set_timers
+!
+!el      implicit none
+!el      real(kind=8) :: tcpu
+!      include 'COMMON.TIME1'
+!#ifdef MP
+#ifdef MPI
+      include 'mpif.h'
+#endif
+! Diminish the assigned time limit a little so that there is some time
+! left to wind up a batch job.
+!     timlim=batime-150.0
+! Record the initial CPU time; it may be nonzero on some systems (e.g. SUN).
+      stime=tcpu()
+#if !defined(WHAM_RUN) && !defined(CLUSTER)
+#ifdef MPI
+      walltime=MPI_WTIME()
+      time_reduce=0.0d0
+      time_allreduce=0.0d0
+      time_bcast=0.0d0
+      time_gather=0.0d0
+      time_sendrecv=0.0d0
+      time_scatter=0.0d0
+      time_scatter_fmat=0.0d0
+      time_scatter_ginv=0.0d0
+      time_scatter_fmatmult=0.0d0
+      time_scatter_ginvmult=0.0d0
+      time_barrier_e=0.0d0
+      time_barrier_g=0.0d0
+      time_enecalc=0.0d0
+      time_sumene=0.0d0
+      time_lagrangian=0.0d0
+      time_sumgradient=0.0d0
+      time_intcartderiv=0.0d0
+      time_inttocart=0.0d0
+      time_ginvmult=0.0d0
+      time_fricmatmult=0.0d0
+      time_cartgrad=0.0d0
+      time_bcastc=0.0d0
+      time_bcast7=0.0d0
+      time_bcastw=0.0d0
+      time_intfcart=0.0d0
+      time_vec=0.0d0
+      time_mat=0.0d0
+      time_fric=0.0d0
+      time_stoch=0.0d0
+      time_fsample=0.0d0
+#endif
+#endif
+!d    print *,' in SET_TIMERS stime=',stime
+      return
+      end subroutine set_timers
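+!
+! set_timers is meant to be called once at program startup (illustrative):
+!      call set_timers
+! It records the reference times (stime, walltime) and zeroes the MPI
+! communication counters that print_detailed_timing reports later.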
+!-----------------------------------------------------------------------------
+#ifndef CLUSTER
+      logical function stopx(nf)
+! This function returns .true. if one of the following reasons to exit SUMSL
+! occurs. The "reason" code is stored in the variable WhatsUp (formerly
+! passed through a COMMON block):
+!
+!... WHATSUP = 0 - go on, no reason to stop. Stopx will return .false.
+!...           1 - Time up in current node;
+!...           2 - STOP signal was received from another node because the
+!...               node's task was accomplished (parallel only);
+!...          -1 - STOP signal was received from another node because of error;
+!...          -2 - STOP signal was received from another node, because 
+!...               the node's time was up.
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+!el#ifdef WHAM_RUN
+!el      use control_data, only:WhatsUp
+!el#endif
+#ifdef MPI
+!el      use MPI_data  !include 'COMMON.INFO'
+      include 'mpif.h'
+#endif
+      integer :: nf
+!el      logical :: ovrtim
+
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.TIME1'
+      integer :: Kwita
+
+!d    print *,'Processor',MyID,' NF=',nf
+!d      write (iout,*) "stopx: ",nf
+#ifndef WHAM_RUN
+#ifndef MPI
+      if (ovrtim()) then
+! Finish if time is up.
+         stopx = .true.
+         WhatsUp=1
+#ifdef MPL
+      else if (mod(nf,100).eq.0) then
+! Other processors might have finished. Check this every 100th function 
+! evaluation.
+! Master checks if any other processor has sent accepted conformation(s) to it. 
+         if (MyID.ne.MasterID) call receive_mcm_info
+         if (MyID.eq.MasterID) call receive_conf
+!d       print *,'Processor ',MyID,' is checking STOP: nf=',nf
+         call recv_stop_sig(Kwita)
+         if (Kwita.eq.-1) then
+           write (iout,'(a,i4,a,i5)') 'Processor',&
+           MyID,' has received STOP signal in STOPX; NF=',nf
+           write (*,'(a,i4,a,i5)') 'Processor',&
+           MyID,' has received STOP signal in STOPX; NF=',nf
+           stopx=.true.
+           WhatsUp=2
+         elseif (Kwita.eq.-2) then
+           write (iout,*) &
+          'Processor',MyID,' received TIMEUP-STOP signal in SUMSL.'
+           write (*,*) &
+          'Processor',MyID,' received TIMEUP-STOP signal in SUMSL.'
+           WhatsUp=-2
+           stopx=.true.  
+         else if (Kwita.eq.-3) then
+           write (iout,*) &
+          'Processor',MyID,' received ERROR-STOP signal in SUMSL.'
+           write (*,*) &
+          'Processor',MyID,' received ERROR-STOP signal in SUMSL.'
+           WhatsUp=-1
+           stopx=.true.
+         else
+           stopx=.false.
+           WhatsUp=0
+         endif
+#endif
+      else
+         stopx = .false.
+         WhatsUp=0
+      endif
+#else
+      stopx=.false.
+!d      write (iout,*) "stopx set at .false."
+#endif
+
+#ifdef OSF
+! Check for FOUND_NAN flag
+      if (FOUND_NAN) then
+        write(iout,*)"   ***   stopx : Found a NaN"
+        stopx=.true.
+      endif
+#endif
+#else
+      if (ovrtim()) then
+! Finish if time is up.
+         stopx = .true.
+         WhatsUp=1
+      else if (cutoffviol) then
+        stopx = .true.
+        WhatsUp=2
+      else
+        stopx=.false.
+      endif
+#endif
+      return
+      end function stopx
+!-----------------------------------------------------------------------------
+#else
+      logical function stopx(nf)
+!
+!     ..................................................................
+!
+!     *****PURPOSE...
+!     THIS FUNCTION MAY SERVE AS THE STOPX (ASYNCHRONOUS INTERRUPTION)
+!     FUNCTION FOR THE NL2SOL (NONLINEAR LEAST-SQUARES) PACKAGE AT
+!     THOSE INSTALLATIONS WHICH DO NOT WISH TO IMPLEMENT A
+!     DYNAMIC STOPX.
+!
+!     *****ALGORITHM NOTES...
+!     AT INSTALLATIONS WHERE THE NL2SOL SYSTEM IS USED
+!     INTERACTIVELY, THIS DUMMY STOPX SHOULD BE REPLACED BY A
+!     FUNCTION THAT RETURNS .TRUE. IF AND ONLY IF THE INTERRUPT
+!     (BREAK) KEY HAS BEEN PRESSED SINCE THE LAST CALL ON STOPX.
+!
+!     $$$ MODIFIED FOR USE AS THE TIMER ROUTINE.
+!     $$$                              WHEN THE TIME LIMIT HAS BEEN
+!     $$$ REACHED, STOPX IS SET TO .TRUE. AND INITIATES (IN ITSUM)
+!     $$$ AN ORDERLY EXIT OUT OF SUMSL.  IF ARRAYS IV AND V ARE
+!     $$$ SAVED, THE SUMSL ROUTINES CAN BE RESTARTED AT THE SAME
+!     $$$ POINT AT WHICH THEY WERE INTERRUPTED.
+!
+!     ..................................................................
+!
+!      include 'DIMENSIONS'
+      integer :: nf
+!      logical ovrtim
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.TIME1'
+#ifdef MPL
+!     include 'COMMON.INFO'
+      integer :: Kwita
+
+!d    print *,'Processor',MyID,' NF=',nf
+#endif
+      if (ovrtim()) then
+! Finish if time is up.
+         stopx = .true.
+#ifdef MPL
+      else if (mod(nf,100).eq.0) then
+! Other processors might have finished. Check this every 100th function 
+! evaluation.
+!d       print *,'Processor ',MyID,' is checking STOP: nf=',nf
+         call recv_stop_sig(Kwita)
+         if (Kwita.eq.-1) then
+           write (iout,'(a,i4,a,i5)') 'Processor',&
+           MyID,' has received STOP signal in STOPX; NF=',nf
+           write (*,'(a,i4,a,i5)') 'Processor',&
+           MyID,' has received STOP signal in STOPX; NF=',nf
+           stopx=.true.
+         else
+           stopx=.false.
+         endif
+#endif
+      else
+         stopx = .false.
+      endif
+      return
+      end function stopx
+#endif
+!-----------------------------------------------------------------------------
+      logical function ovrtim()
+
+!      include 'DIMENSIONS'
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.TIME1'
+!el      real(kind=8) :: tcpu
+      real(kind=8) :: curtim
+#ifdef MPI
+      include "mpif.h"
+      curtim = MPI_Wtime()-walltime
+#else
+      curtim= tcpu()
+#endif
+!  curtim is the current time in seconds.
+!      write (iout,*) "curtim",curtim," timlim",timlim," safety",safety
+#ifndef WHAM_RUN
+      if (curtim .ge. timlim - safety) then
+        write (iout,'(a,f10.2,a,f10.2,a,f10.2,a)') &
+        "***************** Elapsed time (",curtim,&
+        " s) is within the safety limit (",safety,&
+        " s) of the allocated time (",timlim," s). Terminating."
+        ovrtim=.true.
+      else
+        ovrtim=.false.
+      endif
+#else
+      ovrtim=.false.
+#endif
+!elwrite (iout,*) "ovrtim",ovrtim
+      return
+      end function ovrtim
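+!
+! Illustrative example: with timlim=3600 s and safety=150 s, ovrtim starts
+! returning .true. once the elapsed time reaches 3450 s, leaving the
+! safety margin for writing restart information and an orderly shutdown.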
+!-----------------------------------------------------------------------------
+      real(kind=8) function tcpu()
+
+!      include 'COMMON.TIME1'
+      real(kind=8) :: seconds
+#ifdef ES9000
+!***************************
+! Next definition for EAGLE (ibm-es9000)
+      real(kind=8) :: micseconds
+      real(kind=8),external :: cputime
+      integer :: rcode
+      tcpu=cputime(micseconds,rcode)
+      tcpu=(micseconds/1.0E6) - stime
+!***************************
+#endif
+#ifdef SUN
+!***************************
+! Next definitions for sun
+      REAL(kind=8) ::  ECPU,ETIME,ETCPU
+      real(kind=8),dimension(2) :: tarray
+      tcpu=etime(tarray)
+      tcpu=tarray(1)
+!***************************
+#endif
+#ifdef KSR
+!***************************
+! Next definitions for ksr
+! this function uses the ksr timer ALL_SECONDS from the PMON library to
+! return the elapsed time in seconds
+      tcpu= all_seconds() - stime
+!***************************
+#endif
+#ifdef SGI
+!***************************
+! Next definitions for sgi
+      real(kind=4) :: timar(2), etime
+      seconds = etime(timar)
+!d    print *,'seconds=',seconds,' stime=',stime
+!      usrsec = timar(1)
+!      syssec = timar(2)
+      tcpu=seconds - stime
+!***************************
+#endif
+
+#ifdef LINUX
+!***************************
+! Next definitions for Linux (same etime-based timing as on SGI)
+      real(kind=4) :: timar(2), etime
+      seconds = etime(timar)
+!d    print *,'seconds=',seconds,' stime=',stime
+!      usrsec = timar(1)
+!      syssec = timar(2)
+      tcpu=seconds - stime
+!***************************
+#endif
+
+
+#ifdef CRAY
+!***************************
+! Next definitions for Cray
+      real(kind=8) :: cpusec
+      real(kind=8),external :: second
+!     call date(curdat)
+!     curdat=curdat(1:9)
+!     call clock(curtim)
+!     curtim=curtim(1:8)
+      cpusec = second()
+      tcpu=cpusec - stime
+!***************************
+#endif
+#ifdef AIX
+!***************************
+! Next definitions for RS6000
+       integer(kind=4) :: i1,mclock
+       i1 = mclock()
+       tcpu = (i1+0.0D0)/100.0D0
+#endif
+#ifdef WINPGI
+!***************************
+! Next definitions for Windows NT, PGI Fortran
+       real(kind=4) :: time_real
+       call cpu_time(time_real)
+       tcpu = time_real
+#endif
+#ifdef WINIFL
+!***************************
+! Next definitions for Windows NT, Intel Fortran
+       real(kind=4) :: time_real
+       call cpu_time(time_real)
+       tcpu = time_real
+#endif
+!el fallback: ensures tcpu is defined, but also overrides the values above
+      tcpu = 0d0 !el
+      return
+      end function tcpu
+!-----------------------------------------------------------------------------
+#ifndef CLUSTER
+      subroutine dajczas(rntime,hrtime,mintime,sectime)
+
+!      include 'COMMON.IOUNITS'
+      integer :: ihr,imn,isc
+      real(kind=8) :: rntime,hrtime,mintime,sectime 
+      hrtime=rntime/3600.0D0 
+      hrtime=aint(hrtime)
+      mintime=aint((rntime-3600.0D0*hrtime)/60.0D0)
+      sectime=aint((rntime-3600.0D0*hrtime-60.0D0*mintime)+0.5D0)
+      if (sectime.eq.60.0D0) then
+        sectime=0.0D0
+        mintime=mintime+1.0D0
+      endif
+      ihr=hrtime
+      imn=mintime
+      isc=sectime
+      write (iout,328) ihr,imn,isc
+  328 FORMAT(//'***** Computation time: ',I4  ,' hours ',I2  ,&
+               ' minutes ', I2  ,' seconds *****')       
+      return
+      end subroutine dajczas
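+!
+! Worked example (illustrative): rntime=3725.0 s gives hrtime=1,
+! mintime=2, sectime=5, printed via format 328 as
+! "***** Computation time:    1 hours  2 minutes  5 seconds *****".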
+!-----------------------------------------------------------------------------
+      subroutine print_detailed_timing
+
+!el      use MPI_data
+!      implicit real*8 (a-h,o-z)
+!      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+#endif
+!      include 'COMMON.IOUNITS'
+!      include 'COMMON.TIME1'
+!      include 'COMMON.SETUP'
+      real(kind=8) :: time1,time_barrier
+      time_barrier = 0.0d0
+#ifdef MPI
+      time1=MPI_WTIME()
+#endif
+         write (iout,'(80(1h=)/a/(80(1h=)))') &
+          "Details of FG communication time"
+         write (*,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))') &
+          "BROADCAST:",time_bcast,"REDUCE:",time_reduce,&
+          "GATHER:",time_gather,&
+          "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,&
+          "BARRIER ene",time_barrier_e,&
+          "BARRIER grad",time_barrier_g,&
+          "TOTAL:",&
+          time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
+         write (*,*) fg_rank,myrank,&
+           ': Total wall clock time',time1-walltime,' sec'
+         write (*,*) "Processor",fg_rank,myrank,&
+           ": BROADCAST time",time_bcast," REDUCE time",&
+            time_reduce," GATHER time",time_gather," SCATTER time",&
+            time_scatter,&
+           " SCATTER fmatmult",time_scatter_fmatmult,&
+           " SCATTER ginvmult",time_scatter_ginvmult,&
+           " SCATTER fmat",time_scatter_fmat,&
+           " SCATTER ginv",time_scatter_ginv,&
+            " SENDRECV",time_sendrecv,&
+            " BARRIER ene",time_barrier_e,&
+            " BARRIER GRAD",time_barrier_g,&
+            " BCAST7",time_bcast7," BCASTC",time_bcastc,&
+            " BCASTW",time_bcastw," ALLREDUCE",time_allreduce,&
+            " TOTAL",&
+            time_bcast+time_reduce+time_gather+time_scatter+ &
+            time_sendrecv+time_barrier+time_bcastc
+!el#endif
+         write (*,*) "Processor",fg_rank,myrank," enecalc",time_enecalc
+         write (*,*) "Processor",fg_rank,myrank," sumene",time_sumene
+         write (*,*) "Processor",fg_rank,myrank," intfromcart",&
+           time_intfcart
+         write (*,*) "Processor",fg_rank,myrank," vecandderiv",&
+           time_vec
+         write (*,*) "Processor",fg_rank,myrank," setmatrices",&
+           time_mat
+         write (*,*) "Processor",fg_rank,myrank," ginvmult",&
+           time_ginvmult
+         write (*,*) "Processor",fg_rank,myrank," fricmatmult",&
+           time_fricmatmult
+         write (*,*) "Processor",fg_rank,myrank," inttocart",&
+           time_inttocart
+         write (*,*) "Processor",fg_rank,myrank," sumgradient",&
+           time_sumgradient
+         write (*,*) "Processor",fg_rank,myrank," intcartderiv",&
+           time_intcartderiv
+         if (fg_rank.eq.0) then
+           write (*,*) "Processor",fg_rank,myrank," lagrangian",&
+             time_lagrangian
+           write (*,*) "Processor",fg_rank,myrank," cartgrad",&
+             time_cartgrad
+         endif
+      return
+      end subroutine print_detailed_timing
+#endif
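+!
+! print_detailed_timing is typically invoked once at the end of a run
+! (illustrative; the guard below is an assumption):
+!      if (fg_rank.eq.0) call print_detailed_timing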
+!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------
+      end module control