[unres4.git] / source / unres / control.f90
1       module control
2 !-----------------------------------------------------------------------------
3       use io_units
4       use names
5       use MPI_data
6       use geometry_data
7       use energy_data
8       use control_data
9       use minim_data
10       use geometry, only:int_bounds
11 #ifndef CLUSTER
12       use csa_data
13 #ifdef WHAM_RUN
14       use wham_data
15 #endif
16 #endif
17       implicit none
18 !-----------------------------------------------------------------------------
19 ! common.control
20 !      common /cntrl/
21 !      integer :: modecalc,iscode,indpdb,indback,indphi,iranconf,&
22 !       icheckgrad,iprint,i2ndstr,mucadyn,constr_dist,symetr
23 !      logical :: minim,refstr,pdbref,outpdb,outmol2,overlapsc,&
24 !       energy_dec,sideadd,lsecondary,read_cart,unres_pdb,&
25 !       vdisulf,searchsc,lmuca,dccart,extconf,out1file,&
26 !       gnorm_check,gradout,split_ene
27 !... minim = .true. means DO minimization.
28 !... energy_dec = .true. means print energy decomposition matrix
29 !-----------------------------------------------------------------------------
30 ! common.time1
31 !     FOUND_NAN - set by calcf to stop sumsl via stopx
32 !      COMMON/TIME1/
33       real(kind=8) :: STIME,BATIME,PREVTIM,RSTIME
34 !el      real(kind=8) :: TIMLIM,SAFETY
35 !el      real(kind=8) :: WALLTIME
36 !      COMMON/STOPTIM/
37       integer :: ISTOP
38 !      common /sumsl_flag/
39       logical :: FOUND_NAN
40 !      common /timing/
41       real(kind=8) :: t_init
42 !       time_bcast,time_reduce,time_gather,&
43 !       time_sendrecv,time_barrier_e,time_barrier_g,time_scatter,&
44        !t_eelecij,
45 !       time_allreduce,&
46 !       time_lagrangian,time_cartgrad,&
47 !       time_sumgradient,time_intcartderiv,time_inttocart,time_intfcart,&
48 !       time_mat,time_fricmatmult,&
49 !       time_scatter_fmat,time_scatter_ginv,&
50 !       time_scatter_fmatmult,time_scatter_ginvmult,&
51 !       t_eshort,t_elong,t_etotal
52 !-----------------------------------------------------------------------------
53 ! initialize_p.F
54 !-----------------------------------------------------------------------------
55 !      block data
56 !      integer,parameter :: MaxMoveType = 4
57 !      character(len=14),dimension(-1:MaxMoveType+1) :: MovTypID=(/'pool','chain regrow',&
58 !      character :: MovTypID(-1:MaxMoveType+1)=(/'pool','chain regrow',&
59 !       'multi-bond','phi','theta','side chain','total'/)
60 ! Conversion from poises to molecular unit and the gas constant
61 !el      real(kind=8) :: cPoise=2.9361d0, Rb=0.001986d0
62 !-----------------------------------------------------------------------------
63 !      common /przechowalnia/ subroutines: init_int_table,add_int,add_int_from
64       integer,dimension(:),allocatable :: iturn3_start_all,&
65         iturn3_end_all,iturn4_start_all,iturn4_end_all,iatel_s_all,&
66         iatel_e_all !(0:max_fg_procs)
67       integer,dimension(:,:),allocatable :: ielstart_all,&
68         ielend_all !(maxres,0:max_fg_procs-1)
69
70 !      common /przechowalnia/ subroutine: init_int_table
71       integer,dimension(:),allocatable :: ntask_cont_from_all,&
72         ntask_cont_to_all !(0:max_fg_procs-1)
73       integer,dimension(:,:),allocatable :: itask_cont_from_all,&
74         itask_cont_to_all !(0:max_fg_procs-1,0:max_fg_procs-1)
75 !-----------------------------------------------------------------------------
76 !
77 !
78 !-----------------------------------------------------------------------------
79       contains
80 !-----------------------------------------------------------------------------
81 ! initialize_p.F
82 !-----------------------------------------------------------------------------
83       subroutine initialize
84 !
85 ! Define constants and zero out tables.
86 !
87       use comm_iofile
88       use comm_machsw
89       use MCM_data, only: MovTypID
90 !      implicit real*8 (a-h,o-z)
91 !      include 'DIMENSIONS'
92 #ifdef MPI
93       include 'mpif.h'
94 #endif
95 #ifndef ISNAN
96       external proc_proc
97 #ifdef WINPGI
98 !MS$ATTRIBUTES C ::  proc_proc
99 #endif
100 #endif
101 !      include 'COMMON.IOUNITS'
102 !      include 'COMMON.CHAIN'
103 !      include 'COMMON.INTERACT'
104 !      include 'COMMON.GEO'
105 !      include 'COMMON.LOCAL'
106 !      include 'COMMON.TORSION'
107 !      include 'COMMON.FFIELD'
108 !      include 'COMMON.SBRIDGE'
109 !      include 'COMMON.MCM'
110 !      include 'COMMON.MINIM' 
111 !      include 'COMMON.DERIV'
112 !      include 'COMMON.SPLITELE'
113 !      implicit none
114 ! Common blocks from the diagonalization routines
115 !el      integer :: IR,IW,IP,IJK,IPK,IDAF,NAV,IODA(400)
116 !el      integer :: KDIAG,ICORFL,IXDR
117 !el      COMMON /IOFILE/ IR,IW,IP,IJK,IPK,IDAF,NAV,IODA
118 !el      COMMON /MACHSW/ KDIAG,ICORFL,IXDR
119       logical :: mask_r
120 !      real*8 text1 /'initial_i'/
121       real(kind=4) :: rr
122
123 !local variables el
124       integer :: i,j,k,l,ichir1,ichir2,iblock,m,maxit
125
126 #if .not. defined(WHAM_RUN) && .not. defined(CLUSTER)
127       mask_r=.false.
128 #ifndef ISNAN
129 ! NaNQ initialization
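! (Note: dacos(100.0d0) lies outside the domain of acos, so on compilers that
!  do not trap the error it yields a quiet NaN; the result rr itself is never
!  used.  Presumably the call to proc_proc below registers this NaN so that
!  the FOUND_NAN/stopx machinery can later detect NaNs produced by calcf.)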
130       i=-1
131       rr=dacos(100.0d0)
132 #ifdef WINPGI
133       idumm=proc_proc(rr,i)
134 #elif defined(WHAM_RUN)
135       call proc_proc(rr,i)
136 #endif
137 #endif
138
139       kdiag=0
140       icorfl=0
141       iw=2
142       
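! The literals below are blank-padded to a common length because a Fortran
! character array constructor requires all elements to have the same length.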
143       allocate(MovTypID(-1:MaxMoveType+1))
144       MovTypID=(/'pool          ','chain regrow  ',&
145        'multi-bond    ','phi           ','theta         ',&
146        'side chain    ','total         '/)
147 #endif
148 !
149 ! The following is just to define auxiliary variables used in angle conversion
150 !
151       pi=4.0D0*datan(1.0D0)
152       dwapi=2.0D0*pi
153       dwapi3=dwapi/3.0D0
154       pipol=0.5D0*pi
155       deg2rad=pi/180.0D0
156       rad2deg=1.0D0/deg2rad
157       angmin=10.0D0*deg2rad
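! With these definitions, e.g., 90.0d0*deg2rad equals pipol, dwapi3 is the
! 120-degree angle in radians, and angmin corresponds to 10 degrees.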
158 !el#ifdef CLUSTER
159 !el      Rgas = 1.987D-3
160 !el#endif
161 !
162 ! Define I/O units.
163 !
164       inp=    1
165       iout=   2
166       ipdbin= 3
167       ipdb=   7
168 #ifdef CLUSTER
169       imol2= 18
170       jplot= 19
171 !el      jstatin=10
172       imol2=  4
173       jrms=30
174 #else
175       icart = 30
176       imol2=  4
177       ithep_pdb=51
178       irotam_pdb=52
179       irest1=55
180       irest2=56
181       iifrag=57
182       ientin=18
183       ientout=19
184 !rc for write_rmsbank1  
185       izs1=21
186 !dr  include secondary structure prediction bias
187       isecpred=27
188 #endif
189       igeom=  8
190       intin=  9
191       ithep= 11
192       irotam=12
193       itorp= 13
194       itordp= 23
195       ielep= 14
196       isidep=15
197 #if defined(WHAM_RUN) || defined(CLUSTER)
198       isidep1=22 !wham
199 #else
200 !
201 ! CSA I/O units (separated from others especially for Jooyoung)
202 !
203       icsa_rbank=30
204       icsa_seed=31
205       icsa_history=32
206       icsa_bank=33
207       icsa_bank1=34
208       icsa_alpha=35
209       icsa_alpha1=36
210       icsa_bankt=37
211       icsa_int=39
212       icsa_bank_reminimized=38
213       icsa_native_int=41
214       icsa_in=40
215 !rc for ifc error 118
216       icsa_pdb=42
217 #endif
218       iscpp=25
219       icbase=16
220       ifourier=20
221       istat= 17
222       ibond = 28
223       isccor = 29
224 #ifdef WHAM_RUN
225 !
226 ! WHAM files
227 !
228       ihist=30
229       iweight=31
230       izsc=32
231 #endif
232 #if defined(WHAM_RUN) || defined(CLUSTER)
233 !
234 ! setting the MPI variables for WHAM
235 !
236       fgprocs=1
237       nfgtasks=1
238       nfgtasks1=1
239 #endif
240 !
241 ! Set default weights of the energy terms.
242 !
243       wsc=1.0D0 ! in wham:  wlong=1.0D0
244       welec=1.0D0
245       wtor =1.0D0
246       wang =1.0D0
247       wscloc=1.0D0
248       wstrain=1.0D0
249 !
250 ! Zero out tables.
251 !
252 !      print '(a,$)','Inside initialize'
253 !      call memmon_print_usage()
254       
255 !      do i=1,maxres2
256 !       do j=1,3
257 !         c(j,i)=0.0D0
258 !         dc(j,i)=0.0D0
259 !       enddo
260 !      enddo
261 !      do i=1,maxres
262 !       do j=1,3
263 !         xloc(j,i)=0.0D0
264 !        enddo
265 !      enddo
266 !      do i=1,ntyp
267 !       do j=1,ntyp
268 !         aa(i,j)=0.0D0
269 !         bb(i,j)=0.0D0
270 !         augm(i,j)=0.0D0
271 !         sigma(i,j)=0.0D0
272 !         r0(i,j)=0.0D0
273 !         chi(i,j)=0.0D0
274 !        enddo
275 !       do j=1,2
276 !         bad(i,j)=0.0D0
277 !        enddo
278 !       chip(i)=0.0D0
279 !       alp(i)=0.0D0
280 !       sigma0(i)=0.0D0
281 !       sigii(i)=0.0D0
282 !       rr0(i)=0.0D0
283 !       a0thet(i)=0.0D0
284 !       do j=1,2
285 !         do ichir1=-1,1
286 !          do ichir2=-1,1
287 !          athet(j,i,ichir1,ichir2)=0.0D0
288 !          bthet(j,i,ichir1,ichir2)=0.0D0
289 !          enddo
290 !         enddo
291 !        enddo
292 !        do j=0,3
293 !         polthet(j,i)=0.0D0
294 !        enddo
295 !       do j=1,3
296 !         gthet(j,i)=0.0D0
297 !        enddo
298 !       theta0(i)=0.0D0
299 !       sig0(i)=0.0D0
300 !       sigc0(i)=0.0D0
301 !       do j=1,maxlob
302 !         bsc(j,i)=0.0D0
303 !         do k=1,3
304 !           censc(k,j,i)=0.0D0
305 !          enddo
306 !          do k=1,3
307 !           do l=1,3
308 !             gaussc(l,k,j,i)=0.0D0
309 !            enddo
310 !          enddo
311 !         nlob(i)=0
312 !        enddo
313 !      enddo
314 !      nlob(ntyp1)=0
315 !      dsc(ntyp1)=0.0D0
316 !      do i=-maxtor,maxtor
317 !        itortyp(i)=0
318 !c      write (iout,*) "TU DOCHODZE",i,itortyp(i)
319 !       do iblock=1,2
320 !        do j=-maxtor,maxtor
321 !          do k=1,maxterm
322 !            v1(k,j,i,iblock)=0.0D0
323 !            v2(k,j,i,iblock)=0.0D0
324 !          enddo
325 !        enddo
326 !        enddo
327 !      enddo
328 !      do iblock=1,2
329 !       do i=-maxtor,maxtor
330 !        do j=-maxtor,maxtor
331 !         do k=-maxtor,maxtor
332 !          do l=1,maxtermd_1
333 !            v1c(1,l,i,j,k,iblock)=0.0D0
334 !            v1s(1,l,i,j,k,iblock)=0.0D0
335 !            v1c(2,l,i,j,k,iblock)=0.0D0
336 !            v1s(2,l,i,j,k,iblock)=0.0D0
337 !          enddo !l
338 !          do l=1,maxtermd_2
339 !           do m=1,maxtermd_2
340 !            v2c(m,l,i,j,k,iblock)=0.0D0
341 !            v2s(m,l,i,j,k,iblock)=0.0D0
342 !           enddo !m
343 !          enddo !l
344 !        enddo !k
345 !       enddo !j
346 !      enddo !i
347 !      enddo !iblock
348
349 !      do i=1,maxres
350 !       itype(i)=0
351 !       itel(i)=0
352 !      enddo
353 ! Initialize the bridge arrays
354       ns=0
355       nss=0 
356       nhpb=0
357 !      do i=1,maxss
358 !       iss(i)=0
359 !      enddo
360 !      do i=1,maxdim
361 !       dhpb(i)=0.0D0
362 !      enddo
363 !      do i=1,maxres
364 !       ihpb(i)=0
365 !       jhpb(i)=0
366 !      enddo
367 !
368 ! Initialize timing.
369 !
370       call set_timers
371 !
372 ! Initialize variables used in minimization.
373 !   
374 !c     maxfun=5000
375 !c     maxit=2000
376       maxfun=500
377       maxit=200
378       tolf=1.0D-2
379       rtolf=5.0D-4
380
381 ! Initialize the variables responsible for the mode of gradient storage.
382 !
383       nfl=0
384       icg=1
385       
386 #ifdef WHAM_RUN
387       allocate(iww(max_eneW))
388       do i=1,14
389         do j=1,14
390           if (print_order(i).eq.j) then
391             iww(print_order(i))=j
392             goto 1121
393           endif
394         enddo
395 1121    continue
396       enddo
397 #endif
398  
399 #if defined(WHAM_RUN) || defined(CLUSTER)
400       ndih_constr=0
401
402 !      allocate(ww0(max_eneW))
403 !      ww0 = reshape((/1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,&
404 !          1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,1.0d0,0.4d0,1.0d0,&
405 !          1.0d0,0.0d0,0.0/), shape(ww0))
406 !
407       calc_grad=.false.
408 ! Set timers and counters for the respective routines
409       t_func = 0.0d0
410       t_grad = 0.0d0
411       t_fhel = 0.0d0
412       t_fbet = 0.0d0
413       t_ghel = 0.0d0
414       t_gbet = 0.0d0
415       t_viol = 0.0d0
416       t_gviol = 0.0d0
417       n_func = 0
418       n_grad = 0
419       n_fhel = 0
420       n_fbet = 0
421       n_ghel = 0
422       n_gbet = 0
423       n_viol = 0
424       n_gviol = 0
425       n_map = 0
426 #endif
427 !
428 ! Initialize constants used to split the energy into long- and short-range
429 ! components
430 !
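! (Presumably r_cut is the cutoff and rlamb the width of the smooth switching
!  function used for this splitting; see the energy routines for the exact
!  definition and units.)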
431       r_cut=2.0d0
432       rlamb=0.3d0
433 #ifndef SPLITELE
434       nprint_ene=nprint_ene-1
435 #endif
436       return
437       end subroutine initialize
438 !-----------------------------------------------------------------------------
439       subroutine init_int_table
440
441       use geometry, only:int_bounds1
442 !el      use MPI_data
443 !el      implicit none
444 !      implicit real*8 (a-h,o-z)
445 !      include 'DIMENSIONS'
446 #ifdef MPI
447       include 'mpif.h'
448       integer,dimension(15) :: blocklengths,displs
449 #endif
450 !      include 'COMMON.CONTROL'
451 !      include 'COMMON.SETUP'
452 !      include 'COMMON.CHAIN'
453 !      include 'COMMON.INTERACT'
454 !      include 'COMMON.LOCAL'
455 !      include 'COMMON.SBRIDGE'
456 !      include 'COMMON.TORCNSTR'
457 !      include 'COMMON.IOUNITS'
458 !      include 'COMMON.DERIV'
459 !      include 'COMMON.CONTACTS'
460 !el      integer,dimension(0:nfgtasks) :: iturn3_start_all,iturn3_end_all,&
461 !el        iturn4_start_all,iturn4_end_all,iatel_s_all,iatel_e_all  !(0:max_fg_procs)
462 !el      integer,dimension(nres,0:nfgtasks) :: ielstart_all,&
463 !el        ielend_all !(maxres,0:max_fg_procs-1)
464 !el      integer,dimension(0:nfgtasks-1) :: ntask_cont_from_all,&
465 !el        ntask_cont_to_all !(0:max_fg_procs-1),
466 !el      integer,dimension(0:nfgtasks-1,0:nfgtasks-1) :: itask_cont_from_all,&
467 !el        itask_cont_to_all !(0:max_fg_procs-1,0:max_fg_procs-1)
468
469 !el      common /przechowalnia/ iturn3_start_all,iturn3_end_all,&
470 !el        iturn4_start_all,iturn4_end_all,iatel_s_all,iatel_e_all,&
471 !el        ielstart_all,ielend_all,ntask_cont_from_all,itask_cont_from_all,&
472 !el        ntask_cont_to_all,itask_cont_to_all
473
474       integer :: FG_GROUP,CONT_FROM_GROUP,CONT_TO_GROUP
475       logical :: scheck,lprint,flag
476
477 !el local variables
478       integer :: ind_scint=0,ind_scint_old,ii,jj,i,j,iint
479
480 #ifdef MPI
481       integer :: my_sc_int(0:nfgtasks-1),my_ele_int(0:nfgtasks-1)
482       integer :: my_sc_intt(0:nfgtasks),my_ele_intt(0:nfgtasks)
483       integer :: n_sc_int_tot,my_sc_inde,my_sc_inds,ind_sctint,npept
484       integer :: nele_int_tot,my_ele_inds,my_ele_inde,ind_eleint_old,&
485             ind_eleint,ijunk,nele_int_tot_vdw,my_ele_inds_vdw,&
486             my_ele_inde_vdw,ind_eleint_vdw,ind_eleint_vdw_old,&
487             nscp_int_tot,my_scp_inds,my_scp_inde,ind_scpint,&
488             ind_scpint_old,nsumgrad,nlen,ngrad_start,ngrad_end,&
489             ierror,k,ierr,iaux,ncheck_to,ncheck_from,ind_typ,&
490             ichunk,int_index_old
491
492 !el      allocate(itask_cont_from(0:nfgtasks-1)) !(0:max_fg_procs-1)
493 !el      allocate(itask_cont_to(0:nfgtasks-1)) !(0:max_fg_procs-1)
494
495 !... Determine the first and last SC-SC interactions to be handled
496 !... by the current processor.
497 !write (iout,*) '******INIT_INT_TABLE nres=',nres,' nnt=',nnt,' nct=',nct
498       do i=0,nfgtasks-1
499         itask_cont_from(i)=fg_rank
500         itask_cont_to(i)=fg_rank
501       enddo
502       lprint=energy_dec
503 !      lprint=.true.
504       if (lprint) &
505        write (iout,*)'INIT_INT_TABLE nres=',nres,' nnt=',nnt,' nct=',nct
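! Total number of SC-SC pairs among residues nnt..nct: N=nct-nnt+1 residues
! give N*(N-1)/2 pairs; the nss disulfide-bonded pairs are subtracted because
! they are skipped when the interaction lists are built below.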
506       n_sc_int_tot=(nct-nnt+1)*(nct-nnt)/2-nss
507       call int_bounds(n_sc_int_tot,my_sc_inds,my_sc_inde)
508 !write (iout,*) 'INIT_INT_TABLE nres=',nres,' nnt=',nnt,' nct=',nct
509       if (lprint) &
510         write (iout,*) 'Processor',fg_rank,' CG group',kolor,&
511         ' absolute rank',MyRank,&
512         ' n_sc_int_tot',n_sc_int_tot,' my_sc_inds=',my_sc_inds,&
513         ' my_sc_inde',my_sc_inde
514       ind_sctint=0
515       iatsc_s=0
516       iatsc_e=0
517 #endif
518 !el       common /przechowalnia/
519       allocate(iturn3_start_all(0:nfgtasks))
520       allocate(iturn3_end_all(0:nfgtasks))
521       allocate(iturn4_start_all(0:nfgtasks))
522       allocate(iturn4_end_all(0:nfgtasks))
523       allocate(iatel_s_all(0:nfgtasks))
524       allocate(iatel_e_all(0:nfgtasks))
525       allocate(ielstart_all(nres,0:nfgtasks-1))
526       allocate(ielend_all(nres,0:nfgtasks-1))
527
528       allocate(ntask_cont_from_all(0:nfgtasks-1))
529       allocate(ntask_cont_to_all(0:nfgtasks-1))
530       allocate(itask_cont_from_all(0:nfgtasks-1,0:nfgtasks-1))
531       allocate(itask_cont_to_all(0:nfgtasks-1,0:nfgtasks-1))
532 !el----------
533 !      lprint=.false.
534       do i=1,nres !el   !maxres
535         nint_gr(i)=0
536         nscp_gr(i)=0
537         ielstart(i)=0
538         ielend(i)=0
539         do j=1,maxint_gr
540           istart(i,j)=0
541           iend(i,j)=0
542           iscpstart(i,j)=0
543           iscpend(i,j)=0    
544         enddo
545       enddo
546       ind_scint=0
547       ind_scint_old=0
548 !d    write (iout,*) 'ns=',ns,' nss=',nss,' ihpb,jhpb',
549 !d   &   (ihpb(i),jhpb(i),i=1,nss)
550       do i=nnt,nct-1
551         scheck=.false.
552         if (dyn_ss) goto 10
553         do ii=1,nss
554           if (ihpb(ii).eq.i+nres) then
555             scheck=.true.
556             jj=jhpb(ii)-nres
557             goto 10
558           endif
559         enddo
560    10   continue
561 !d      write (iout,*) 'i=',i,' scheck=',scheck,' jj=',jj
562         if (scheck) then
563           if (jj.eq.i+1) then
564 #ifdef MPI
565 !            write (iout,*) 'jj=i+1'
566             call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
567        iatsc_s,iatsc_e,i+2,nct,nint_gr(i),istart(i,1),iend(i,1),*12)
568 #else
569             nint_gr(i)=1
570             istart(i,1)=i+2
571             iend(i,1)=nct
572 #endif
573           else if (jj.eq.nct) then
574 #ifdef MPI
575 !            write (iout,*) 'jj=nct'
576             call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
577         iatsc_s,iatsc_e,i+1,nct-1,nint_gr(i),istart(i,1),iend(i,1),*12)
578 #else
579             nint_gr(i)=1
580             istart(i,1)=i+1
581             iend(i,1)=nct-1
582 #endif
583           else
584 #ifdef MPI
585             call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
586        iatsc_s,iatsc_e,i+1,jj-1,nint_gr(i),istart(i,1),iend(i,1),*12)
587             ii=nint_gr(i)+1
588             call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
589        iatsc_s,iatsc_e,jj+1,nct,nint_gr(i),istart(i,ii),iend(i,ii),*12)
590 #else
591             nint_gr(i)=2
592             istart(i,1)=i+1
593             iend(i,1)=jj-1
594             istart(i,2)=jj+1
595             iend(i,2)=nct
596 #endif
597           endif
598         else
599 #ifdef MPI
600           call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,&
601           iatsc_s,iatsc_e,i+1,nct,nint_gr(i),istart(i,1),iend(i,1),*12)
602 #else
603           nint_gr(i)=1
604           istart(i,1)=i+1
605           iend(i,1)=nct
606           ind_scint=ind_scint+nct-i
607 #endif
608         endif
609 #ifdef MPI
610         ind_scint_old=ind_scint
611 #endif
612       enddo
613    12 continue
614 #ifndef MPI
615       iatsc_s=nnt
616       iatsc_e=nct-1
617 #endif
618       if (iatsc_s.eq.0) iatsc_s=1
619 #ifdef MPI
620       if (lprint) write (*,*) 'Processor',fg_rank,' CG Group',kolor,&
621          ' absolute rank',myrank,' iatsc_s=',iatsc_s,' iatsc_e=',iatsc_e
622 #endif
623       if (lprint) then
624       write (iout,'(a)') 'Interaction array:'
625       do i=iatsc_s,iatsc_e
626         write (iout,'(i3,2(2x,2i3))') &
627        i,(istart(i,iint),iend(i,iint),iint=1,nint_gr(i))
628       enddo
629       endif
630       ispp=4 !?? wham ispp=2
631 #ifdef MPI
632 ! Now partition the electrostatic-interaction array
633       npept=nct-nnt
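! Peptide-group pairs at least ispp apart: for each i the partner list
! i+ispp..nct-1 contributes nct-i-ispp pairs, which sums to
! (npept-ispp)*(npept-ispp+1)/2 with npept=nct-nnt.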
634       nele_int_tot=(npept-ispp)*(npept-ispp+1)/2
635       call int_bounds(nele_int_tot,my_ele_inds,my_ele_inde)
636       if (lprint) &
637        write (*,*) 'Processor',fg_rank,' CG group',kolor,&
638         ' absolute rank',MyRank,&
639         ' nele_int_tot',nele_int_tot,' my_ele_inds=',my_ele_inds,&
640                     ' my_ele_inde',my_ele_inde
641       iatel_s=0
642       iatel_e=0
643       ind_eleint=0
644       ind_eleint_old=0
645       do i=nnt,nct-3
646         ijunk=0
647         call int_partition(ind_eleint,my_ele_inds,my_ele_inde,i,&
648           iatel_s,iatel_e,i+ispp,nct-1,ijunk,ielstart(i),ielend(i),*13)
649       enddo ! i 
650    13 continue
651       if (iatel_s.eq.0) iatel_s=1
652       nele_int_tot_vdw=(npept-2)*(npept-2+1)/2
653 !      write (iout,*) "nele_int_tot_vdw",nele_int_tot_vdw
654       call int_bounds(nele_int_tot_vdw,my_ele_inds_vdw,my_ele_inde_vdw)
655 !      write (iout,*) "my_ele_inds_vdw",my_ele_inds_vdw,
656 !     & " my_ele_inde_vdw",my_ele_inde_vdw
657       ind_eleint_vdw=0
658       ind_eleint_vdw_old=0
659       iatel_s_vdw=0
660       iatel_e_vdw=0
661       do i=nnt,nct-3
662         ijunk=0
663         call int_partition(ind_eleint_vdw,my_ele_inds_vdw,&
664           my_ele_inde_vdw,i,&
665           iatel_s_vdw,iatel_e_vdw,i+2,nct-1,ijunk,ielstart_vdw(i),&
666           ielend_vdw(i),*15)
667 !        write (iout,*) i," ielstart_vdw",ielstart_vdw(i),
668 !     &   " ielend_vdw",ielend_vdw(i)
669       enddo ! i 
670       if (iatel_s_vdw.eq.0) iatel_s_vdw=1
671    15 continue
672 #else
673       iatel_s=nnt
674       iatel_e=nct-5 ! ?? wham iatel_e=nct-3
675       do i=iatel_s,iatel_e
676         ielstart(i)=i+4 ! ?? wham +2
677         ielend(i)=nct-1
678       enddo
679       iatel_s_vdw=nnt
680       iatel_e_vdw=nct-3
681       do i=iatel_s_vdw,iatel_e_vdw
682         ielstart_vdw(i)=i+2
683         ielend_vdw(i)=nct-1
684       enddo
685 #endif
686       if (lprint) then
687         write (*,*) 'Processor',fg_rank,' CG group',kolor,&
688         ' absolute rank',MyRank
689         write (iout,*) 'Electrostatic interaction array:'
690         do i=iatel_s,iatel_e
691           write (iout,'(i3,2(2x,2i3))') i,ielstart(i),ielend(i)
692         enddo
693       endif ! lprint
694 !     iscp=3
695       iscp=2
696 ! Partition the SC-p interaction array
697 #ifdef MPI
698       nscp_int_tot=(npept-iscp+1)*(npept-iscp+1)
699       call int_bounds(nscp_int_tot,my_scp_inds,my_scp_inde)
700       if (lprint) write (iout,*) 'Processor',fg_rank,' CG group',kolor,&
701         ' absolute rank',myrank,&
702         ' nscp_int_tot',nscp_int_tot,' my_scp_inds=',my_scp_inds,&
703                     ' my_scp_inde',my_scp_inde
704       iatscp_s=0
705       iatscp_e=0
706       ind_scpint=0
707       ind_scpint_old=0
708       do i=nnt,nct-1
709         if (i.lt.nnt+iscp) then
710 !d        write (iout,*) 'i.le.nnt+iscp'
711           call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
712             iatscp_s,iatscp_e,i+iscp,nct,nscp_gr(i),iscpstart(i,1),&
713             iscpend(i,1),*14)
714         else if (i.gt.nct-iscp) then
715 !d        write (iout,*) 'i.gt.nct-iscp'
716           call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
717             iatscp_s,iatscp_e,nnt,i-iscp,nscp_gr(i),iscpstart(i,1),&
718             iscpend(i,1),*14)
719         else
720           call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
721             iatscp_s,iatscp_e,nnt,i-iscp,nscp_gr(i),iscpstart(i,1),&
722            iscpend(i,1),*14)
723           ii=nscp_gr(i)+1
724           call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,&
725             iatscp_s,iatscp_e,i+iscp,nct,nscp_gr(i),iscpstart(i,ii),&
726             iscpend(i,ii),*14)
727         endif
728       enddo ! i
729    14 continue
730 #else
731       iatscp_s=nnt
732       iatscp_e=nct-1
733       do i=nnt,nct-1
734         if (i.lt.nnt+iscp) then
735           nscp_gr(i)=1
736           iscpstart(i,1)=i+iscp
737           iscpend(i,1)=nct
738         elseif (i.gt.nct-iscp) then
739           nscp_gr(i)=1
740           iscpstart(i,1)=nnt
741           iscpend(i,1)=i-iscp
742         else
743           nscp_gr(i)=2
744           iscpstart(i,1)=nnt
745           iscpend(i,1)=i-iscp
746           iscpstart(i,2)=i+iscp
747           iscpend(i,2)=nct
748         endif 
749       enddo ! i
750 #endif
751       if (iatscp_s.eq.0) iatscp_s=1
752       if (lprint) then
753         write (iout,'(a)') 'SC-p interaction array:'
754         do i=iatscp_s,iatscp_e
755           write (iout,'(i3,2(2x,2i3))') &
756               i,(iscpstart(i,j),iscpend(i,j),j=1,nscp_gr(i))
757         enddo
758       endif ! lprint
759 ! Partition local interactions
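! int_bounds(n,istart,iend) apparently assigns this fine-grain processor a
! contiguous chunk of the index range 1..n; the constant offsets added below
! shift that chunk to the residue numbering used by each loop type.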
760 #ifdef MPI
761       call int_bounds(nres-2,loc_start,loc_end)
762       loc_start=loc_start+1
763       loc_end=loc_end+1
764       call int_bounds(nres-2,ithet_start,ithet_end)
765       ithet_start=ithet_start+2
766       ithet_end=ithet_end+2
767       call int_bounds(nct-nnt-2,iturn3_start,iturn3_end) 
768       iturn3_start=iturn3_start+nnt
769       iphi_start=iturn3_start+2
770       iturn3_end=iturn3_end+nnt
771       iphi_end=iturn3_end+2
772       iturn3_start=iturn3_start-1
773       iturn3_end=iturn3_end-1
774       call int_bounds(nres-3,itau_start,itau_end)
775       itau_start=itau_start+3
776       itau_end=itau_end+3
777       call int_bounds(nres-3,iphi1_start,iphi1_end)
778       iphi1_start=iphi1_start+3
779       iphi1_end=iphi1_end+3
780       call int_bounds(nct-nnt-3,iturn4_start,iturn4_end) 
781       iturn4_start=iturn4_start+nnt
782       iphid_start=iturn4_start+2
783       iturn4_end=iturn4_end+nnt
784       iphid_end=iturn4_end+2
785       iturn4_start=iturn4_start-1
786       iturn4_end=iturn4_end-1
787       call int_bounds(nres-2,ibond_start,ibond_end) 
788       ibond_start=ibond_start+1
789       ibond_end=ibond_end+1
790       call int_bounds(nct-nnt,ibondp_start,ibondp_end) 
791       ibondp_start=ibondp_start+nnt
792       ibondp_end=ibondp_end+nnt
793       call int_bounds1(nres-1,ivec_start,ivec_end) 
794 !      print *,"Processor",myrank,fg_rank,fg_rank1,
795 !     &  " ivec_start",ivec_start," ivec_end",ivec_end
796       iset_start=loc_start+2
797       iset_end=loc_end+2
798       if (ndih_constr.eq.0) then
799         idihconstr_start=1
800         idihconstr_end=0
801       else
802         call int_bounds(ndih_constr,idihconstr_start,idihconstr_end)
803       endif
804 !      nsumgrad=(nres-nnt)*(nres-nnt+1)/2
805 !      nlen=nres-nnt+1
806       nsumgrad=(nres-nnt)*(nres-nnt+1)/2
807       nlen=nres-nnt+1
808       call int_bounds(nsumgrad,ngrad_start,ngrad_end)
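! Map the linear pair indices ngrad_start/ngrad_end back to their rows: row i
! (i=1..nlen-1) holds nlen-i pairs, so the cumulative count is triangular and
! the quadratic formula below inverts it; truncation to an integer picks the
! row that contains the given pair.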
809       igrad_start=((2*nlen+1) &
810          -sqrt(float((2*nlen-1)**2-8*(ngrad_start-1))))/2
811       igrad_end=((2*nlen+1) &
812          -sqrt(float((2*nlen-1)**2-8*(ngrad_end-1))))/2
813 !el      allocate(jgrad_start(igrad_start:igrad_end))
814 !el      allocate(jgrad_end(igrad_start:igrad_end)) !(maxres)
815       jgrad_start(igrad_start)= &
816          ngrad_start-(2*nlen-igrad_start)*(igrad_start-1)/2 &
817          +igrad_start
818       jgrad_end(igrad_start)=nres
819       if (igrad_end.gt.igrad_start) jgrad_start(igrad_end)=igrad_end+1
820       jgrad_end(igrad_end)=ngrad_end-(2*nlen-igrad_end)*(igrad_end-1)/2 &
821           +igrad_end
822       do i=igrad_start+1,igrad_end-1
823         jgrad_start(i)=i+1
824         jgrad_end(i)=nres
825       enddo
826       if (lprint) then 
827         write (*,*) 'Processor:',fg_rank,' CG group',kolor,&
828        ' absolute rank',myrank,&
829        ' loc_start',loc_start,' loc_end',loc_end,&
830        ' ithet_start',ithet_start,' ithet_end',ithet_end,&
831        ' iphi_start',iphi_start,' iphi_end',iphi_end,&
832        ' iphid_start',iphid_start,' iphid_end',iphid_end,&
833        ' ibond_start',ibond_start,' ibond_end',ibond_end,&
834        ' ibondp_start',ibondp_start,' ibondp_end',ibondp_end,&
835        ' iturn3_start',iturn3_start,' iturn3_end',iturn3_end,&
836        ' iturn4_start',iturn4_start,' iturn4_end',iturn4_end,&
837        ' ivec_start',ivec_start,' ivec_end',ivec_end,&
838        ' iset_start',iset_start,' iset_end',iset_end,&
839        ' idihconstr_start',idihconstr_start,' idihconstr_end',&
840          idihconstr_end
841        write (*,*) 'Processor:',fg_rank,myrank,' igrad_start',&
842          igrad_start,' igrad_end',igrad_end,' ngrad_start',ngrad_start,&
843          ' ngrad_end',ngrad_end
844        do i=igrad_start,igrad_end
845          write(*,*) 'Processor:',fg_rank,myrank,i,&
846           jgrad_start(i),jgrad_end(i)
847        enddo
848       endif
849       if (nfgtasks.gt.1) then
850         call MPI_Allgather(ivec_start,1,MPI_INTEGER,ivec_displ(0),1,&
851           MPI_INTEGER,FG_COMM1,IERROR)
852         iaux=ivec_end-ivec_start+1
853         call MPI_Allgather(iaux,1,MPI_INTEGER,ivec_count(0),1,&
854           MPI_INTEGER,FG_COMM1,IERROR)
855         call MPI_Allgather(iset_start-2,1,MPI_INTEGER,iset_displ(0),1,&
856           MPI_INTEGER,FG_COMM,IERROR)
857         iaux=iset_end-iset_start+1
858         call MPI_Allgather(iaux,1,MPI_INTEGER,iset_count(0),1,&
859           MPI_INTEGER,FG_COMM,IERROR)
860         call MPI_Allgather(ibond_start,1,MPI_INTEGER,ibond_displ(0),1,&
861           MPI_INTEGER,FG_COMM,IERROR)
862         iaux=ibond_end-ibond_start+1
863         call MPI_Allgather(iaux,1,MPI_INTEGER,ibond_count(0),1,&
864           MPI_INTEGER,FG_COMM,IERROR)
865         call MPI_Allgather(ithet_start,1,MPI_INTEGER,ithet_displ(0),1,&
866           MPI_INTEGER,FG_COMM,IERROR)
867         iaux=ithet_end-ithet_start+1
868         call MPI_Allgather(iaux,1,MPI_INTEGER,ithet_count(0),1,&
869           MPI_INTEGER,FG_COMM,IERROR)
870         call MPI_Allgather(iphi_start,1,MPI_INTEGER,iphi_displ(0),1,&
871           MPI_INTEGER,FG_COMM,IERROR)
872         iaux=iphi_end-iphi_start+1
873         call MPI_Allgather(iaux,1,MPI_INTEGER,iphi_count(0),1,&
874           MPI_INTEGER,FG_COMM,IERROR)
875         call MPI_Allgather(iphi1_start,1,MPI_INTEGER,iphi1_displ(0),1,&
876           MPI_INTEGER,FG_COMM,IERROR)
877         iaux=iphi1_end-iphi1_start+1
878         call MPI_Allgather(iaux,1,MPI_INTEGER,iphi1_count(0),1,&
879           MPI_INTEGER,FG_COMM,IERROR)
880         do i=0,nfgtasks-1
881           do j=1,nres
882             ielstart_all(j,i)=0
883             ielend_all(j,i)=0
884           enddo
885         enddo
886         call MPI_Allgather(iturn3_start,1,MPI_INTEGER,&
887           iturn3_start_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
888         call MPI_Allgather(iturn4_start,1,MPI_INTEGER,&
889           iturn4_start_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
890         call MPI_Allgather(iturn3_end,1,MPI_INTEGER,&
891           iturn3_end_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
892         call MPI_Allgather(iturn4_end,1,MPI_INTEGER,&
893           iturn4_end_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
894         call MPI_Allgather(iatel_s,1,MPI_INTEGER,&
895           iatel_s_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
896         call MPI_Allgather(iatel_e,1,MPI_INTEGER,&
897           iatel_e_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
898         call MPI_Allgather(ielstart(1),nres,MPI_INTEGER,&
899           ielstart_all(1,0),nres,MPI_INTEGER,FG_COMM,IERROR)
900         call MPI_Allgather(ielend(1),nres,MPI_INTEGER,&
901           ielend_all(1,0),nres,MPI_INTEGER,FG_COMM,IERROR)
902         if (lprint) then
903         write (iout,*) "iatel_s_all",(iatel_s_all(i),i=0,nfgtasks)
904         write (iout,*) "iatel_e_all",(iatel_e_all(i),i=0,nfgtasks)
905         write (iout,*) "iturn3_start_all",&
906           (iturn3_start_all(i),i=0,nfgtasks-1)
907         write (iout,*) "iturn3_end_all",&
908           (iturn3_end_all(i),i=0,nfgtasks-1)
909         write (iout,*) "iturn4_start_all",&
910           (iturn4_start_all(i),i=0,nfgtasks-1)
911         write (iout,*) "iturn4_end_all",&
912           (iturn4_end_all(i),i=0,nfgtasks-1)
913         write (iout,*) "The ielstart_all array"
914         do i=nnt,nct
915           write (iout,'(20i4)') i,(ielstart_all(i,j),j=0,nfgtasks-1)
916         enddo
917         write (iout,*) "The ielend_all array"
918         do i=nnt,nct
919           write (iout,'(20i4)') i,(ielend_all(i,j),j=0,nfgtasks-1)
920         enddo
921         call flush(iout)
922         endif
923         ntask_cont_from=0
924         ntask_cont_to=0
925         itask_cont_from(0)=fg_rank
926         itask_cont_to(0)=fg_rank
927         flag=.false.
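! Register which interactions computed here must be sent to other processors
! (add_int scans the lower ranks and fills itask_cont_to) and which must be
! received (add_int_from scans the higher ranks and fills itask_cont_from);
! turn3/turn4 pairs are handled first, electrostatic pairs below.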
928 !el        allocate(iturn3_sent(4,iturn3_start:iturn3_end))
929 !el        allocate(iturn4_sent(4,iturn4_start:iturn4_end)) !(4,maxres)
930         do ii=iturn3_start,iturn3_end
931           call add_int(ii,ii+2,iturn3_sent(1,ii),&
932                       ntask_cont_to,itask_cont_to,flag)
933         enddo
934         do ii=iturn4_start,iturn4_end
935           call add_int(ii,ii+3,iturn4_sent(1,ii),&
936                       ntask_cont_to,itask_cont_to,flag)
937         enddo
938         do ii=iturn3_start,iturn3_end
939           call add_int_from(ii,ii+2,ntask_cont_from,itask_cont_from)
940         enddo
941         do ii=iturn4_start,iturn4_end
942           call add_int_from(ii,ii+3,ntask_cont_from,itask_cont_from)
943         enddo
944         if (lprint) then
945         write (iout,*) "After turn3 ntask_cont_from",ntask_cont_from,&
946          " ntask_cont_to",ntask_cont_to
947         write (iout,*) "itask_cont_from",&
948           (itask_cont_from(i),i=1,ntask_cont_from)
949         write (iout,*) "itask_cont_to",&
950           (itask_cont_to(i),i=1,ntask_cont_to)
951         call flush(iout)
952         endif
953 !        write (iout,*) "Loop forward"
954 !        call flush(iout)
955         do i=iatel_s,iatel_e
956 !          write (iout,*) "from loop i=",i
957 !          call flush(iout)
958           do j=ielstart(i),ielend(i)
959             call add_int_from(i,j,ntask_cont_from,itask_cont_from)
960           enddo
961         enddo
962 !        write (iout,*) "Loop backward iatel_e-1",iatel_e-1,
963 !     &     " iatel_e",iatel_e
964 !        call flush(iout)
965         nat_sent=0
966         do i=iatel_s,iatel_e
967 !          write (iout,*) "i",i," ielstart",ielstart(i),
968 !     &      " ielend",ielend(i)
969 !          call flush(iout)
970           flag=.false.
971           do j=ielstart(i),ielend(i)
972             call add_int(i,j,iint_sent(1,j,nat_sent+1),ntask_cont_to,&
973                         itask_cont_to,flag)
974           enddo
975           if (flag) then
976             nat_sent=nat_sent+1
977             iat_sent(nat_sent)=i
978           endif
979         enddo
980         if (lprint) then
981         write (iout,*)"After longrange ntask_cont_from",ntask_cont_from,&
982          " ntask_cont_to",ntask_cont_to
983         write (iout,*) "itask_cont_from",&
984           (itask_cont_from(i),i=1,ntask_cont_from)
985         write (iout,*) "itask_cont_to",&
986           (itask_cont_to(i),i=1,ntask_cont_to)
987         call flush(iout)
988         write (iout,*) "iint_sent"
989         do i=1,nat_sent
990           ii=iat_sent(i)
991           write (iout,'(20i4)') ii,(j,(iint_sent(k,j,i),k=1,4),&
992             j=ielstart(ii),ielend(ii))
993         enddo
994         write (iout,*) "iturn3_sent iturn3_start",iturn3_start,&
995           " iturn3_end",iturn3_end
996         write (iout,'(20i4)') (i,(iturn3_sent(j,i),j=1,4),&
997            i=iturn3_start,iturn3_end)
998         write (iout,*) "iturn4_sent iturn4_start",iturn4_start,&
999           " iturn4_end",iturn4_end
1000         write (iout,'(20i4)') (i,(iturn4_sent(j,i),j=1,4),&
1001            i=iturn4_start,iturn4_end)
1002         call flush(iout)
1003         endif
1004         call MPI_Gather(ntask_cont_from,1,MPI_INTEGER,&
1005          ntask_cont_from_all,1,MPI_INTEGER,king,FG_COMM,IERR)
1006 !        write (iout,*) "Gather ntask_cont_from ended"
1007 !        call flush(iout)
1008         call MPI_Gather(itask_cont_from(0),nfgtasks,MPI_INTEGER,&
1009          itask_cont_from_all(0,0),nfgtasks,MPI_INTEGER,king,&
1010          FG_COMM,IERR)
1011 !        write (iout,*) "Gather itask_cont_from ended"
1012 !        call flush(iout)
1013         call MPI_Gather(ntask_cont_to,1,MPI_INTEGER,ntask_cont_to_all,&
1014          1,MPI_INTEGER,king,FG_COMM,IERR)
1015 !        write (iout,*) "Gather ntask_cont_to ended"
1016 !        call flush(iout)
1017         call MPI_Gather(itask_cont_to,nfgtasks,MPI_INTEGER,&
1018          itask_cont_to_all,nfgtasks,MPI_INTEGER,king,FG_COMM,IERR)
1019 !        write (iout,*) "Gather itask_cont_to ended"
1020 !        call flush(iout)
1021         if (fg_rank.eq.king) then
1022           write (iout,*)"Contact receive task map (proc, #tasks, tasks)"
1023           do i=0,nfgtasks-1
1024             write (iout,'(20i4)') i,ntask_cont_from_all(i),&
1025               (itask_cont_from_all(j,i),j=1,ntask_cont_from_all(i)) 
1026           enddo
1027           write (iout,*)
1028           call flush(iout)
1029           write (iout,*) "Contact send task map (proc, #tasks, tasks)"
1030           do i=0,nfgtasks-1
1031             write (iout,'(20i4)') i,ntask_cont_to_all(i),&
1032              (itask_cont_to_all(j,i),j=1,ntask_cont_to_all(i)) 
1033           enddo
1034           write (iout,*)
1035           call flush(iout)
1036 ! Check if every send will have a matching receive
1037           ncheck_to=0
1038           ncheck_from=0
1039           do i=0,nfgtasks-1
1040             ncheck_to=ncheck_to+ntask_cont_to_all(i)
1041             ncheck_from=ncheck_from+ntask_cont_from_all(i)
1042           enddo
1043           write (iout,*) "Control sums",ncheck_from,ncheck_to
1044           if (ncheck_from.ne.ncheck_to) then
1045             write (iout,*) "Error: #receive differs from #send."
1046             write (iout,*) "Terminating program...!"
1047             call flush(iout)
1048             flag=.false.
1049           else
1050             flag=.true.
1051             do i=0,nfgtasks-1
1052               do j=1,ntask_cont_to_all(i)
1053                 ii=itask_cont_to_all(j,i)
1054                 do k=1,ntask_cont_from_all(ii)
1055                   if (itask_cont_from_all(k,ii).eq.i) then
1056                     if(lprint)write(iout,*)"Matching send/receive",i,ii
1057                     exit
1058                   endif
1059                 enddo
1060                 if (k.eq.ntask_cont_from_all(ii)+1) then
1061                   flag=.false.
1062                   write (iout,*) "Error: send by",j," to",ii,&
1063                     " would have no matching receive"
1064                 endif
1065               enddo
1066             enddo
1067           endif
1068           if (.not.flag) then
1069             write (iout,*) "Unmatched sends; terminating program"
1070             call flush(iout)
1071           endif
1072         endif
1073         call MPI_Bcast(flag,1,MPI_LOGICAL,king,FG_COMM,IERROR)
1074 !        write (iout,*) "flag broadcast ended flag=",flag
1075 !        call flush(iout)
1076         if (.not.flag) then
1077           call MPI_Finalize(IERROR)
1078           stop "Error in INIT_INT_TABLE: unmatched send/receive."
1079         endif
1080         call MPI_Comm_group(FG_COMM,fg_group,IERR)
1081 !        write (iout,*) "MPI_Comm_group ended"
1082 !        call flush(iout)
1083         call MPI_Group_incl(fg_group,ntask_cont_from+1,&
1084           itask_cont_from(0),CONT_FROM_GROUP,IERR)
1085         call MPI_Group_incl(fg_group,ntask_cont_to+1,itask_cont_to(0),&
1086           CONT_TO_GROUP,IERR)
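! Translate the destination ranks recorded in iint_sent/iturn3_sent/
! iturn4_sent from ranks in the full fg_group to ranks within CONT_TO_GROUP;
! the *_local arrays presumably serve communication restricted to that group.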
1087         do i=1,nat_sent
1088           ii=iat_sent(i)
1089           iaux=4*(ielend(ii)-ielstart(ii)+1)
1090           call MPI_Group_translate_ranks(fg_group,iaux,&
1091             iint_sent(1,ielstart(ii),i),CONT_TO_GROUP,&
1092             iint_sent_local(1,ielstart(ii),i),IERR )
1093 !          write (iout,*) "Ranks translated i=",i
1094 !          call flush(iout)
1095         enddo
1096         iaux=4*(iturn3_end-iturn3_start+1)
1097         call MPI_Group_translate_ranks(fg_group,iaux,&
1098            iturn3_sent(1,iturn3_start),CONT_TO_GROUP,&
1099            iturn3_sent_local(1,iturn3_start),IERR)
1100         iaux=4*(iturn4_end-iturn4_start+1)
1101         call MPI_Group_translate_ranks(fg_group,iaux,&
1102            iturn4_sent(1,iturn4_start),CONT_TO_GROUP,&
1103            iturn4_sent_local(1,iturn4_start),IERR)
1104         if (lprint) then
1105         write (iout,*) "iint_sent_local"
1106         do i=1,nat_sent
1107           ii=iat_sent(i)
1108           write (iout,'(20i4)') ii,(j,(iint_sent_local(k,j,i),k=1,4),&
1109             j=ielstart(ii),ielend(ii))
1110           call flush(iout)
1111         enddo
1112         write (iout,*) "iturn3_sent_local iturn3_start",iturn3_start,&
1113           " iturn3_end",iturn3_end
1114         write (iout,'(20i4)') (i,(iturn3_sent_local(j,i),j=1,4),&
1115            i=iturn3_start,iturn3_end)
1116         write (iout,*) "iturn4_sent_local iturn4_start",iturn4_start,&
1117           " iturn4_end",iturn4_end
1118         write (iout,'(20i4)') (i,(iturn4_sent_local(j,i),j=1,4),&
1119            i=iturn4_start,iturn4_end)
1120         call flush(iout)
1121         endif
1122         call MPI_Group_free(fg_group,ierr)
1123         call MPI_Group_free(cont_from_group,ierr)
1124         call MPI_Group_free(cont_to_group,ierr)
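! Contiguous derived datatypes (n doubles each) used in later exchanges of
! per-residue data among fine-grain processors; the counts (3, 18, 2, 4, 8,
! 6, 9) presumably match the leading dimensions of the arrays being sent
! (e.g. unit vectors and their Cartesian gradients).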
1125         call MPI_Type_contiguous(3,MPI_DOUBLE_PRECISION,MPI_UYZ,IERROR)
1126         call MPI_Type_commit(MPI_UYZ,IERROR)
1127         call MPI_Type_contiguous(18,MPI_DOUBLE_PRECISION,MPI_UYZGRAD,&
1128           IERROR)
1129         call MPI_Type_commit(MPI_UYZGRAD,IERROR)
1130         call MPI_Type_contiguous(2,MPI_DOUBLE_PRECISION,MPI_MU,IERROR)
1131         call MPI_Type_commit(MPI_MU,IERROR)
1132         call MPI_Type_contiguous(4,MPI_DOUBLE_PRECISION,MPI_MAT1,IERROR)
1133         call MPI_Type_commit(MPI_MAT1,IERROR)
1134         call MPI_Type_contiguous(8,MPI_DOUBLE_PRECISION,MPI_MAT2,IERROR)
1135         call MPI_Type_commit(MPI_MAT2,IERROR)
1136         call MPI_Type_contiguous(6,MPI_DOUBLE_PRECISION,MPI_THET,IERROR)
1137         call MPI_Type_commit(MPI_THET,IERROR)
1138         call MPI_Type_contiguous(9,MPI_DOUBLE_PRECISION,MPI_GAM,IERROR)
1139         call MPI_Type_commit(MPI_GAM,IERROR)
1140
1141 !el        allocate(lentyp(0:nfgtasks-1))
1142 #ifndef MATGATHER
1143 ! 9/22/08 Derived types to send matrices which appear in correlation terms
1144         do i=0,nfgtasks-1
1145           if (ivec_count(i).eq.ivec_count(0)) then
1146             lentyp(i)=0
1147           else
1148             lentyp(i)=1
1149           endif
1150         enddo
1151         do ind_typ=lentyp(0),lentyp(nfgtasks-1)
1152         if (ind_typ.eq.0) then
1153           ichunk=ivec_count(0)
1154         else
1155           ichunk=ivec_count(1)
1156         endif
1157 !        do i=1,4
1158 !          blocklengths(i)=4
1159 !        enddo
1160 !        displs(1)=0
1161 !        do i=2,4
1162 !          displs(i)=displs(i-1)+blocklengths(i-1)*maxres
1163 !        enddo
1164 !        do i=1,4
1165 !          blocklengths(i)=blocklengths(i)*ichunk
1166 !        enddo
1167 !        write (iout,*) "blocklengths and displs"
1168 !        do i=1,4
1169 !          write (iout,*) i,blocklengths(i),displs(i)
1170 !        enddo
1171 !        call flush(iout)
1172 !        call MPI_Type_indexed(4,blocklengths(1),displs(1),
1173 !     &    MPI_DOUBLE_PRECISION,MPI_ROTAT1(ind_typ),IERROR)
1174 !        call MPI_Type_commit(MPI_ROTAT1(ind_typ),IERROR)
1175 !        write (iout,*) "MPI_ROTAT1",MPI_ROTAT1 
1176 !        do i=1,4
1177 !          blocklengths(i)=2
1178 !        enddo
1179 !        displs(1)=0
1180 !        do i=2,4
1181 !          displs(i)=displs(i-1)+blocklengths(i-1)*maxres
1182 !        enddo
1183 !        do i=1,4
1184 !          blocklengths(i)=blocklengths(i)*ichunk
1185 !        enddo
1186 !        write (iout,*) "blocklengths and displs"
1187 !        do i=1,4
1188 !          write (iout,*) i,blocklengths(i),displs(i)
1189 !        enddo
1190 !        call flush(iout)
1191 !        call MPI_Type_indexed(4,blocklengths(1),displs(1),
1192 !     &    MPI_DOUBLE_PRECISION,MPI_ROTAT2(ind_typ),IERROR)
1193 !        call MPI_Type_commit(MPI_ROTAT2(ind_typ),IERROR)
1194 !        write (iout,*) "MPI_ROTAT2",MPI_ROTAT2 
1195         do i=1,8
1196           blocklengths(i)=2
1197         enddo
1198         displs(1)=0
1199         do i=2,8
1200           displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
1201         enddo
1202         do i=1,15
1203           blocklengths(i)=blocklengths(i)*ichunk
1204         enddo
1205         call MPI_Type_indexed(8,blocklengths,displs,&
1206           MPI_DOUBLE_PRECISION,MPI_PRECOMP11(ind_typ),IERROR)
1207         call MPI_Type_commit(MPI_PRECOMP11(ind_typ),IERROR)
1208         do i=1,8
1209           blocklengths(i)=4
1210         enddo
1211         displs(1)=0
1212         do i=2,8
1213           displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
1214         enddo
1215         do i=1,15
1216           blocklengths(i)=blocklengths(i)*ichunk
1217         enddo
1218         call MPI_Type_indexed(8,blocklengths,displs,&
1219           MPI_DOUBLE_PRECISION,MPI_PRECOMP12(ind_typ),IERROR)
1220         call MPI_Type_commit(MPI_PRECOMP12(ind_typ),IERROR)
1221         do i=1,6
1222           blocklengths(i)=4
1223         enddo
1224         displs(1)=0
1225         do i=2,6
1226           displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
1227         enddo
1228         do i=1,6
1229           blocklengths(i)=blocklengths(i)*ichunk
1230         enddo
1231         call MPI_Type_indexed(6,blocklengths,displs,&
1232           MPI_DOUBLE_PRECISION,MPI_PRECOMP22(ind_typ),IERROR)
1233         call MPI_Type_commit(MPI_PRECOMP22(ind_typ),IERROR)
1234         do i=1,2
1235           blocklengths(i)=8
1236         enddo
1237         displs(1)=0
1238         do i=2,2
1239           displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
1240         enddo
1241         do i=1,2
1242           blocklengths(i)=blocklengths(i)*ichunk
1243         enddo
1244         call MPI_Type_indexed(2,blocklengths,displs,&
1245           MPI_DOUBLE_PRECISION,MPI_PRECOMP23(ind_typ),IERROR)
1246         call MPI_Type_commit(MPI_PRECOMP23(ind_typ),IERROR)
1247         do i=1,4
1248           blocklengths(i)=1
1249         enddo
1250         displs(1)=0
1251         do i=2,4
1252           displs(i)=displs(i-1)+blocklengths(i-1)*nres !maxres
1253         enddo
1254         do i=1,4
1255           blocklengths(i)=blocklengths(i)*ichunk
1256         enddo
1257         call MPI_Type_indexed(4,blocklengths,displs,&
1258           MPI_DOUBLE_PRECISION,MPI_ROTAT_OLD(ind_typ),IERROR)
1259         call MPI_Type_commit(MPI_ROTAT_OLD(ind_typ),IERROR)
1260         enddo
1261 #endif
1262       endif
1263       iint_start=ivec_start+1
1264       iint_end=ivec_end+1
1265       do i=0,nfgtasks-1
1266           iint_count(i)=ivec_count(i)
1267           iint_displ(i)=ivec_displ(i)
1268           ivec_displ(i)=ivec_displ(i)-1
1269           iset_displ(i)=iset_displ(i)-1
1270           ithet_displ(i)=ithet_displ(i)-1
1271           iphi_displ(i)=iphi_displ(i)-1
1272           iphi1_displ(i)=iphi1_displ(i)-1
1273           ibond_displ(i)=ibond_displ(i)-1
1274       enddo
1275       if (nfgtasks.gt.1 .and. fg_rank.eq.king &
1276           .and. (me.eq.0 .or. .not. out1file)) then
1277         write (iout,*) "IVEC_DISPL, IVEC_COUNT, ISET_START, ISET_COUNT"
1278         do i=0,nfgtasks-1
1279           write (iout,*) i,ivec_displ(i),ivec_count(i),iset_displ(i),&
1280             iset_count(i)
1281         enddo
1282         write (iout,*) "iphi_start",iphi_start," iphi_end",iphi_end,&
1283           " iphi1_start",iphi1_start," iphi1_end",iphi1_end
1284         write (iout,*)"IPHI_COUNT, IPHI_DISPL, IPHI1_COUNT, IPHI1_DISPL"
1285         do i=0,nfgtasks-1
1286           write (iout,*) i,iphi_count(i),iphi_displ(i),iphi1_count(i),&
1287             iphi1_displ(i)
1288         enddo
1289         write(iout,'(i10,a,i10,a,i10,a/a,i3,a)') n_sc_int_tot,' SC-SC ',&
1290           nele_int_tot,' electrostatic and ',nscp_int_tot,&
1291           ' SC-p interactions','were distributed among',nfgtasks,&
1292           ' fine-grain processors.'
1293       endif
1294 #else
1295       loc_start=2
1296       loc_end=nres-1
1297       ithet_start=3 
1298       ithet_end=nres
1299       iturn3_start=nnt
1300       iturn3_end=nct-3
1301       iturn4_start=nnt
1302       iturn4_end=nct-4
1303       iphi_start=nnt+3
1304       iphi_end=nct
1305       iphi1_start=4
1306       iphi1_end=nres
1307       idihconstr_start=1
1308       idihconstr_end=ndih_constr
1309       iphid_start=iphi_start
1310       iphid_end=iphi_end-1
1311       itau_start=4
1312       itau_end=nres
1313       ibond_start=2
1314       ibond_end=nres-1
1315       ibondp_start=nnt
1316       ibondp_end=nct-1
1317       ivec_start=1
1318       ivec_end=nres-1
1319       iset_start=3
1320       iset_end=nres+1
1321       iint_start=2
1322       iint_end=nres-1
1323 #endif
1324 !el       common /przechowalnia/
1325 !      deallocate(iturn3_start_all)
1326 !      deallocate(iturn3_end_all)
1327 !      deallocate(iturn4_start_all)
1328 !      deallocate(iturn4_end_all)
1329 !      deallocate(iatel_s_all)
1330 !      deallocate(iatel_e_all)
1331 !      deallocate(ielstart_all)
1332 !      deallocate(ielend_all)
1333
1334 !      deallocate(ntask_cont_from_all)
1335 !      deallocate(ntask_cont_to_all)
1336 !      deallocate(itask_cont_from_all)
1337 !      deallocate(itask_cont_to_all)
1338 !el----------
1339       return
1340       end subroutine init_int_table
1341 #ifdef MPI
1342 !-----------------------------------------------------------------------------
1343       subroutine add_int(ii,jj,itask,ntask_cont_to,itask_cont_to,flag)
1344
1345 !el      implicit none
1346 !      include "DIMENSIONS"
1347 !      include "COMMON.INTERACT"
1348 !      include "COMMON.SETUP"
1349 !      include "COMMON.IOUNITS"
1350       integer :: ii,jj,ntask_cont_to
1351       integer,dimension(4) :: itask
1352       integer :: itask_cont_to(0:nfgtasks-1)    !(0:max_fg_procs-1)
1353       logical :: flag
1354 !el      integer,dimension(0:nfgtasks) :: iturn3_start_all,iturn3_end_all,iturn4_start_all,&
1355 !el       iturn4_end_all,iatel_s_all,iatel_e_all        !(0:max_fg_procs)
1356 !el      integer,dimension(nres,0:nfgtasks-1) :: ielstart_all,ielend_all        !(maxres,0:max_fg_procs-1)
1357 !el      common /przechowalnia/ iturn3_start_all,iturn3_end_all,iturn4_start_all,&
1358 !el       iturn4_end_all,iatel_s_all,iatel_e_all,ielstart_all,ielend_all
1359       integer :: iproc,isent,k,l
1360 ! Determines whether to send interaction ii,jj to other processors; a given
1361 ! interaction can be sent to at most 2 processors.
1362 ! Sets flag=.true. if interaction ii,jj needs to be sent to at least 
1363 ! one processor, otherwise flag is unchanged from the input value.
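! (The neighbouring pairs ii+-1, jj+-1 are apparently checked because the
!  correlation terms built from interaction ii,jj also involve the adjacent
!  peptide-group interactions owned by other processors.)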
1364       isent=0
1365       itask(1)=fg_rank
1366       itask(2)=fg_rank
1367       itask(3)=fg_rank
1368       itask(4)=fg_rank
1369 !      write (iout,*) "ii",ii," jj",jj
1370 ! Loop over processors to check if anybody could need interaction ii,jj
1371       do iproc=0,fg_rank-1
1372 ! Check if the interaction matches any turn3 at iproc
1373         do k=iturn3_start_all(iproc),iturn3_end_all(iproc)
1374           l=k+2
1375           if (k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1 &
1376          .or. k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1 .and. l.eq.jj-1) &
1377           then 
1378 !            write (iout,*) "turn3 to iproc",iproc," ij",ii,jj,"kl",k,l
1379 !            call flush(iout)
1380             flag=.true.
1381             if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
1382               .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
1383               isent=isent+1
1384               itask(isent)=iproc
1385               call add_task(iproc,ntask_cont_to,itask_cont_to)
1386             endif
1387           endif
1388         enddo
1389 ! Check if the interaction matches any turn4 at iproc
1390         do k=iturn4_start_all(iproc),iturn4_end_all(iproc)
1391           l=k+3
1392           if (k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1 &
1393          .or. k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1 .and. l.eq.jj-1) &
1394           then 
1395 !            write (iout,*) "turn3 to iproc",iproc," ij",ii,jj," kl",k,l
1396 !            call flush(iout)
1397             flag=.true.
1398             if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
1399               .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
1400               isent=isent+1
1401               itask(isent)=iproc
1402               call add_task(iproc,ntask_cont_to,itask_cont_to)
1403             endif
1404           endif
1405         enddo
1406         if (iatel_s_all(iproc).gt.0 .and. iatel_e_all(iproc).gt.0 .and. &
1407         iatel_s_all(iproc).le.ii-1 .and. iatel_e_all(iproc).ge.ii-1)then
1408           if (ielstart_all(ii-1,iproc).le.jj-1.and. &
1409               ielend_all(ii-1,iproc).ge.jj-1) then
1410             flag=.true.
1411             if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
1412               .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
1413               isent=isent+1
1414               itask(isent)=iproc
1415               call add_task(iproc,ntask_cont_to,itask_cont_to)
1416             endif
1417           endif
1418           if (ielstart_all(ii-1,iproc).le.jj+1.and. &
1419               ielend_all(ii-1,iproc).ge.jj+1) then
1420             flag=.true.
1421             if (iproc.ne.itask(1).and.iproc.ne.itask(2) &
1422               .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
1423               isent=isent+1
1424               itask(isent)=iproc
1425               call add_task(iproc,ntask_cont_to,itask_cont_to)
1426             endif
1427           endif
1428         endif
1429       enddo
1430       return
1431       end subroutine add_int
1432 !-----------------------------------------------------------------------------
1433       subroutine add_int_from(ii,jj,ntask_cont_from,itask_cont_from)
1434
1435 !el      use MPI_data
1436 !el      implicit none
1437 !      include "DIMENSIONS"
1438 !      include "COMMON.INTERACT"
1439 !      include "COMMON.SETUP"
1440 !      include "COMMON.IOUNITS"
1441       integer :: ii,jj,itask(2),ntask_cont_from,&
1442        itask_cont_from(0:nfgtasks-1)    !(0:max_fg_procs)
1443       logical :: flag
1444 !el      integer,dimension(0:nfgtasks) :: iturn3_start_all,iturn3_end_all,&
1445 !el       iturn4_start_all,iturn4_end_all,iatel_s_all,iatel_e_all       !(0:max_fg_procs)
1446 !el      integer,dimension(nres,0:nfgtasks-1) :: ielstart_all,ielend_all        !(maxres,0:max_fg_procs-1)
1447 !el      common /przechowalnia/ iturn3_start_all,iturn3_end_all,iturn4_start_all,&
1448 !el       iturn4_end_all,iatel_s_all,iatel_e_all,ielstart_all,ielend_all
1449       integer :: iproc,k,l
1450       do iproc=fg_rank+1,nfgtasks-1
1451         do k=iturn3_start_all(iproc),iturn3_end_all(iproc)
1452           l=k+2
1453           if (k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1.and.l.eq.jj-1 &
1454          .or. k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1) &
1455           then
1456 !            write (iout,*)"turn3 from iproc",iproc," ij",ii,jj," kl",k,l
1457             call add_task(iproc,ntask_cont_from,itask_cont_from)
1458           endif
1459         enddo 
1460         do k=iturn4_start_all(iproc),iturn4_end_all(iproc)
1461           l=k+3
1462           if (k.eq.ii+1 .and. l.eq.jj+1 .or. k.eq.ii+1.and.l.eq.jj-1 &
1463          .or. k.eq.ii-1 .and. l.eq.jj-1 .or. k.eq.ii-1 .and. l.eq.jj+1) &
1464           then
1465 !            write (iout,*)"turn4 from iproc",iproc," ij",ii,jj," kl",k,l
1466             call add_task(iproc,ntask_cont_from,itask_cont_from)
1467           endif
1468         enddo 
1469         if (iatel_s_all(iproc).gt.0 .and. iatel_e_all(iproc).gt.0) then
1470           if (ii+1.ge.iatel_s_all(iproc).and.ii+1.le.iatel_e_all(iproc)) &
1471           then
1472             if (jj+1.ge.ielstart_all(ii+1,iproc).and. &
1473                 jj+1.le.ielend_all(ii+1,iproc)) then
1474               call add_task(iproc,ntask_cont_from,itask_cont_from)
1475             endif            
1476             if (jj-1.ge.ielstart_all(ii+1,iproc).and. &
1477                 jj-1.le.ielend_all(ii+1,iproc)) then
1478               call add_task(iproc,ntask_cont_from,itask_cont_from)
1479             endif
1480           endif
1481           if (ii-1.ge.iatel_s_all(iproc).and.ii-1.le.iatel_e_all(iproc)) &
1482           then
1483             if (jj-1.ge.ielstart_all(ii-1,iproc).and. &
1484                 jj-1.le.ielend_all(ii-1,iproc)) then
1485               call add_task(iproc,ntask_cont_from,itask_cont_from)
1486             endif
1487             if (jj+1.ge.ielstart_all(ii-1,iproc).and. &
1488                 jj+1.le.ielend_all(ii-1,iproc)) then
1489                call add_task(iproc,ntask_cont_from,itask_cont_from)
1490             endif
1491           endif
1492         endif
1493       enddo
1494       return
1495       end subroutine add_int_from
1496 !-----------------------------------------------------------------------------
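! Illustrative helper (hedged sketch, not used by the original code): the
! repeated four-way test in add_int_from above simply asks whether (k,l) is a
! diagonal neighbour of (ii,jj), i.e. k equals ii+1 or ii-1 and l equals jj+1
! or jj-1.  The function below states that condition once.
      logical function is_diag_neighbor_demo(ii,jj,k,l)
      integer :: ii,jj,k,l
      is_diag_neighbor_demo = abs(k-ii).eq.1 .and. abs(l-jj).eq.1
      end function is_diag_neighbor_demo
!-----------------------------------------------------------------------------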
1497       subroutine add_task(iproc,ntask_cont,itask_cont)
1498
1499 !el      use MPI_data
1500 !el      implicit none
1501 !      include "DIMENSIONS"
1502       integer :: iproc,ntask_cont,itask_cont(0:nfgtasks-1)      !(0:max_fg_procs-1)
1503       integer :: ii
1504       do ii=1,ntask_cont
1505         if (itask_cont(ii).eq.iproc) return
1506       enddo
1507       ntask_cont=ntask_cont+1
1508       itask_cont(ntask_cont)=iproc
1509       return
1510       end subroutine add_task
1511 #endif
1512 !-----------------------------------------------------------------------------
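! Hedged sketch (not part of the original code path): add_task above is a
! plain "append if absent" operation on a flat integer list.  The toy routine
! below restates that idiom with purely local arguments, so it can be
! exercised independently of the MPI task bookkeeping.
      subroutine add_unique_demo(iproc,ncont,icont)
      integer :: iproc,ncont
      integer :: icont(*)
      integer :: ii
! Do nothing if iproc is already on the list.
      do ii=1,ncont
        if (icont(ii).eq.iproc) return
      enddo
! Otherwise append it at the end.
      ncont=ncont+1
      icont(ncont)=iproc
      return
      end subroutine add_unique_demo
!-----------------------------------------------------------------------------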
1513 #if defined MPI || defined WHAM_RUN
1514       subroutine int_partition(int_index,lower_index,upper_index,atom,&
1515        at_start,at_end,first_atom,last_atom,int_gr,jat_start,jat_end,*)
1516
1517 !      implicit real*8 (a-h,o-z)
1518 !      include 'DIMENSIONS'
1519 !      include 'COMMON.IOUNITS'
1520       integer :: int_index,lower_index,upper_index,atom,at_start,at_end,&
1521        first_atom,last_atom,int_gr,jat_start,jat_end,int_index_old
1522       logical :: lprn
1523       lprn=.false.
1524       if (lprn) write (iout,*) 'int_index=',int_index
1525       int_index_old=int_index
1526       int_index=int_index+last_atom-first_atom+1
1527       if (lprn) &
1528          write (iout,*) 'int_index=',int_index,&
1529                      ' int_index_old',int_index_old,&
1530                      ' lower_index=',lower_index,&
1531                      ' upper_index=',upper_index,&
1532                      ' atom=',atom,' first_atom=',first_atom,&
1533                      ' last_atom=',last_atom
1534       if (int_index.ge.lower_index) then
1535         int_gr=int_gr+1
1536         if (at_start.eq.0) then
1537           at_start=atom
1538           jat_start=first_atom-1+lower_index-int_index_old
1539         else
1540           jat_start=first_atom
1541         endif
1542         if (lprn) write (iout,*) 'jat_start',jat_start
1543         if (int_index.ge.upper_index) then
1544           at_end=atom
1545           jat_end=first_atom-1+upper_index-int_index_old
1546           return 1
1547         else
1548           jat_end=last_atom
1549         endif
1550         if (lprn) write (iout,*) 'jat_end',jat_end
1551       endif
1552       return
1553       end subroutine int_partition
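!-----------------------------------------------------------------------------
! Illustrative caller (a hedged sketch, not part of the original UNRES code):
! int_partition is driven with a running interaction counter over a flattened
! list of (i,j) pairs; the alternate return ("*") fires once the counter
! passes upper_index, i.e. when this processor's chunk is complete.  The demo
! below assigns the pairs (i,j), j=i+1..nat, and assumes
! 1 <= lower_index <= upper_index <= nat*(nat-1)/2.
      subroutine int_partition_demo(nat,lower_index,upper_index)
      integer :: nat,lower_index,upper_index
      integer :: at_start,at_end
      integer :: jat_start(nat),jat_end(nat)
      integer :: i,ind,igr
      ind=0
      igr=0
      at_start=0
      at_end=0
      do i=1,nat-1
! Atom i interacts with atoms i+1..nat; the loop is left through the
! alternate return as soon as the chunk [lower_index,upper_index] is covered.
        call int_partition(ind,lower_index,upper_index,i, &
         at_start,at_end,i+1,nat,igr,jat_start(i),jat_end(i),*10)
      enddo
   10 continue
      write (iout,*) 'demo chunk: atoms',at_start,'..',at_end, &
       ' in',igr,' groups'
      return
      end subroutine int_partition_demo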
1554 #endif
1555 !-----------------------------------------------------------------------------
1556 #ifndef CLUSTER
1557       subroutine hpb_partition
1558
1559 !      implicit real*8 (a-h,o-z)
1560 !      include 'DIMENSIONS'
1561 #ifdef MPI
1562       include 'mpif.h'
1563 #endif
1564 !      include 'COMMON.SBRIDGE'
1565 !      include 'COMMON.IOUNITS'
1566 !      include 'COMMON.SETUP'
1567 #ifdef MPI
1568       call int_bounds(nhpb,link_start,link_end)
1569       write (iout,*) 'Processor',fg_rank,' CG group',kolor,&
1570         ' absolute rank',MyRank,&
1571         ' nhpb',nhpb,' link_start=',link_start,&
1572         ' link_end',link_end
1573 #else
1574       link_start=1
1575       link_end=nhpb
1576 #endif
1577       return
1578       end subroutine hpb_partition
1579 #endif
1580 !-----------------------------------------------------------------------------
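! Hedged sketch (illustrative only): the even block partition for which
! int_bounds (module geometry) is used in hpb_partition above can be written
! out explicitly as below; "rank" and "nprocs" stand for fg_rank and
! nfgtasks.  Details of the actual int_bounds routine may differ.
      subroutine block_bounds_demo(n,rank,nprocs,i_start,i_end)
      integer :: n,rank,nprocs,i_start,i_end
      integer :: nchunk,nrest
      nchunk=n/nprocs
      nrest=mod(n,nprocs)
! The first nrest ranks receive one extra element each.
      if (rank.lt.nrest) then
        i_start=rank*(nchunk+1)+1
        i_end=i_start+nchunk
      else
        i_start=nrest*(nchunk+1)+(rank-nrest)*nchunk+1
        i_end=i_start+nchunk-1
      endif
      return
      end subroutine block_bounds_demo
!-----------------------------------------------------------------------------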
1581 ! misc.f in module io_base
1582 !-----------------------------------------------------------------------------
1583 !-----------------------------------------------------------------------------
1584 ! parmread.F
1585 !-----------------------------------------------------------------------------
1586       subroutine getenv_loc(var, val)
1587
1588       character(*) :: var, val
1589
1590 #ifdef WINIFL
1591       character(len=2000) :: line
      integer :: iread
1592 !el      external ilen
1593
1594       open (196,file='env',status='old',readonly,shared)
1595       iread=0
1596 !      write(*,*)'looking for ',var
1597 10    read(196,*,err=11,end=11)line
1598       iread=index(line,var)
1599 !      write(*,*)iread,' ',var,' ',line
1600       if (iread.eq.0) go to 10 
1601 !      write(*,*)'---> ',line
1602 11    continue
1603       if(iread.eq.0) then
1604 !       write(*,*)'env variable ',var,' not found'
1605        val=''
1606       else
1607        iread=iread+ilen(var)+1
1608        read (line(iread:),*,err=12,end=12) val
1609 !       write(*,*)'OK: ',var,' = ',val
1610       endif
1611       close(196)
1612       return
1613 12    val=''
1614       close(196)
1615 #elif (defined CRAY)
1616       integer :: lennam,lenval,ierror
1617 !
1618 !        getenv using a POSIX call, useful on the T3D
1619 !        Sept 1996, comment out error check on advice of H. Pritchard
1620 !
1621       lennam = len(var)
1622       if(lennam.le.0) stop '--error calling getenv--'
1623       call pxfgetenv(var,lennam,val,lenval,ierror)
1624 !-HP- if(ierror.ne.0) stop '--error returned by pxfgetenv--'
1625 #else
1626       call getenv(var,val)
1627 #endif
1628
1629       return
1630       end subroutine getenv_loc
1631 !-----------------------------------------------------------------------------
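! Hedged note (illustrative only): on compilers that support Fortran 2003 the
! intrinsic get_environment_variable offers a portable alternative to the
! preprocessor branches in getenv_loc above; the wrapper below is a sketch
! and is not called anywhere in UNRES.
      subroutine getenv_f2003_demo(var, val)
      character(*) :: var, val
      integer :: istat
      call get_environment_variable(var, val, status=istat)
! A nonzero status means the variable is absent or its value did not fit;
! mimic getenv_loc by returning an empty string in that case.
      if (istat.ne.0) val=''
      return
      end subroutine getenv_f2003_demo
!-----------------------------------------------------------------------------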
1632 ! readrtns_CSA.F
1633 !-----------------------------------------------------------------------------
1634       subroutine setup_var
1635
1636       integer :: i
1637 !      implicit real*8 (a-h,o-z)
1638 !      include 'DIMENSIONS'
1639 !      include 'COMMON.IOUNITS'
1640 !      include 'COMMON.GEO'
1641 !      include 'COMMON.VAR'
1642 !      include 'COMMON.INTERACT'
1643 !      include 'COMMON.LOCAL'
1644 !      include 'COMMON.NAMES'
1645 !      include 'COMMON.CHAIN'
1646 !      include 'COMMON.FFIELD'
1647 !      include 'COMMON.SBRIDGE'
1648 !      include 'COMMON.HEADER'
1649 !      include 'COMMON.CONTROL'
1650 !      include 'COMMON.DBASE'
1651 !      include 'COMMON.THREAD'
1652 !      include 'COMMON.TIME1'
1653 ! Set up variable list.
1654       ntheta=nres-2
1655       nphi=nres-3
1656       nvar=ntheta+nphi
1657       nside=0
1658       do i=2,nres-1
1659 #ifdef WHAM_RUN
1660         if (itype(i).ne.10) then
1661 #else
1662         if (itype(i).ne.10 .and. itype(i).ne.ntyp1) then
1663 #endif
1664           nside=nside+1
1665           ialph(i,1)=nvar+nside
1666           ialph(nside,2)=i
1667         endif
1668       enddo
1669       if (indphi.gt.0) then
1670         nvar=nphi
1671       else if (indback.gt.0) then
1672         nvar=nphi+ntheta
1673       else
1674         nvar=nvar+2*nside
1675       endif
1676 !d    write (iout,'(3i4)') (i,ialph(i,1),ialph(i,2),i=2,nres-1)
1677       return
1678       end subroutine setup_var
1679 !-----------------------------------------------------------------------------
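! Illustrative helper (hedged sketch, not part of the original code): for a
! chain of nres residues with nside movable side chains, setup_var above
! gives ntheta = nres-2, nphi = nres-3 and, in the default branch,
! nvar = ntheta + nphi + 2*nside.  The function below restates that count.
      integer function nvar_count_demo(nres_loc,nside_loc)
      integer :: nres_loc,nside_loc
      nvar_count_demo=(nres_loc-2)+(nres_loc-3)+2*nside_loc
      end function nvar_count_demo
!-----------------------------------------------------------------------------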
1680 ! rescode.f
1681 !-----------------------------------------------------------------------------
1682       integer function rescode(iseq,nam,itype)
1683
1684       use io_base, only: ucase
1685 !      implicit real*8 (a-h,o-z)
1686 !      include 'DIMENSIONS'
1687 !      include 'COMMON.NAMES'
1688 !      include 'COMMON.IOUNITS'
1689       character(len=3) :: nam   !,ucase
1690       integer :: iseq,itype,i
1691
1692       if (itype.eq.0) then
1693
1694         do i=-ntyp1,ntyp1
1695           if (ucase(nam).eq.restyp(i)) then
1696             rescode=i
1697             return
1698           endif
1699         enddo
1700
1701       else
1702
1703         do i=-ntyp1,ntyp1
1704           if (nam(1:1).eq.onelet(i)) then
1705             rescode=i
1706             return
1707           endif
1708         enddo
1709
1710       endif
1711       write (iout,10) iseq,nam
1712       stop
1713    10 format ('**** Error - residue',i4,' has an unresolved name ',a3)
1714       end function rescode
1715 !-----------------------------------------------------------------------------
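! Usage sketch (hypothetical driver, not from the original sources): rescode
! above maps a residue name to the numeric type used throughout UNRES,
! matching three-letter names against restyp when itype=0 and one-letter
! codes against onelet otherwise.
      subroutine rescode_demo
      integer :: it3,it1
      it3=rescode(1,'ALA',0)   ! three-letter lookup via restyp
      it1=rescode(2,'A  ',1)   ! one-letter lookup via onelet
      write (iout,*) 'ALA ->',it3,'    A ->',it1
      return
      end subroutine rescode_demo
!-----------------------------------------------------------------------------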
1716 ! timing.F
1717 !-----------------------------------------------------------------------------
1718 ! $Date: 1994/10/05 16:41:52 $
1719 ! $Revision: 2.2 $
1720 !
1721       subroutine set_timers
1722 !
1723 !el      implicit none
1724 !el      real(kind=8) :: tcpu
1725 !      include 'COMMON.TIME1'
1726 !#ifdef MP
1727 #ifdef MPI
1728       include 'mpif.h'
1729 #endif
1730 ! Diminish the assigned time limit a little so that there is some time to
1731 ! end a batch job
1732 !     timlim=batime-150.0
1733 ! Record the initial CPU time; on some systems (e.g. SUN) the clock does not start at zero.
1734       stime=tcpu()
1735 #if .not. defined(WHAM_RUN) && .not. defined(CLUSTER)
1736 #ifdef MPI
1737       walltime=MPI_WTIME()
1738       time_reduce=0.0d0
1739       time_allreduce=0.0d0
1740       time_bcast=0.0d0
1741       time_gather=0.0d0
1742       time_sendrecv=0.0d0
1743       time_scatter=0.0d0
1744       time_scatter_fmat=0.0d0
1745       time_scatter_ginv=0.0d0
1746       time_scatter_fmatmult=0.0d0
1747       time_scatter_ginvmult=0.0d0
1748       time_barrier_e=0.0d0
1749       time_barrier_g=0.0d0
1750       time_enecalc=0.0d0
1751       time_sumene=0.0d0
1752       time_lagrangian=0.0d0
1753       time_sumgradient=0.0d0
1754       time_intcartderiv=0.0d0
1755       time_inttocart=0.0d0
1756       time_ginvmult=0.0d0
1757       time_fricmatmult=0.0d0
1758       time_cartgrad=0.0d0
1759       time_bcastc=0.0d0
1760       time_bcast7=0.0d0
1761       time_bcastw=0.0d0
1762       time_intfcart=0.0d0
1763       time_vec=0.0d0
1764       time_mat=0.0d0
1765       time_fric=0.0d0
1766       time_stoch=0.0d0
1768       time_fsample=0.0d0
1769 #endif
1770 #endif
1771 !d    print *,' in SET_TIMERS stime=',stime
1772       return
1773       end subroutine set_timers
1774 !-----------------------------------------------------------------------------
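! Hedged sketch of how counters like time_bcast are accumulated elsewhere in
! the code: bracket a communication call with two MPI_WTIME() readings and
! add the difference to the counter.  The buffer, the root rank and the use
! of MPI_COMM_WORLD below are placeholders for this illustration only; the
! production code uses its own fine-grain communicator and module counters.
#ifdef MPI
      subroutine timed_bcast_demo(buf,n,root)
      include 'mpif.h'
      integer :: n,root,ierr
      real(kind=8) :: buf(n)
      real(kind=8) :: time00,t_demo
      t_demo=0.0d0
      time00=MPI_WTIME()
      call MPI_Bcast(buf,n,MPI_DOUBLE_PRECISION,root,MPI_COMM_WORLD,ierr)
      t_demo=t_demo+MPI_WTIME()-time00
      write (iout,*) 'demo broadcast took',t_demo,' s'
      return
      end subroutine timed_bcast_demo
#endif
!-----------------------------------------------------------------------------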
1775 #ifndef CLUSTER
1776       logical function stopx(nf)
1777 ! This function returns .true. if one of the following reasons to exit SUMSL
1778 ! occurs. The "reason" code is stored in WhatsUp, passed through module data (formerly a COMMON block):
1779 !
1780 !... WHATSUP = 0 - go on, no reason to stop. Stopx will return .false.
1781 !...           1 - Time up in current node;
1782 !...           2 - STOP signal was received from another node because the
1783 !...               node's task was accomplished (parallel only);
1784 !...          -1 - STOP signal was received from another node because of error;
1785 !...          -2 - STOP signal was received from another node, because 
1786 !...               the node's time was up.
1787 !      implicit real*8 (a-h,o-z)
1788 !      include 'DIMENSIONS'
1789 !el#ifdef WHAM_RUN
1790 !el      use control_data, only:WhatsUp
1791 !el#endif
1792 #ifdef MP
1793 !el      use MPI_data   !include 'COMMON.INFO'
1794       include 'mpif.h'
1795 #endif
1796       integer :: nf
1797 !el      logical :: ovrtim
1798
1799 !      include 'COMMON.IOUNITS'
1800 !      include 'COMMON.TIME1'
1801       integer :: Kwita
1802
1803 !d    print *,'Processor',MyID,' NF=',nf
1804 !d      write (iout,*) "stopx: ",nf
1805 #ifndef WHAM_RUN
1806 #ifndef MPI
1807       if (ovrtim()) then
1808 ! Finish if time is up.
1809          stopx = .true.
1810          WhatsUp=1
1811 #ifdef MPL
1812       else if (mod(nf,100).eq.0) then
1813 ! Other processors might have finished. Check this every 100th function 
1814 ! evaluation.
1815 ! Master checks if any other processor has sent accepted conformation(s) to it. 
1816          if (MyID.ne.MasterID) call receive_mcm_info
1817          if (MyID.eq.MasterID) call receive_conf
1818 !d       print *,'Processor ',MyID,' is checking STOP: nf=',nf
1819          call recv_stop_sig(Kwita)
1820          if (Kwita.eq.-1) then
1821            write (iout,'(a,i4,a,i5)') 'Processor',&
1822            MyID,' has received STOP signal in STOPX; NF=',nf
1823            write (*,'(a,i4,a,i5)') 'Processor',&
1824            MyID,' has received STOP signal in STOPX; NF=',nf
1825            stopx=.true.
1826            WhatsUp=2
1827          elseif (Kwita.eq.-2) then
1828            write (iout,*) &
1829           'Processor',MyID,' received TIMEUP-STOP signal in SUMSL.'
1830            write (*,*) &
1831           'Processor',MyID,' received TIMEUP-STOP signal in SUMSL.'
1832            WhatsUp=-2
1833            stopx=.true.  
1834          else if (Kwita.eq.-3) then
1835            write (iout,*) &
1836           'Processor',MyID,' received ERROR-STOP signal in SUMSL.'
1837            write (*,*) &
1838           'Processor',MyID,' received ERROR-STOP signal in SUMSL.'
1839            WhatsUp=-1
1840            stopx=.true.
1841          else
1842            stopx=.false.
1843            WhatsUp=0
1844          endif
1845 #endif
1846       else
1847          stopx = .false.
1848          WhatsUp=0
1849       endif
1850 #else
1851       stopx=.false.
1852 !d      write (iout,*) "stopx set at .false."
1853 #endif
1854
1855 #ifdef OSF
1856 ! Check for FOUND_NAN flag
1857       if (FOUND_NAN) then
1858         write(iout,*)"   ***   stopx : Found a NaN"
1859         stopx=.true.
1860       endif
1861 #endif
1862 #else
1863       if (ovrtim()) then
1864 ! Finish if time is up.
1865          stopx = .true.
1866          WhatsUp=1
1867       else if (cutoffviol) then
1868         stopx = .true.
1869         WhatsUp=2
1870       else
1871         stopx=.false.
1872       endif
1873 #endif
1874       return
1875       end function stopx
1876 !-----------------------------------------------------------------------------
1877 #else
1878       logical function stopx(nf)
1879 !
1880 !     ..................................................................
1881 !
1882 !     *****PURPOSE...
1883 !     THIS FUNCTION MAY SERVE AS THE STOPX (ASYNCHRONOUS INTERRUPTION)
1884 !     FUNCTION FOR THE NL2SOL (NONLINEAR LEAST-SQUARES) PACKAGE AT
1885 !     THOSE INSTALLATIONS WHICH DO NOT WISH TO IMPLEMENT A
1886 !     DYNAMIC STOPX.
1887 !
1888 !     *****ALGORITHM NOTES...
1889 !     AT INSTALLATIONS WHERE THE NL2SOL SYSTEM IS USED
1890 !     INTERACTIVELY, THIS DUMMY STOPX SHOULD BE REPLACED BY A
1891 !     FUNCTION THAT RETURNS .TRUE. IF AND ONLY IF THE INTERRUPT
1892 !     (BREAK) KEY HAS BEEN PRESSED SINCE THE LAST CALL ON STOPX.
1893 !
1894 !     $$$ MODIFIED FOR USE AS THE TIMER ROUTINE.
1895 !     $$$                              WHEN THE TIME LIMIT HAS BEEN
1896 !     $$$ REACHED     STOPX IS SET TO .TRUE. AND INITIATES (IN ITSUM)
1897 !     $$$ AN ORDERLY EXIT OUT OF SUMSL.  IF ARRAYS IV AND V ARE
1898 !     $$$ SAVED, THE SUMSL ROUTINES CAN BE RESTARTED AT THE SAME
1899 !     $$$ POINT AT WHICH THEY WERE INTERRUPTED.
1900 !
1901 !     ..................................................................
1902 !
1903 !      include 'DIMENSIONS'
1904       integer :: nf
1905 !      logical ovrtim
1906 !      include 'COMMON.IOUNITS'
1907 !      include 'COMMON.TIME1'
1908 #ifdef MPL
1909 !     include 'COMMON.INFO'
1910       integer :: Kwita
1911
1912 !d    print *,'Processor',MyID,' NF=',nf
1913 #endif
1914       if (ovrtim()) then
1915 ! Finish if time is up.
1916          stopx = .true.
1917 #ifdef MPL
1918       else if (mod(nf,100).eq.0) then
1919 ! Other processors might have finished. Check this every 100th function 
1920 ! evaluation.
1921 !d       print *,'Processor ',MyID,' is checking STOP: nf=',nf
1922          call recv_stop_sig(Kwita)
1923          if (Kwita.eq.-1) then
1924            write (iout,'(a,i4,a,i5)') 'Processor',&
1925            MyID,' has received STOP signal in STOPX; NF=',nf
1926            write (*,'(a,i4,a,i5)') 'Processor',&
1927            MyID,' has received STOP signal in STOPX; NF=',nf
1928            stopx=.true.
1929          else
1930            stopx=.false.
1931          endif
1932 #endif
1933       else
1934          stopx = .false.
1935       endif
1936       return
1937       end function stopx
1938 #endif
1939 !-----------------------------------------------------------------------------
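! Hedged usage sketch (hypothetical driver, not from the original sources):
! SUMSL calls stopx after function evaluations and aborts the minimization
! when it returns .true.; a polling loop of the same shape is shown below.
! The actual minimization step is left as a commented placeholder.
      subroutine stopx_poll_demo(maxfun)
      integer :: maxfun,nf
      do nf=1,maxfun
!        call one_minimization_step()   ! placeholder, not a real routine
        if (stopx(nf)) then
          write (iout,*) 'stopx requested termination at nf=',nf
          return
        endif
      enddo
      return
      end subroutine stopx_poll_demo
!-----------------------------------------------------------------------------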
1940       logical function ovrtim()
1941
1942 !      include 'DIMENSIONS'
1943 !      include 'COMMON.IOUNITS'
1944 !      include 'COMMON.TIME1'
1945 !el      real(kind=8) :: tcpu
1946       real(kind=8) :: curtim
1947 #ifdef MPI
1948       include "mpif.h"
1949       curtim = MPI_Wtime()-walltime
1950 #else
1951       curtim= tcpu()
1952 #endif
1953 !  curtim is the current time in seconds.
1954 !      write (iout,*) "curtim",curtim," timlim",timlim," safety",safety
1955 #ifndef WHAM_RUN
1956       if (curtim .ge. timlim - safety) then
1957         write (iout,'(a,f10.2,a,f10.2,a,f10.2,a)') &
1958         "***************** Elapsed time (",curtim,&
1959         " s) is within the safety limit (",safety,&
1960         " s) of the allocated time (",timlim," s). Terminating."
1961         ovrtim=.true.
1962       else
1963         ovrtim=.false.
1964       endif
1965 #else
1966       ovrtim=.false.
1967 #endif
1968 !elwrite (iout,*) "ovrtim",ovrtim
1969       return
1970       end function ovrtim
1971 !-----------------------------------------------------------------------------
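! Illustrative restatement (hedged sketch, not used by the code): ovrtim
! above signals termination once the elapsed time reaches timlim-safety;
! e.g. with timlim=3600 s and safety=150 s the run stops after 3450 s.
      logical function ovrtim_demo(curtim_loc,timlim_loc,safety_loc)
      real(kind=8) :: curtim_loc,timlim_loc,safety_loc
      ovrtim_demo = curtim_loc .ge. timlim_loc-safety_loc
      end function ovrtim_demo
!-----------------------------------------------------------------------------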
1972       real(kind=8) function tcpu()
1973
1974 !      include 'COMMON.TIME1'
1975       real(kind=8) :: seconds
1976 #ifdef ES9000
1977 !***************************
1978 ! Next definition for EAGLE (ibm-es9000)
1979       real(kind=8) :: micseconds
1980       integer :: rcode
1981       tcpu=cputime(micseconds,rcode)
1982       tcpu=(micseconds/1.0E6) - stime
1983 !***************************
1984 #endif
1985 #ifdef SUN
1986 !***************************
1987 ! Next definitions for sun
1988       REAL(kind=8) ::  ECPU,ETIME,ETCPU
1989       real(kind=8),dimension(2) :: tarray
1990       tcpu=etime(tarray)
1991       tcpu=tarray(1)
1992 !***************************
1993 #endif
1994 #ifdef KSR
1995 !***************************
1996 ! Next definitions for ksr
1997 ! this function uses the ksr timer ALL_SECONDS from the PMON library to
1998 ! return the elapsed time in seconds
1999       tcpu= all_seconds() - stime
2000 !***************************
2001 #endif
2002 #ifdef SGI
2003 !***************************
2004 ! Next definitions for sgi
2005       real(kind=4) :: timar(2), etime
2006       seconds = etime(timar)
2007 !d    print *,'seconds=',seconds,' stime=',stime
2008 !      usrsec = timar(1)
2009 !      syssec = timar(2)
2010       tcpu=seconds - stime
2011 !***************************
2012 #endif
2013
2014 #ifdef LINUX
2015 !***************************
2016 ! Next definitions for Linux
2017       real(kind=4) :: timar(2), etime
2018       seconds = etime(timar)
2019 !d    print *,'seconds=',seconds,' stime=',stime
2020 !      usrsec = timar(1)
2021 !      syssec = timar(2)
2022       tcpu=seconds - stime
2023 !***************************
2024 #endif
2025
2026
2027 #ifdef CRAY
2028 !***************************
2029 ! Next definitions for Cray
2030 !     call date(curdat)
2031 !     curdat=curdat(1:9)
2032 !     call clock(curtim)
2033 !     curtim=curtim(1:8)
2034       seconds = second()
2035       tcpu=seconds - stime
2036 !***************************
2037 #endif
2038 #ifdef AIX
2039 !***************************
2040 ! Next definitions for RS6000
2041        integer(kind=4) :: i1,mclock
2042        i1 = mclock()
2043        tcpu = (i1+0.0D0)/100.0D0
2044 #endif
2045 #ifdef WINPGI
2046 !***************************
2047 ! next definitions for Windows (PGI Fortran)
2048        real(kind=4) :: time_real
2049        call cpu_time(time_real)
2050        tcpu = time_real
2051 #endif
2052 #ifdef WINIFL
2053 !***************************
2054 ! next definitions for Windows (Intel Fortran)
2055        real(kind=4) :: time_real
2056        call cpu_time(time_real)
2057        tcpu = time_real
2058 #endif
2059       tcpu = 0d0 !el  (overrides the platform-specific values computed above)
2060       return
2061       end function tcpu
2062 !-----------------------------------------------------------------------------
2063 #ifndef CLUSTER
2064       subroutine dajczas(rntime,hrtime,mintime,sectime)
2065
2066 !      include 'COMMON.IOUNITS'
2067       integer :: ihr,imn,isc
2068       real(kind=8) :: rntime,hrtime,mintime,sectime 
2069       hrtime=rntime/3600.0D0 
2070       hrtime=aint(hrtime)
2071       mintime=aint((rntime-3600.0D0*hrtime)/60.0D0)
2072       sectime=aint((rntime-3600.0D0*hrtime-60.0D0*mintime)+0.5D0)
2073       if (sectime.eq.60.0D0) then
2074         sectime=0.0D0
2075         mintime=mintime+1.0D0
2076       endif
2077       ihr=hrtime
2078       imn=mintime
2079       isc=sectime
2080       write (iout,328) ihr,imn,isc
2081   328 FORMAT(//'***** Computation time: ',I4  ,' hours ',I2  ,&
2082                ' minutes ', I2  ,' seconds *****')       
2083       return
2084       end subroutine dajczas
2085 !-----------------------------------------------------------------------------
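! Worked example (hedged sketch, not part of the original code): dajczas
! above splits a run time given in seconds into whole hours, minutes and
! rounded seconds; e.g. rntime = 7265.4 s gives 2 hours, 1 minute, 5 seconds.
      subroutine dajczas_demo
      real(kind=8) :: hh,mm,ss
      call dajczas(7265.4d0,hh,mm,ss)
      return
      end subroutine dajczas_demo
!-----------------------------------------------------------------------------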
2086       subroutine print_detailed_timing
2087
2088 !el      use MPI_data
2089 !      implicit real*8 (a-h,o-z)
2090 !      include 'DIMENSIONS'
2091 #ifdef MPI
2092       include 'mpif.h'
2093 #endif
2094 !      include 'COMMON.IOUNITS'
2095 !      include 'COMMON.TIME1'
2096 !      include 'COMMON.SETUP'
2097       real(kind=8) :: time1,time_barrier
2098       time_barrier = 0.0d0
2099 #ifdef MPI !el
2100       time1=MPI_WTIME()
2101 #endif !el
2102          write (iout,'(80(1h=)/a/(80(1h=)))') &
2103           "Details of FG communication time"
2104          write (*,'(7(a40,1pe15.5/),40(1h-)/a40,1pe15.5/80(1h=))') &
2105           "BROADCAST:",time_bcast,"REDUCE:",time_reduce,&
2106           "GATHER:",time_gather,&
2107           "SCATTER:",time_scatter,"SENDRECV:",time_sendrecv,&
2108           "BARRIER ene",time_barrier_e,&
2109           "BARRIER grad",time_barrier_g,&
2110           "TOTAL:",&
2111           time_bcast+time_reduce+time_gather+time_scatter+time_sendrecv
2112          write (*,*) fg_rank,myrank,&
2113            ': Total wall clock time',time1-walltime,' sec'
2114          write (*,*) "Processor",fg_rank,myrank,&
2115            ": BROADCAST time",time_bcast," REDUCE time",&
2116             time_reduce," GATHER time",time_gather," SCATTER time",&
2117             time_scatter,&
2118            " SCATTER fmatmult",time_scatter_fmatmult,&
2119            " SCATTER ginvmult",time_scatter_ginvmult,&
2120            " SCATTER fmat",time_scatter_fmat,&
2121            " SCATTER ginv",time_scatter_ginv,&
2122             " SENDRECV",time_sendrecv,&
2123             " BARRIER ene",time_barrier_e,&
2124             " BARRIER GRAD",time_barrier_g,&
2125             " BCAST7",time_bcast7," BCASTC",time_bcastc,&
2126             " BCASTW",time_bcastw," ALLREDUCE",time_allreduce,&
2127             " TOTAL",&
2128             time_bcast+time_reduce+time_gather+time_scatter+ &
2129             time_sendrecv+time_barrier+time_bcastc
2130 !el#endif
2131          write (*,*) "Processor",fg_rank,myrank," enecalc",time_enecalc
2132          write (*,*) "Processor",fg_rank,myrank," sumene",time_sumene
2133          write (*,*) "Processor",fg_rank,myrank," intfromcart",&
2134            time_intfcart
2135          write (*,*) "Processor",fg_rank,myrank," vecandderiv",&
2136            time_vec
2137          write (*,*) "Processor",fg_rank,myrank," setmatrices",&
2138            time_mat
2139          write (*,*) "Processor",fg_rank,myrank," ginvmult",&
2140            time_ginvmult
2141          write (*,*) "Processor",fg_rank,myrank," fricmatmult",&
2142            time_fricmatmult
2143          write (*,*) "Processor",fg_rank,myrank," inttocart",&
2144            time_inttocart
2145          write (*,*) "Processor",fg_rank,myrank," sumgradient",&
2146            time_sumgradient
2147          write (*,*) "Processor",fg_rank,myrank," intcartderiv",&
2148            time_intcartderiv
2149          if (fg_rank.eq.0) then
2150            write (*,*) "Processor",fg_rank,myrank," lagrangian",&
2151              time_lagrangian
2152            write (*,*) "Processor",fg_rank,myrank," cartgrad",&
2153              time_cartgrad
2154          endif
2155       return
2156       end subroutine print_detailed_timing
2157 #endif
2158 !-----------------------------------------------------------------------------
2159 !-----------------------------------------------------------------------------
2160       end module control