      block data
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.MCM'
c      include 'COMMON.MD'
      data MovTypID
     & /'pool','chain regrow','multi-bond','phi','theta','side chain',
     & 'total'/
c Conversion factor from poise to molecular units and the gas constant
c      data cPoise /2.9361d0/, Rb /0.001986d0/
      end
c--------------------------------------------------------------------------
      subroutine initialize
C
C Define constants and zero out tables.
C
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
#ifndef ISNAN
      external proc_proc
#ifdef WINPGI
cMS$ATTRIBUTES C :: proc_proc
#endif
#endif
      include 'COMMON.IOUNITS'
      include 'COMMON.CHAIN'
      include 'COMMON.INTERACT'
      include 'COMMON.GEO'
      include 'COMMON.LOCAL'
      include 'COMMON.TORSION'
      include 'COMMON.FFIELD'
      include 'COMMON.SBRIDGE'
      include 'COMMON.MCM'
      include 'COMMON.MINIM'
      include 'COMMON.DERIV'
      include 'COMMON.SPLITELE'
c Common blocks from the diagonalization routines
      COMMON /IOFILE/ IR,IW,IP,IJK,IPK,IDAF,NAV,IODA(400)
      COMMON /MACHSW/ KDIAG,ICORFL,IXDR
      logical mask_r
c      real*8 text1 /'initial_i'/

      mask_r=.false.
#ifndef ISNAN
c NaNQ initialization: dacos of an argument outside [-1,1] yields NaN,
c which proc_proc uses to flag uninitialized values.
      i=-1
      arg=100.0d0
      rr=dacos(arg)
#ifdef WINPGI
      idumm=proc_proc(rr,i)
#else
      call proc_proc(rr,i)
#endif
#endif
      kdiag=0
      icorfl=0
      iw=2
C
C Define the auxiliary constants used in angle conversion.
C
      pi=4.0D0*datan(1.0D0)
      dwapi=2.0D0*pi
      dwapi3=dwapi/3.0D0
      pipol=0.5D0*pi
      deg2rad=pi/180.0D0
      rad2deg=1.0D0/deg2rad
      angmin=10.0D0*deg2rad
C
C Define I/O units.
C
      inp=    1
      iout=   2
      ipdbin= 3
      ipdb=   7
      icart= 30
      imol2=  4
      igeom=  8
      intin=  9
      ithep= 11
      ithep_pdb=51
      irotam=12
      irotam_pdb=52
      itorp= 13
      itordp= 23
      ielep= 14
      isidep=15
      iscpp=25
      icbase=16
      ifourier=20
      istat= 17
      irest1=55
      irest2=56
      iifrag=57
      ientin=18
      ientout=19
      ibond = 28
      isccor = 29
crc for write_rmsbank1
      izs1=21
cdr include secondary structure prediction bias
      isecpred=27
C
C CSA I/O units (separated from the others, especially for Jooyoung).
C
      icsa_rbank=30
      icsa_seed=31
      icsa_history=32
      icsa_bank=33
      icsa_bank1=34
      icsa_alpha=35
      icsa_alpha1=36
      icsa_bankt=37
      icsa_int=39
      icsa_bank_reminimized=38
      icsa_native_int=41
      icsa_in=40
crc for ifc error 118
      icsa_pdb=42
C
C Set default weights of the energy terms.
C
      wlong=1.0D0
      welec=1.0D0
      wtor =1.0D0
      wang =1.0D0
      wscloc=1.0D0
      wstrain=1.0D0
C
C Zero out tables.
C
      print '(a,$)','Inside initialize'
c      call memmon_print_usage()
      do i=1,maxres2
        do j=1,3
          c(j,i)=0.0D0
          dc(j,i)=0.0D0
        enddo
      enddo
      do i=1,maxres
        do j=1,3
          xloc(j,i)=0.0D0
        enddo
      enddo
      do i=1,ntyp
        do j=1,ntyp
          aa(i,j)=0.0D0
          bb(i,j)=0.0D0
          augm(i,j)=0.0D0
          sigma(i,j)=0.0D0
          r0(i,j)=0.0D0
          chi(i,j)=0.0D0
        enddo
        do j=1,2
          bad(i,j)=0.0D0
        enddo
        chip(i)=0.0D0
        alp(i)=0.0D0
        sigma0(i)=0.0D0
        sigii(i)=0.0D0
        rr0(i)=0.0D0
        a0thet(i)=0.0D0
        do j=1,2
          athet(j,i)=0.0D0
          bthet(j,i)=0.0D0
        enddo
        do j=0,3
          polthet(j,i)=0.0D0
        enddo
        do j=1,3
          gthet(j,i)=0.0D0
        enddo
        theta0(i)=0.0D0
        sig0(i)=0.0D0
        sigc0(i)=0.0D0
        do j=1,maxlob
          bsc(j,i)=0.0D0
          do k=1,3
            censc(k,j,i)=0.0D0
          enddo
          do k=1,3
            do l=1,3
              gaussc(l,k,j,i)=0.0D0
            enddo
          enddo
        enddo
        nlob(i)=0
      enddo
      nlob(ntyp1)=0
      dsc(ntyp1)=0.0D0
      do i=1,maxtor
        itortyp(i)=0
        do j=1,maxtor
          do k=1,maxterm
            v1(k,j,i)=0.0D0
            v2(k,j,i)=0.0D0
          enddo
        enddo
      enddo
      do i=1,maxres
        itype(i)=0
        itel(i)=0
      enddo
C Initialize the bridge arrays.
      ns=0
      nss=0
      nhpb=0
      do i=1,maxss
        iss(i)=0
      enddo
      do i=1,maxdim
        dhpb(i)=0.0D0
      enddo
      do i=1,maxres
        ihpb(i)=0
        jhpb(i)=0
      enddo
C
C Initialize timing.
C
      call set_timers
C
C Initialize the variables used in minimization.
C
c      maxfun=5000
c      maxit=2000
      maxfun=500
      maxit=200
      tolf=1.0D-2
      rtolf=5.0D-4
C
C Initialize the variables responsible for the mode of gradient storage.
C
      nfl=0
      icg=1
C
C Initialize the constants used to split the energy into long- and
C short-range components.
C
      r_cut=2.0d0
      rlamb=0.3d0
#ifndef SPLITELE
      nprint_ene=nprint_ene-1
#endif
      return
      end
c-------------------------------------------------------------------------
      block data nazwy
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.NAMES'
      include 'COMMON.FFIELD'
      data restyp /
     &'CYS','MET','PHE','ILE','LEU','VAL','TRP','TYR','ALA','GLY','THR',
     &'SER','GLN','ASN','GLU','ASP','HIS','ARG','LYS','PRO','D'/
      data onelet /
     &'C','M','F','I','L','V','W','Y','A','G','T',
     &'S','Q','N','E','D','H','R','K','P','X'/
      data potname /'LJ','LJK','BP','GB','GBV'/
      data ename /
     &   "EVDW SC-SC","EVDW2 SC-p","EES p-p","ECORR4 ","ECORR5 ",
     &   "ECORR6 ","EELLO ","ETURN3 ","ETURN4 ","ETURN6 ",
     &   "EBE bend","ESC SCloc","ETORS ","ETORSD ","EHPB ","EVDWPP ",
     &   "ESTR ","EVDW2_14 ","UCONST ", " ","ESCCOR"," "," ",
     &   "DFA DIS","DFA TOR","DFA NEI","DFA BET"/
      data wname /
     &   "WSC","WSCP","WELEC","WCORR","WCORR5","WCORR6","WEL_LOC",
     &   "WTURN3","WTURN4","WTURN6","WANG","WSCLOC","WTOR","WTORD",
     &   "WSTRAIN","WVDWPP","WBOND","SCAL14"," "," ","WSCCOR",
     &   " "," ","WDFAD","WDFAT","WDFAN","WDFAB"/
      data nprint_ene /24/
      data print_order /1,2,3,11,12,13,14,4,5,6,7,8,9,10,19,18,15,17,
     &   16,21,24,25,26,27,0,0,0/
      end
c---------------------------------------------------------------------------
      subroutine init_int_table
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
      integer blocklengths(15),displs(15)
#endif
      include 'COMMON.CONTROL'
      include 'COMMON.SETUP'
      include 'COMMON.CHAIN'
      include 'COMMON.INTERACT'
      include 'COMMON.LOCAL'
      include 'COMMON.SBRIDGE'
      include 'COMMON.TORCNSTR'
      include 'COMMON.IOUNITS'
      include 'COMMON.DERIV'
      include 'COMMON.CONTACTS'
      common /przechowalnia/ iturn3_start_all(0:MaxProcs),
     & iturn3_end_all(0:MaxProcs),iturn4_start_all(0:MaxProcs),
     & iturn4_end_all(0:MaxProcs),iatel_s_all(0:MaxProcs),
     & iatel_e_all(0:MaxProcs),ielstart_all(maxres,0:MaxProcs-1),
     & ielend_all(maxres,0:MaxProcs-1),
     & ntask_cont_from_all(0:max_fg_procs-1),
     & itask_cont_from_all(0:max_fg_procs-1,0:max_fg_procs-1),
     & ntask_cont_to_all(0:max_fg_procs-1),
     & itask_cont_to_all(0:max_fg_procs-1,0:max_fg_procs-1)
      integer FG_GROUP,CONT_FROM_GROUP,CONT_TO_GROUP
      logical scheck,lprint,flag
#ifdef MPI
      integer my_sc_int(0:max_fg_Procs-1),my_sc_intt(0:max_fg_Procs),
     & my_ele_int(0:max_fg_Procs-1),my_ele_intt(0:max_fg_Procs)
C... Determine the numbers of the first and last SC-SC interactions
C... to be handled by the current processor.
      do i=0,nfgtasks-1
        itask_cont_from(i)=fg_rank
        itask_cont_to(i)=fg_rank
      enddo
      lprint=.false.
      if (lprint)
     &  write (iout,*) 'INIT_INT_TABLE nres=',nres,' nnt=',nnt,
     &  ' nct=',nct
      n_sc_int_tot=(nct-nnt+1)*(nct-nnt)/2-nss
      call int_bounds(n_sc_int_tot,my_sc_inds,my_sc_inde)
      if (lprint)
     &  write (iout,*) 'Processor',fg_rank,' CG group',kolor,
     &  ' absolute rank',MyRank,
     &  ' n_sc_int_tot',n_sc_int_tot,' my_sc_inds=',my_sc_inds,
     &  ' my_sc_inde',my_sc_inde
      ind_sctint=0
      iatsc_s=0
      iatsc_e=0
#endif
c      lprint=.false.
      do i=1,maxres
        nint_gr(i)=0
        nscp_gr(i)=0
        ielstart(i)=0
        ielend(i)=0
        do j=1,maxint_gr
          istart(i,j)=0
          iend(i,j)=0
          iscpstart(i,j)=0
          iscpend(i,j)=0
        enddo
      enddo
      ind_scint=0
      ind_scint_old=0
cd    write (iout,*) 'ns=',ns,' nss=',nss,' ihpb,jhpb',
cd   &  (ihpb(i),jhpb(i),i=1,nss)
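c A worked example of the pair count above (added note; values are
c illustrative only): with nnt=1, nct=5, and nss=0,
c n_sc_int_tot=(5-1+1)*(5-1)/2=10, i.e. the 10 unordered side-chain
c pairs (1,2),(1,3),...,(4,5); each disulfide bridge found in the loop
c below removes one pair from the SC-SC interaction list.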
      do i=nnt,nct-1
        scheck=.false.
C Find out whether residue i is involved in a disulfide bridge.
        do ii=1,nss
          if (ihpb(ii).eq.i+nres) then
            scheck=.true.
            jj=jhpb(ii)-nres
            goto 10
          endif
        enddo
   10   continue
cd      write (iout,*) 'i=',i,' scheck=',scheck,' jj=',jj
        if (scheck) then
C Residue i is disulfide-bonded to residue jj; exclude the pair (i,jj)
C from the SC-SC interaction list.
          if (jj.eq.i+1) then
#ifdef MPI
c            write (iout,*) 'jj=i+1'
            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,
     &        iatsc_s,iatsc_e,i+2,nct,nint_gr(i),istart(i,1),
     &        iend(i,1),*12)
#else
            nint_gr(i)=1
            istart(i,1)=i+2
            iend(i,1)=nct
#endif
          else if (jj.eq.nct) then
#ifdef MPI
c            write (iout,*) 'jj=nct'
            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,
     &        iatsc_s,iatsc_e,i+1,nct-1,nint_gr(i),istart(i,1),
     &        iend(i,1),*12)
#else
            nint_gr(i)=1
            istart(i,1)=i+1
            iend(i,1)=nct-1
#endif
          else
#ifdef MPI
            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,
     &        iatsc_s,iatsc_e,i+1,jj-1,nint_gr(i),istart(i,1),
     &        iend(i,1),*12)
            ii=nint_gr(i)+1
            call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,
     &        iatsc_s,iatsc_e,jj+1,nct,nint_gr(i),istart(i,ii),
     &        iend(i,ii),*12)
#else
            nint_gr(i)=2
            istart(i,1)=i+1
            iend(i,1)=jj-1
            istart(i,2)=jj+1
            iend(i,2)=nct
#endif
          endif
        else
#ifdef MPI
          call int_partition(ind_scint,my_sc_inds,my_sc_inde,i,
     &      iatsc_s,iatsc_e,i+1,nct,nint_gr(i),istart(i,1),
     &      iend(i,1),*12)
#else
          nint_gr(i)=1
          istart(i,1)=i+1
          iend(i,1)=nct
          ind_scint=ind_scint+nct-i
#endif
        endif
#ifdef MPI
        ind_scint_old=ind_scint
#endif
      enddo
   12 continue
#ifndef MPI
      iatsc_s=nnt
      iatsc_e=nct-1
#endif
#ifdef MPI
      if (lprint) write (*,*) 'Processor',fg_rank,' CG Group',kolor,
     &  ' absolute rank',myrank,' iatsc_s=',iatsc_s,' iatsc_e=',iatsc_e
#endif
      if (lprint) then
        write (iout,'(a)') 'Interaction array:'
        do i=iatsc_s,iatsc_e
          write (iout,'(i3,2(2x,2i3))')
     &      i,(istart(i,iint),iend(i,iint),iint=1,nint_gr(i))
        enddo
      endif
      ispp=4
#ifdef MPI
C Now partition the electrostatic interaction array.
      npept=nct-nnt
      nele_int_tot=(npept-ispp)*(npept-ispp+1)/2
      call int_bounds(nele_int_tot,my_ele_inds,my_ele_inde)
      if (lprint)
     &  write (*,*) 'Processor',fg_rank,' CG group',kolor,
     &  ' absolute rank',MyRank,
     &  ' nele_int_tot',nele_int_tot,' my_ele_inds=',my_ele_inds,
     &  ' my_ele_inde',my_ele_inde
      iatel_s=0
      iatel_e=0
      ind_eleint=0
      ind_eleint_old=0
      do i=nnt,nct-3
        ijunk=0
        call int_partition(ind_eleint,my_ele_inds,my_ele_inde,i,
     &    iatel_s,iatel_e,i+ispp,nct-1,ijunk,ielstart(i),
     &    ielend(i),*13)
      enddo ! i
   13 continue
      if (iatel_s.eq.0) iatel_s=1
      nele_int_tot_vdw=(npept-2)*(npept-2+1)/2
c      write (iout,*) "nele_int_tot_vdw",nele_int_tot_vdw
      call int_bounds(nele_int_tot_vdw,my_ele_inds_vdw,
     &  my_ele_inde_vdw)
c      write (iout,*) "my_ele_inds_vdw",my_ele_inds_vdw,
c     &  " my_ele_inde_vdw",my_ele_inde_vdw
      ind_eleint_vdw=0
      ind_eleint_vdw_old=0
      iatel_s_vdw=0
      iatel_e_vdw=0
      do i=nnt,nct-3
        ijunk=0
        call int_partition(ind_eleint_vdw,my_ele_inds_vdw,
     &    my_ele_inde_vdw,i,
     &    iatel_s_vdw,iatel_e_vdw,i+2,nct-1,ijunk,ielstart_vdw(i),
     &    ielend_vdw(i),*15)
c        write (iout,*) i," ielstart_vdw",ielstart_vdw(i),
c     &    " ielend_vdw",ielend_vdw(i)
      enddo ! i
      if (iatel_s_vdw.eq.0) iatel_s_vdw=1
   15 continue
#else
      iatel_s=nnt
      iatel_e=nct-5
      do i=iatel_s,iatel_e
        ielstart(i)=i+4
        ielend(i)=nct-1
      enddo
      iatel_s_vdw=nnt
      iatel_e_vdw=nct-3
      do i=iatel_s_vdw,iatel_e_vdw
        ielstart_vdw(i)=i+2
        ielend_vdw(i)=nct-1
      enddo
#endif
      if (lprint) then
        write (*,*) 'Processor',fg_rank,' CG group',kolor,
     &    ' absolute rank',MyRank
        write (iout,*) 'Electrostatic interaction array:'
        do i=iatel_s,iatel_e
          write (iout,'(i3,2(2x,2i3))') i,ielstart(i),ielend(i)
        enddo
      endif ! lprint
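c Illustrative count for the SC-p partition below (added note; values
c are examples only): with npept=nct-nnt=9 and iscp=2,
c nscp_int_tot=(9-2+1)**2=64. Peptide groups closer than iscp
c positions to side chain i are excluded, which is why residues near
c the chain ends get a single j-range while interior residues get two
c (one on each side of the exclusion window).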
c      iscp=3
      iscp=2
C Partition the SC-p interaction array.
#ifdef MPI
      nscp_int_tot=(npept-iscp+1)*(npept-iscp+1)
      call int_bounds(nscp_int_tot,my_scp_inds,my_scp_inde)
      if (lprint) write (iout,*) 'Processor',fg_rank,' CG group',kolor,
     &  ' absolute rank',myrank,
     &  ' nscp_int_tot',nscp_int_tot,' my_scp_inds=',my_scp_inds,
     &  ' my_scp_inde',my_scp_inde
      iatscp_s=0
      iatscp_e=0
      ind_scpint=0
      ind_scpint_old=0
      do i=nnt,nct-1
        if (i.lt.nnt+iscp) then
cd        write (iout,*) 'i.le.nnt+iscp'
          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,
     &      iatscp_s,iatscp_e,i+iscp,nct,nscp_gr(i),iscpstart(i,1),
     &      iscpend(i,1),*14)
        else if (i.gt.nct-iscp) then
cd        write (iout,*) 'i.gt.nct-iscp'
          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,
     &      iatscp_s,iatscp_e,nnt,i-iscp,nscp_gr(i),iscpstart(i,1),
     &      iscpend(i,1),*14)
        else
          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,
     &      iatscp_s,iatscp_e,nnt,i-iscp,nscp_gr(i),iscpstart(i,1),
     &      iscpend(i,1),*14)
          ii=nscp_gr(i)+1
          call int_partition(ind_scpint,my_scp_inds,my_scp_inde,i,
     &      iatscp_s,iatscp_e,i+iscp,nct,nscp_gr(i),iscpstart(i,ii),
     &      iscpend(i,ii),*14)
        endif
      enddo ! i
   14 continue
#else
      iatscp_s=nnt
      iatscp_e=nct-1
      do i=nnt,nct-1
        if (i.lt.nnt+iscp) then
          nscp_gr(i)=1
          iscpstart(i,1)=i+iscp
          iscpend(i,1)=nct
        elseif (i.gt.nct-iscp) then
          nscp_gr(i)=1
          iscpstart(i,1)=nnt
          iscpend(i,1)=i-iscp
        else
          nscp_gr(i)=2
          iscpstart(i,1)=nnt
          iscpend(i,1)=i-iscp
          iscpstart(i,2)=i+iscp
          iscpend(i,2)=nct
        endif
      enddo ! i
#endif
      if (lprint) then
        write (iout,'(a)') 'SC-p interaction array:'
        do i=iatscp_s,iatscp_e
          write (iout,'(i3,2(2x,2i3))')
     &      i,(iscpstart(i,j),iscpend(i,j),j=1,nscp_gr(i))
        enddo
      endif ! lprint
C Partition the local interactions.
#ifdef MPI
      call int_bounds(nres-2,loc_start,loc_end)
      loc_start=loc_start+1
      loc_end=loc_end+1
      call int_bounds(nres-2,ithet_start,ithet_end)
      ithet_start=ithet_start+2
      ithet_end=ithet_end+2
      call int_bounds(nct-nnt-2,iturn3_start,iturn3_end)
      iturn3_start=iturn3_start+nnt
      iphi_start=iturn3_start+2
      iturn3_end=iturn3_end+nnt
      iphi_end=iturn3_end+2
      iturn3_start=iturn3_start-1
      iturn3_end=iturn3_end-1
      call int_bounds(nres-3,iphi1_start,iphi1_end)
      iphi1_start=iphi1_start+3
      iphi1_end=iphi1_end+3
      call int_bounds(nct-nnt-3,iturn4_start,iturn4_end)
      iturn4_start=iturn4_start+nnt
      iphid_start=iturn4_start+2
      iturn4_end=iturn4_end+nnt
      iphid_end=iturn4_end+2
      iturn4_start=iturn4_start-1
      iturn4_end=iturn4_end-1
      call int_bounds(nres-2,ibond_start,ibond_end)
      ibond_start=ibond_start+1
      ibond_end=ibond_end+1
      call int_bounds(nct-nnt,ibondp_start,ibondp_end)
      ibondp_start=ibondp_start+nnt
      ibondp_end=ibondp_end+nnt
      call int_bounds1(nres-1,ivec_start,ivec_end)
      print *,"Processor",myrank,fg_rank,fg_rank1,
     &  " ivec_start",ivec_start," ivec_end",ivec_end
      iset_start=loc_start+2
      iset_end=loc_end+2
      if (ndih_constr.eq.0) then
        idihconstr_start=1
        idihconstr_end=0
      else
        call int_bounds(ndih_constr,idihconstr_start,idihconstr_end)
      endif
C Partition the sum-of-gradients loop: entry n of the flattened upper
C triangle is mapped back to its row igrad and column range
C jgrad_start..jgrad_end.
      nsumgrad=(nres-nnt)*(nres-nnt+1)/2
      nlen=nres-nnt+1
      call int_bounds(nsumgrad,ngrad_start,ngrad_end)
      igrad_start=((2*nlen+1)
     &  -sqrt(float((2*nlen-1)**2-8*(ngrad_start-1))))/2
      jgrad_start(igrad_start)=
     &  ngrad_start-(2*nlen-igrad_start)*(igrad_start-1)/2
     &  +igrad_start
      jgrad_end(igrad_start)=nres
      igrad_end=((2*nlen+1)
     &  -sqrt(float((2*nlen-1)**2-8*(ngrad_end-1))))/2
      if (igrad_end.gt.igrad_start) jgrad_start(igrad_end)=igrad_end+1
      jgrad_end(igrad_end)=ngrad_end-(2*nlen-igrad_end)
     &  *(igrad_end-1)/2+igrad_end
      do i=igrad_start+1,igrad_end-1
        jgrad_start(i)=i+1
        jgrad_end(i)=nres
      enddo
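c A worked example of the closed-form row lookup above (added note;
c values are illustrative only): for nlen=5 the flattened triangle has
c nsumgrad=4*5/2=10 entries, in rows of length 4,3,2,1 starting at
c entries 1,5,8,10. For ngrad_start=5, (2*5-1)**2-8*(5-1)=81-32=49,
c sqrt(49)=7, and igrad_start=(11-7)/2=2, i.e. entry 5 opens row 2,
c as expected.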
      if (lprint) then
        write (*,*) 'Processor:',fg_rank,' CG group',kolor,
     &    ' absolute rank',myrank,
     &    ' loc_start',loc_start,' loc_end',loc_end,
     &    ' ithet_start',ithet_start,' ithet_end',ithet_end,
     &    ' iphi_start',iphi_start,' iphi_end',iphi_end,
     &    ' iphid_start',iphid_start,' iphid_end',iphid_end,
     &    ' ibond_start',ibond_start,' ibond_end',ibond_end,
     &    ' ibondp_start',ibondp_start,' ibondp_end',ibondp_end,
     &    ' iturn3_start',iturn3_start,' iturn3_end',iturn3_end,
     &    ' iturn4_start',iturn4_start,' iturn4_end',iturn4_end,
     &    ' ivec_start',ivec_start,' ivec_end',ivec_end,
     &    ' iset_start',iset_start,' iset_end',iset_end,
     &    ' idihconstr_start',idihconstr_start,' idihconstr_end',
     &    idihconstr_end
        write (*,*) 'Processor:',fg_rank,myrank,' igrad_start',
     &    igrad_start,' igrad_end',igrad_end,' ngrad_start',
     &    ngrad_start,' ngrad_end',ngrad_end
        do i=igrad_start,igrad_end
          write (*,*) 'Processor:',fg_rank,myrank,i,
     &      jgrad_start(i),jgrad_end(i)
        enddo
      endif
      if (nfgtasks.gt.1) then
        call MPI_Allgather(ivec_start,1,MPI_INTEGER,ivec_displ(0),1,
     &    MPI_INTEGER,FG_COMM1,IERROR)
        iaux=ivec_end-ivec_start+1
        call MPI_Allgather(iaux,1,MPI_INTEGER,ivec_count(0),1,
     &    MPI_INTEGER,FG_COMM1,IERROR)
        call MPI_Allgather(iset_start-2,1,MPI_INTEGER,iset_displ(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        iaux=iset_end-iset_start+1
        call MPI_Allgather(iaux,1,MPI_INTEGER,iset_count(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(ibond_start,1,MPI_INTEGER,ibond_displ(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        iaux=ibond_end-ibond_start+1
        call MPI_Allgather(iaux,1,MPI_INTEGER,ibond_count(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(ithet_start,1,MPI_INTEGER,ithet_displ(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        iaux=ithet_end-ithet_start+1
        call MPI_Allgather(iaux,1,MPI_INTEGER,ithet_count(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iphi_start,1,MPI_INTEGER,iphi_displ(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        iaux=iphi_end-iphi_start+1
        call MPI_Allgather(iaux,1,MPI_INTEGER,iphi_count(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iphi1_start,1,MPI_INTEGER,iphi1_displ(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        iaux=iphi1_end-iphi1_start+1
        call MPI_Allgather(iaux,1,MPI_INTEGER,iphi1_count(0),1,
     &    MPI_INTEGER,FG_COMM,IERROR)
        do i=0,maxprocs-1
          do j=1,maxres
            ielstart_all(j,i)=0
            ielend_all(j,i)=0
          enddo
        enddo
        call MPI_Allgather(iturn3_start,1,MPI_INTEGER,
     &    iturn3_start_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iturn4_start,1,MPI_INTEGER,
     &    iturn4_start_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iturn3_end,1,MPI_INTEGER,
     &    iturn3_end_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iturn4_end,1,MPI_INTEGER,
     &    iturn4_end_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iatel_s,1,MPI_INTEGER,
     &    iatel_s_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(iatel_e,1,MPI_INTEGER,
     &    iatel_e_all(0),1,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(ielstart(1),maxres,MPI_INTEGER,
     &    ielstart_all(1,0),maxres,MPI_INTEGER,FG_COMM,IERROR)
        call MPI_Allgather(ielend(1),maxres,MPI_INTEGER,
     &    ielend_all(1,0),maxres,MPI_INTEGER,FG_COMM,IERROR)
        if (lprint) then
          write (iout,*) "iatel_s_all",(iatel_s_all(i),i=0,nfgtasks)
          write (iout,*) "iatel_e_all",(iatel_e_all(i),i=0,nfgtasks)
          write (iout,*) "iturn3_start_all",
     &      (iturn3_start_all(i),i=0,nfgtasks-1)
          write (iout,*) "iturn3_end_all",
     &      (iturn3_end_all(i),i=0,nfgtasks-1)
          write (iout,*) "iturn4_start_all",
     &      (iturn4_start_all(i),i=0,nfgtasks-1)
          write (iout,*) "iturn4_end_all",
     &      (iturn4_end_all(i),i=0,nfgtasks-1)
          write (iout,*) "The ielstart_all array"
          do i=nnt,nct
            write (iout,'(20i4)') i,(ielstart_all(i,j),j=0,nfgtasks-1)
          enddo
          write (iout,*) "The ielend_all array"
          do i=nnt,nct
            write (iout,'(20i4)') i,(ielend_all(i,j),j=0,nfgtasks-1)
          enddo
          call flush(iout)
        endif
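c Explanatory note on the block below: it builds the point-to-point
c contact communication map. add_int records which lower-ranked
c processors must receive each turn3/turn4/long-range pair owned here
c (the send map), and add_int_from records which higher-ranked
c processors this task must receive from (the receive map). The maps
c are later verified on the master and turned into MPI groups.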
        ntask_cont_from=0
        ntask_cont_to=0
        itask_cont_from(0)=fg_rank
        itask_cont_to(0)=fg_rank
        flag=.false.
        do ii=iturn3_start,iturn3_end
          call add_int(ii,ii+2,iturn3_sent(1,ii),
     &      ntask_cont_to,itask_cont_to,flag)
        enddo
        do ii=iturn4_start,iturn4_end
          call add_int(ii,ii+3,iturn4_sent(1,ii),
     &      ntask_cont_to,itask_cont_to,flag)
        enddo
        do ii=iturn3_start,iturn3_end
          call add_int_from(ii,ii+2,ntask_cont_from,itask_cont_from)
        enddo
        do ii=iturn4_start,iturn4_end
          call add_int_from(ii,ii+3,ntask_cont_from,itask_cont_from)
        enddo
        if (lprint) then
          write (iout,*) "After turn3 ntask_cont_from",
     &      ntask_cont_from," ntask_cont_to",ntask_cont_to
          write (iout,*) "itask_cont_from",
     &      (itask_cont_from(i),i=1,ntask_cont_from)
          write (iout,*) "itask_cont_to",
     &      (itask_cont_to(i),i=1,ntask_cont_to)
          call flush(iout)
        endif
c        write (iout,*) "Loop forward"
c        call flush(iout)
        do i=iatel_s,iatel_e
c          write (iout,*) "from loop i=",i
c          call flush(iout)
          do j=ielstart(i),ielend(i)
            call add_int_from(i,j,ntask_cont_from,itask_cont_from)
          enddo
        enddo
c        write (iout,*) "Loop backward iatel_e-1",iatel_e-1,
c     &    " iatel_e",iatel_e
c        call flush(iout)
        nat_sent=0
        do i=iatel_s,iatel_e
c          write (iout,*) "i",i," ielstart",ielstart(i),
c     &      " ielend",ielend(i)
c          call flush(iout)
          flag=.false.
          do j=ielstart(i),ielend(i)
            call add_int(i,j,iint_sent(1,j,nat_sent+1),ntask_cont_to,
     &        itask_cont_to,flag)
          enddo
          if (flag) then
            nat_sent=nat_sent+1
            iat_sent(nat_sent)=i
          endif
        enddo
        if (lprint) then
          write (iout,*) "After longrange ntask_cont_from",
     &      ntask_cont_from," ntask_cont_to",ntask_cont_to
          write (iout,*) "itask_cont_from",
     &      (itask_cont_from(i),i=1,ntask_cont_from)
          write (iout,*) "itask_cont_to",
     &      (itask_cont_to(i),i=1,ntask_cont_to)
          call flush(iout)
          write (iout,*) "iint_sent"
          do i=1,nat_sent
            ii=iat_sent(i)
            write (iout,'(20i4)') ii,(j,(iint_sent(k,j,i),k=1,4),
     &        j=ielstart(ii),ielend(ii))
          enddo
          write (iout,*) "iturn3_sent iturn3_start",iturn3_start,
     &      " iturn3_end",iturn3_end
          write (iout,'(20i4)') (i,(iturn3_sent(j,i),j=1,4),
     &      i=iturn3_start,iturn3_end)
          write (iout,*) "iturn4_sent iturn4_start",iturn4_start,
     &      " iturn4_end",iturn4_end
          write (iout,'(20i4)') (i,(iturn4_sent(j,i),j=1,4),
     &      i=iturn4_start,iturn4_end)
          call flush(iout)
        endif
        call MPI_Gather(ntask_cont_from,1,MPI_INTEGER,
     &    ntask_cont_from_all,1,MPI_INTEGER,king,FG_COMM,IERR)
c        write (iout,*) "Gather ntask_cont_from ended"
c        call flush(iout)
        call MPI_Gather(itask_cont_from(0),max_fg_procs,MPI_INTEGER,
     &    itask_cont_from_all(0,0),max_fg_procs,MPI_INTEGER,king,
     &    FG_COMM,IERR)
c        write (iout,*) "Gather itask_cont_from ended"
c        call flush(iout)
        call MPI_Gather(ntask_cont_to,1,MPI_INTEGER,ntask_cont_to_all,
     &    1,MPI_INTEGER,king,FG_COMM,IERR)
c        write (iout,*) "Gather ntask_cont_to ended"
c        call flush(iout)
        call MPI_Gather(itask_cont_to,max_fg_procs,MPI_INTEGER,
     &    itask_cont_to_all,max_fg_procs,MPI_INTEGER,king,
     &    FG_COMM,IERR)
c        write (iout,*) "Gather itask_cont_to ended"
c        call flush(iout)
        if (fg_rank.eq.king) then
          write (iout,*)
     &      "Contact receive task map (proc, #tasks, tasks)"
          do i=0,nfgtasks-1
            write (iout,'(20i4)') i,ntask_cont_from_all(i),
     &        (itask_cont_from_all(j,i),j=1,ntask_cont_from_all(i))
          enddo
          write (iout,*)
          call flush(iout)
          write (iout,*)
     &      "Contact send task map (proc, #tasks, tasks)"
          do i=0,nfgtasks-1
            write (iout,'(20i4)') i,ntask_cont_to_all(i),
     &        (itask_cont_to_all(j,i),j=1,ntask_cont_to_all(i))
          enddo
          write (iout,*)
          call flush(iout)
C Check if every send will have a matching receive.
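c Explanatory note on the check below: it first compares the control
c sums (the total number of posted sends must equal the total number
c of posted receives) and then verifies each directed entry: for every
c send i -> ii recorded in itask_cont_to_all, a matching entry i must
c appear in processor ii's itask_cont_from_all list.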
          ncheck_to=0
          ncheck_from=0
          do i=0,nfgtasks-1
            ncheck_to=ncheck_to+ntask_cont_to_all(i)
            ncheck_from=ncheck_from+ntask_cont_from_all(i)
          enddo
          write (iout,*) "Control sums",ncheck_from,ncheck_to
          if (ncheck_from.ne.ncheck_to) then
            write (iout,*) "Error: the number of receives differs",
     &        " from the number of sends."
            write (iout,*) "Terminating program...!"
            call flush(iout)
            flag=.false.
          else
            flag=.true.
            do i=0,nfgtasks-1
              do j=1,ntask_cont_to_all(i)
                ii=itask_cont_to_all(j,i)
                do k=1,ntask_cont_from_all(ii)
                  if (itask_cont_from_all(k,ii).eq.i) then
                    if (lprint) write (iout,*)
     &                "Matching send/receive",i,ii
                    exit
                  endif
                enddo
C If the loop ran to completion, no matching receive was found.
                if (k.eq.ntask_cont_from_all(ii)+1) then
                  flag=.false.
                  write (iout,*) "Error: send by",i," to",ii,
     &              " would have no matching receive"
                endif
              enddo
            enddo
          endif
          if (.not.flag) then
            write (iout,*) "Unmatched sends; terminating program"
            call flush(iout)
          endif
        endif
        call MPI_Bcast(flag,1,MPI_LOGICAL,king,FG_COMM,IERROR)
c        write (iout,*) "flag broadcast ended flag=",flag
c        call flush(iout)
        if (.not.flag) then
          call MPI_Finalize(IERROR)
          stop "Error in INIT_INT_TABLE: unmatched send/receive."
        endif
        call MPI_Comm_group(FG_COMM,fg_group,IERR)
c        write (iout,*) "MPI_Comm_group ended"
c        call flush(iout)
        call MPI_Group_incl(fg_group,ntask_cont_from+1,
     &    itask_cont_from(0),CONT_FROM_GROUP,IERR)
        call MPI_Group_incl(fg_group,ntask_cont_to+1,
     &    itask_cont_to(0),CONT_TO_GROUP,IERR)
        do i=1,nat_sent
          ii=iat_sent(i)
          iaux=4*(ielend(ii)-ielstart(ii)+1)
          call MPI_Group_translate_ranks(fg_group,iaux,
     &      iint_sent(1,ielstart(ii),i),CONT_TO_GROUP,
     &      iint_sent_local(1,ielstart(ii),i),IERR)
c          write (iout,*) "Ranks translated i=",i
c          call flush(iout)
        enddo
        iaux=4*(iturn3_end-iturn3_start+1)
        call MPI_Group_translate_ranks(fg_group,iaux,
     &    iturn3_sent(1,iturn3_start),CONT_TO_GROUP,
     &    iturn3_sent_local(1,iturn3_start),IERR)
        iaux=4*(iturn4_end-iturn4_start+1)
        call MPI_Group_translate_ranks(fg_group,iaux,
     &    iturn4_sent(1,iturn4_start),CONT_TO_GROUP,
     &    iturn4_sent_local(1,iturn4_start),IERR)
        if (lprint) then
          write (iout,*) "iint_sent_local"
          do i=1,nat_sent
            ii=iat_sent(i)
            write (iout,'(20i4)') ii,
     &        (j,(iint_sent_local(k,j,i),k=1,4),
     &        j=ielstart(ii),ielend(ii))
            call flush(iout)
          enddo
          write (iout,*) "iturn3_sent_local iturn3_start",
     &      iturn3_start," iturn3_end",iturn3_end
          write (iout,'(20i4)') (i,(iturn3_sent_local(j,i),j=1,4),
     &      i=iturn3_start,iturn3_end)
          write (iout,*) "iturn4_sent_local iturn4_start",
     &      iturn4_start," iturn4_end",iturn4_end
          write (iout,'(20i4)') (i,(iturn4_sent_local(j,i),j=1,4),
     &      i=iturn4_start,iturn4_end)
          call flush(iout)
        endif
        call MPI_Group_free(fg_group,ierr)
        call MPI_Group_free(cont_from_group,ierr)
        call MPI_Group_free(cont_to_group,ierr)
C Derived MPI types for the vectors exchanged in energy/gradient
C communication.
        call MPI_Type_contiguous(3,MPI_DOUBLE_PRECISION,MPI_UYZ,
     &    IERROR)
        call MPI_Type_commit(MPI_UYZ,IERROR)
        call MPI_Type_contiguous(18,MPI_DOUBLE_PRECISION,MPI_UYZGRAD,
     &    IERROR)
        call MPI_Type_commit(MPI_UYZGRAD,IERROR)
        call MPI_Type_contiguous(2,MPI_DOUBLE_PRECISION,MPI_MU,IERROR)
        call MPI_Type_commit(MPI_MU,IERROR)
        call MPI_Type_contiguous(4,MPI_DOUBLE_PRECISION,MPI_MAT1,
     &    IERROR)
        call MPI_Type_commit(MPI_MAT1,IERROR)
        call MPI_Type_contiguous(8,MPI_DOUBLE_PRECISION,MPI_MAT2,
     &    IERROR)
        call MPI_Type_commit(MPI_MAT2,IERROR)
        call MPI_Type_contiguous(6,MPI_DOUBLE_PRECISION,MPI_THET,
     &    IERROR)
        call MPI_Type_commit(MPI_THET,IERROR)
        call MPI_Type_contiguous(9,MPI_DOUBLE_PRECISION,MPI_GAM,
     &    IERROR)
        call MPI_Type_commit(MPI_GAM,IERROR)
#ifndef MATGATHER
c 9/22/08 Derived types to send the matrices which appear in the
c correlation terms
        do i=0,nfgtasks-1
          if (ivec_count(i).eq.ivec_count(0)) then
            lentyp(i)=0
          else
            lentyp(i)=1
          endif
        enddo
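c Layout sketch for the MPI_Type_indexed calls below (explanatory
c note; the array-layout reading is an interpretation, not taken from
c the code): each call describes count blocks of blocklengths(i)
c doubles placed displs(i) doubles apart. The displacements are spaced
c by blocklength*maxres because the matrices are leading-dimensioned
c maxres; after scaling by ichunk, each block apparently covers the
c ichunk columns owned by one processor. E.g. MPI_PRECOMP11 selects
c 8 strips of 2*ichunk doubles, spaced 2*maxres doubles apart.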
        do ind_typ=lentyp(0),lentyp(nfgtasks-1)
          if (ind_typ.eq.0) then
            ichunk=ivec_count(0)
          else
            ichunk=ivec_count(1)
          endif
c          do i=1,4
c            blocklengths(i)=4
c          enddo
c          displs(1)=0
c          do i=2,4
c            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
c          enddo
c          do i=1,4
c            blocklengths(i)=blocklengths(i)*ichunk
c          enddo
c          write (iout,*) "blocklengths and displs"
c          do i=1,4
c            write (iout,*) i,blocklengths(i),displs(i)
c          enddo
c          call flush(iout)
c          call MPI_Type_indexed(4,blocklengths(1),displs(1),
c     &      MPI_DOUBLE_PRECISION,MPI_ROTAT1(ind_typ),IERROR)
c          call MPI_Type_commit(MPI_ROTAT1(ind_typ),IERROR)
c          write (iout,*) "MPI_ROTAT1",MPI_ROTAT1
c          do i=1,4
c            blocklengths(i)=2
c          enddo
c          displs(1)=0
c          do i=2,4
c            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
c          enddo
c          do i=1,4
c            blocklengths(i)=blocklengths(i)*ichunk
c          enddo
c          write (iout,*) "blocklengths and displs"
c          do i=1,4
c            write (iout,*) i,blocklengths(i),displs(i)
c          enddo
c          call flush(iout)
c          call MPI_Type_indexed(4,blocklengths(1),displs(1),
c     &      MPI_DOUBLE_PRECISION,MPI_ROTAT2(ind_typ),IERROR)
c          call MPI_Type_commit(MPI_ROTAT2(ind_typ),IERROR)
c          write (iout,*) "MPI_ROTAT2",MPI_ROTAT2
          do i=1,8
            blocklengths(i)=2
          enddo
          displs(1)=0
          do i=2,8
            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
          enddo
          do i=1,8
            blocklengths(i)=blocklengths(i)*ichunk
          enddo
          call MPI_Type_indexed(8,blocklengths,displs,
     &      MPI_DOUBLE_PRECISION,MPI_PRECOMP11(ind_typ),IERROR)
          call MPI_Type_commit(MPI_PRECOMP11(ind_typ),IERROR)
          do i=1,8
            blocklengths(i)=4
          enddo
          displs(1)=0
          do i=2,8
            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
          enddo
          do i=1,8
            blocklengths(i)=blocklengths(i)*ichunk
          enddo
          call MPI_Type_indexed(8,blocklengths,displs,
     &      MPI_DOUBLE_PRECISION,MPI_PRECOMP12(ind_typ),IERROR)
          call MPI_Type_commit(MPI_PRECOMP12(ind_typ),IERROR)
          do i=1,6
            blocklengths(i)=4
          enddo
          displs(1)=0
          do i=2,6
            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
          enddo
          do i=1,6
            blocklengths(i)=blocklengths(i)*ichunk
          enddo
          call MPI_Type_indexed(6,blocklengths,displs,
     &      MPI_DOUBLE_PRECISION,MPI_PRECOMP22(ind_typ),IERROR)
          call MPI_Type_commit(MPI_PRECOMP22(ind_typ),IERROR)
          do i=1,2
            blocklengths(i)=8
          enddo
          displs(1)=0
          do i=2,2
            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
          enddo
          do i=1,2
            blocklengths(i)=blocklengths(i)*ichunk
          enddo
          call MPI_Type_indexed(2,blocklengths,displs,
     &      MPI_DOUBLE_PRECISION,MPI_PRECOMP23(ind_typ),IERROR)
          call MPI_Type_commit(MPI_PRECOMP23(ind_typ),IERROR)
          do i=1,4
            blocklengths(i)=1
          enddo
          displs(1)=0
          do i=2,4
            displs(i)=displs(i-1)+blocklengths(i-1)*maxres
          enddo
          do i=1,4
            blocklengths(i)=blocklengths(i)*ichunk
          enddo
          call MPI_Type_indexed(4,blocklengths,displs,
     &      MPI_DOUBLE_PRECISION,MPI_ROTAT_OLD(ind_typ),IERROR)
          call MPI_Type_commit(MPI_ROTAT_OLD(ind_typ),IERROR)
        enddo
#endif
      endif
      iint_start=ivec_start+1
      iint_end=ivec_end+1
      do i=0,nfgtasks-1
        iint_count(i)=ivec_count(i)
        iint_displ(i)=ivec_displ(i)
        ivec_displ(i)=ivec_displ(i)-1
        iset_displ(i)=iset_displ(i)-1
        ithet_displ(i)=ithet_displ(i)-1
        iphi_displ(i)=iphi_displ(i)-1
        iphi1_displ(i)=iphi1_displ(i)-1
        ibond_displ(i)=ibond_displ(i)-1
      enddo
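c Note on the -1 shifts above (added explanation): the *_displ arrays
c were filled by MPI_Allgather with 1-based Fortran start indices,
c while MPI_Gatherv/MPI_Allgatherv expect displacements as 0-based
c offsets (in units of the receive datatype), hence the decrement.
c The *_count arrays are already plain element counts and are left
c unchanged.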
      if (nfgtasks.gt.1 .and. fg_rank.eq.king
     &    .and. (me.eq.0 .or. out1file)) then
        write (iout,*) "IVEC_DISPL, IVEC_COUNT, ISET_START, ISET_COUNT"
        do i=0,nfgtasks-1
          write (iout,*) i,ivec_displ(i),ivec_count(i),iset_displ(i),
     &      iset_count(i)
        enddo
        write (iout,*) "iphi_start",iphi_start," iphi_end",iphi_end,
     &    " iphi1_start",iphi1_start," iphi1_end",iphi1_end
        write (iout,*)
     &    "IPHI_COUNT, IPHI_DISPL, IPHI1_COUNT, IPHI1_DISPL"
        do i=0,nfgtasks-1
          write (iout,*) i,iphi_count(i),iphi_displ(i),iphi1_count(i),
     &      iphi1_displ(i)
        enddo
        write (iout,'(i10,a,i10,a,i10,a/a,i3,a)') n_sc_int_tot,
     &    ' SC-SC ',nele_int_tot,' electrostatic and ',nscp_int_tot,
     &    ' SC-p interactions','were distributed among',nfgtasks,
     &    ' fine-grain processors.'
      endif
#else
      loc_start=2
      loc_end=nres-1
      ithet_start=3
      ithet_end=nres
      iturn3_start=nnt
      iturn3_end=nct-3
      iturn4_start=nnt
      iturn4_end=nct-4
      iphi_start=nnt+3
      iphi_end=nct
      iphi1_start=4
      iphi1_end=nres
      idihconstr_start=1
      idihconstr_end=ndih_constr
      iphid_start=iphi_start
      iphid_end=iphi_end-1
      ibond_start=2
      ibond_end=nres-1
      ibondp_start=nnt+1
      ibondp_end=nct
      ivec_start=1
      ivec_end=nres-1
      iset_start=3
      iset_end=nres+1
      iint_start=2
      iint_end=nres-1
#endif
      return
      end
#ifdef MPI
c---------------------------------------------------------------------------
      subroutine add_int(ii,jj,itask,ntask_cont_to,itask_cont_to,flag)
      implicit none
      include "DIMENSIONS"
      include "COMMON.INTERACT"
      include "COMMON.SETUP"
      include "COMMON.IOUNITS"
      integer ii,jj,itask(4),ntask_cont_to,
     &  itask_cont_to(0:MaxProcs-1)
      logical flag
      integer iturn3_start_all,iturn3_end_all,iturn4_start_all,
     &  iturn4_end_all,iatel_s_all,iatel_e_all,ielstart_all,
     &  ielend_all
      common /przechowalnia/ iturn3_start_all(0:MaxProcs),
     & iturn3_end_all(0:MaxProcs),iturn4_start_all(0:MaxProcs),
     & iturn4_end_all(0:MaxProcs),iatel_s_all(0:MaxProcs),
     & iatel_e_all(0:MaxProcs),ielstart_all(maxres,0:MaxProcs-1),
     & ielend_all(maxres,0:MaxProcs-1)
      integer iproc,isent,k,l
c Determine whether interaction ii,jj must be sent to other
c processors; a given interaction can be sent to at most 2 processors.
c Set flag=.true. if interaction ii,jj needs to be sent to at least
c one processor; otherwise flag is unchanged from the input value.
      isent=0
      itask(1)=fg_rank
      itask(2)=fg_rank
      itask(3)=fg_rank
      itask(4)=fg_rank
c      write (iout,*) "ii",ii," jj",jj
c Loop over processors to check whether any of them could need
c interaction ii,jj.
      do iproc=0,fg_rank-1
c Check whether the interaction matches any turn3 handled by iproc.
        do k=iturn3_start_all(iproc),iturn3_end_all(iproc)
          l=k+2
          if (k.eq.ii-1 .and. l.eq.jj-1 .or.
     &        k.eq.ii-1 .and. l.eq.jj+1 .or.
     &        k.eq.ii+1 .and. l.eq.jj+1 .or.
     &        k.eq.ii+1 .and. l.eq.jj-1) then
c            write (iout,*) "turn3 to iproc",iproc," ij",ii,jj,
c     &        " kl",k,l
c            call flush(iout)
            flag=.true.
            if (iproc.ne.itask(1).and.iproc.ne.itask(2)
     &        .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
              isent=isent+1
              itask(isent)=iproc
              call add_task(iproc,ntask_cont_to,itask_cont_to)
            endif
          endif
        enddo
C Check whether the interaction matches any turn4 handled by iproc.
        do k=iturn4_start_all(iproc),iturn4_end_all(iproc)
          l=k+3
          if (k.eq.ii-1 .and. l.eq.jj-1 .or.
     &        k.eq.ii-1 .and. l.eq.jj+1 .or.
     &        k.eq.ii+1 .and. l.eq.jj+1 .or.
     &        k.eq.ii+1 .and. l.eq.jj-1) then
c            write (iout,*) "turn4 to iproc",iproc," ij",ii,jj,
c     &        " kl",k,l
c            call flush(iout)
            flag=.true.
            if (iproc.ne.itask(1).and.iproc.ne.itask(2)
     &        .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
              isent=isent+1
              itask(isent)=iproc
              call add_task(iproc,ntask_cont_to,itask_cont_to)
            endif
          endif
        enddo
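c Example of the +/-1 matching above (added note; values are
c illustrative only): if iproc handles the turn3 pair (k,l)=(10,12),
c then the contacts (ii,jj) that match it are (9,11), (9,13), (11,11),
c and (11,13), i.e. the pairs whose indices both differ from (10,12)
c by exactly one position.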
C Check whether the interaction falls within the electrostatic range
C handled by iproc.
        if (iatel_s_all(iproc).gt.0 .and. iatel_e_all(iproc).gt.0
     &    .and. iatel_s_all(iproc).le.ii-1
     &    .and. iatel_e_all(iproc).ge.ii-1) then
          if (ielstart_all(ii-1,iproc).le.jj-1 .and.
     &      ielend_all(ii-1,iproc).ge.jj-1) then
            flag=.true.
            if (iproc.ne.itask(1).and.iproc.ne.itask(2)
     &        .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
              isent=isent+1
              itask(isent)=iproc
              call add_task(iproc,ntask_cont_to,itask_cont_to)
            endif
          endif
          if (ielstart_all(ii-1,iproc).le.jj+1 .and.
     &      ielend_all(ii-1,iproc).ge.jj+1) then
            flag=.true.
            if (iproc.ne.itask(1).and.iproc.ne.itask(2)
     &        .and.iproc.ne.itask(3).and.iproc.ne.itask(4)) then
              isent=isent+1
              itask(isent)=iproc
              call add_task(iproc,ntask_cont_to,itask_cont_to)
            endif
          endif
        endif
      enddo
      return
      end
c---------------------------------------------------------------------------
      subroutine add_int_from(ii,jj,ntask_cont_from,itask_cont_from)
      implicit none
      include "DIMENSIONS"
      include "COMMON.INTERACT"
      include "COMMON.SETUP"
      include "COMMON.IOUNITS"
      integer ii,jj,itask(2),ntask_cont_from,
     &  itask_cont_from(0:MaxProcs-1)
      logical flag
      integer iturn3_start_all,iturn3_end_all,iturn4_start_all,
     &  iturn4_end_all,iatel_s_all,iatel_e_all,ielstart_all,
     &  ielend_all
      common /przechowalnia/ iturn3_start_all(0:MaxProcs),
     & iturn3_end_all(0:MaxProcs),iturn4_start_all(0:MaxProcs),
     & iturn4_end_all(0:MaxProcs),iatel_s_all(0:MaxProcs),
     & iatel_e_all(0:MaxProcs),ielstart_all(maxres,0:MaxProcs-1),
     & ielend_all(maxres,0:MaxProcs-1)
      integer iproc,k,l
c Determine from which higher-ranked processors interaction ii,jj
c needs to be received.
      do iproc=fg_rank+1,nfgtasks-1
        do k=iturn3_start_all(iproc),iturn3_end_all(iproc)
          l=k+2
          if (k.eq.ii+1 .and. l.eq.jj+1 .or.
     &        k.eq.ii+1 .and. l.eq.jj-1 .or.
     &        k.eq.ii-1 .and. l.eq.jj-1 .or.
     &        k.eq.ii-1 .and. l.eq.jj+1) then
c            write (iout,*) "turn3 from iproc",iproc," ij",ii,jj,
c     &        " kl",k,l
            call add_task(iproc,ntask_cont_from,itask_cont_from)
          endif
        enddo
        do k=iturn4_start_all(iproc),iturn4_end_all(iproc)
          l=k+3
          if (k.eq.ii+1 .and. l.eq.jj+1 .or.
     &        k.eq.ii+1 .and. l.eq.jj-1 .or.
     &        k.eq.ii-1 .and. l.eq.jj-1 .or.
     &        k.eq.ii-1 .and. l.eq.jj+1) then
c            write (iout,*) "turn4 from iproc",iproc," ij",ii,jj,
c     &        " kl",k,l
            call add_task(iproc,ntask_cont_from,itask_cont_from)
          endif
        enddo
        if (iatel_s_all(iproc).gt.0 .and.
     &      iatel_e_all(iproc).gt.0) then
          if (ii+1.ge.iatel_s_all(iproc) .and.
     &        ii+1.le.iatel_e_all(iproc)) then
            if (jj+1.ge.ielstart_all(ii+1,iproc) .and.
     &          jj+1.le.ielend_all(ii+1,iproc)) then
              call add_task(iproc,ntask_cont_from,itask_cont_from)
            endif
            if (jj-1.ge.ielstart_all(ii+1,iproc) .and.
     &          jj-1.le.ielend_all(ii+1,iproc)) then
              call add_task(iproc,ntask_cont_from,itask_cont_from)
            endif
          endif
          if (ii-1.ge.iatel_s_all(iproc) .and.
     &        ii-1.le.iatel_e_all(iproc)) then
            if (jj-1.ge.ielstart_all(ii-1,iproc) .and.
     &          jj-1.le.ielend_all(ii-1,iproc)) then
              call add_task(iproc,ntask_cont_from,itask_cont_from)
            endif
            if (jj+1.ge.ielstart_all(ii-1,iproc) .and.
     &          jj+1.le.ielend_all(ii-1,iproc)) then
              call add_task(iproc,ntask_cont_from,itask_cont_from)
            endif
          endif
        endif
      enddo
      return
      end
c---------------------------------------------------------------------------
      subroutine add_task(iproc,ntask_cont,itask_cont)
      implicit none
      include "DIMENSIONS"
      integer iproc,ntask_cont,itask_cont(0:MaxProcs-1)
      integer ii
c Append iproc to the task list unless it is already there.
      do ii=1,ntask_cont
        if (itask_cont(ii).eq.iproc) return
      enddo
      ntask_cont=ntask_cont+1
      itask_cont(ntask_cont)=iproc
      return
      end
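c A worked example of the distribution scheme below (added note;
c values are illustrative only): total_ints=10 and nfgtasks=4 give
c nint=2, nexcess=2, and int4proc=(2,2,3,3); ranks 0..3 then receive
c the 1-based index windows 1..2, 3..4, 5..7, and 8..10, respectively.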
c---------------------------------------------------------------------------
      subroutine int_bounds(total_ints,lower_bound,upper_bound)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      integer total_ints,lower_bound,upper_bound
      integer int4proc(0:max_fg_procs),sint4proc(0:max_fg_procs)
c Distribute total_ints interactions as evenly as possible among the
c nfgtasks fine-grain processors; any excess goes to the highest
c ranks.
      nint=total_ints/nfgtasks
      do i=1,nfgtasks
        int4proc(i-1)=nint
      enddo
      nexcess=total_ints-nint*nfgtasks
      do i=1,nexcess
        int4proc(nfgtasks-i)=int4proc(nfgtasks-i)+1
      enddo
      lower_bound=0
      do i=0,fg_rank-1
        lower_bound=lower_bound+int4proc(i)
      enddo
      upper_bound=lower_bound+int4proc(fg_rank)
      lower_bound=lower_bound+1
      return
      end
c---------------------------------------------------------------------------
      subroutine int_bounds1(total_ints,lower_bound,upper_bound)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'mpif.h'
      include 'COMMON.SETUP'
      integer total_ints,lower_bound,upper_bound
      integer int4proc(0:max_fg_procs),sint4proc(0:max_fg_procs)
c Same scheme as int_bounds, but for the communicator that includes
c the coarse-grain master (nfgtasks1 tasks, rank fg_rank1).
      nint=total_ints/nfgtasks1
      do i=1,nfgtasks1
        int4proc(i-1)=nint
      enddo
      nexcess=total_ints-nint*nfgtasks1
      do i=1,nexcess
        int4proc(nfgtasks1-i)=int4proc(nfgtasks1-i)+1
      enddo
      lower_bound=0
      do i=0,fg_rank1-1
        lower_bound=lower_bound+int4proc(i)
      enddo
      upper_bound=lower_bound+int4proc(fg_rank1)
      lower_bound=lower_bound+1
      return
      end
c---------------------------------------------------------------------------
      subroutine int_partition(int_index,lower_index,upper_index,atom,
     &  at_start,at_end,first_atom,last_atom,int_gr,jat_start,
     &  jat_end,*)
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
      include 'COMMON.IOUNITS'
      integer int_index,lower_index,upper_index,atom,at_start,at_end,
     &  first_atom,last_atom,int_gr,jat_start,jat_end
      logical lprn
c Assign to this processor the part of the interaction range
c first_atom..last_atom of atom that falls within its global index
c window lower_index..upper_index; take the alternate return once the
c window is exhausted.
      lprn=.false.
      if (lprn) write (iout,*) 'int_index=',int_index
      int_index_old=int_index
      int_index=int_index+last_atom-first_atom+1
      if (lprn)
     &  write (iout,*) 'int_index=',int_index,
     &  ' int_index_old',int_index_old,
     &  ' lower_index=',lower_index,
     &  ' upper_index=',upper_index,
     &  ' atom=',atom,' first_atom=',first_atom,
     &  ' last_atom=',last_atom
      if (int_index.ge.lower_index) then
        int_gr=int_gr+1
        if (at_start.eq.0) then
          at_start=atom
          jat_start=first_atom-1+lower_index-int_index_old
        else
          jat_start=first_atom
        endif
        if (lprn) write (iout,*) 'jat_start',jat_start
        if (int_index.ge.upper_index) then
          at_end=atom
          jat_end=first_atom-1+upper_index-int_index_old
          return 1
        else
          jat_end=last_atom
        endif
        if (lprn) write (iout,*) 'jat_end',jat_end
      endif
      return
      end
#endif
c------------------------------------------------------------------------------
      subroutine hpb_partition
      implicit real*8 (a-h,o-z)
      include 'DIMENSIONS'
#ifdef MPI
      include 'mpif.h'
#endif
      include 'COMMON.SBRIDGE'
      include 'COMMON.IOUNITS'
      include 'COMMON.SETUP'
      include 'COMMON.CONTROL'
#ifdef MPI
      call int_bounds(nhpb,link_start,link_end)
      if (.not. out1file)
     &  write (iout,*) 'Processor',fg_rank,' CG group',kolor,
     &  ' absolute rank',MyRank,
     &  ' nhpb',nhpb,' link_start=',link_start,
     &  ' link_end',link_end
#else
      link_start=1
      link_end=nhpb
#endif
      return
      end
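c Usage sketch for hpb_partition above (added note; values are
c illustrative only): with nhpb=7 distance constraints and nfgtasks=2,
c int_bounds assigns links 1..3 to fg_rank 0 and links 4..7 to
c fg_rank 1; the serial build keeps the whole range 1..nhpb.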