+!-----------------------------------------------------------------------------
+! make_SCSC_inter_list: build the side-chain/side-chain (SC-SC) nonbonded
+! interaction list.  Every residue pair (i,j) in this task's assigned range
+! whose minimum-image SC-SC distance lies within r_cut_ele+r_buff_list is
+! stored; the buffer keeps the list valid between periodic rebuilds.
+! With nfgtasks>1 the per-task partial lists are gathered on "king" and
+! broadcast back to all fine-grain tasks in newcontlisti/newcontlistj
+! (module-level arrays), with the global pair count in g_ilist_sc.  The
+! per-task slice of the global list is set via int_bounds into
+! g_listscsc_start/g_listscsc_end.
+!-----------------------------------------------------------------------------
+ subroutine make_SCSC_inter_list
+ include 'mpif.h'
+ real*8 :: xi,yi,zi,xj,yj,zj,xj_safe,yj_safe,zj_safe,xj_temp,yj_temp,zj_temp
+ real*8 :: dist_init, dist_temp,r_buff_list
+ integer:: contlisti(250*nres),contlistj(250*nres)
+ integer i,j,itypi,itypj,subchap,xshift,yshift,zshift,iint,ilist_sc,g_ilist_sc
+ integer displ(0:nprocs),i_ilist_sc(0:nprocs),ierr
+! Cutoff buffer; double-precision literal avoids a single-to-double
+! conversion (was 5.0).
+ r_buff_list=5.0d0
+ ilist_sc=0
+ do i=iatsc_s,iatsc_e
+ itypi=iabs(itype(i,1))
+ if (itypi.eq.ntyp1) cycle
+! Side-chain position of residue i (SC sites are stored at nres+i)
+ xi=c(1,nres+i)
+ yi=c(2,nres+i)
+ zi=c(3,nres+i)
+ call to_box(xi,yi,zi)
+ do iint=1,nint_gr(i)
+ do j=istart(i,iint),iend(i,iint)
+ itypj=iabs(itype(j,1))
+ if (energy_dec) write(iout,*) "LISTA ZAKRES",istart(i,iint),iend(i,iint),iatsc_s,iatsc_e
+ if (itypj.eq.ntyp1) cycle
+ xj=c(1,nres+j)
+ yj=c(2,nres+j)
+ zj=c(3,nres+j)
+ call to_box(xj,yj,zj)
+! Minimum-image separation under periodic boundary conditions
+ xj=boxshift(xj-xi,boxxsize)
+ yj=boxshift(yj-yi,boxysize)
+ zj=boxshift(zj-zi,boxzsize)
+ dist_init=xj**2+yj**2+zj**2
+! Accept the pair when within the buffered cutoff
+ if (sqrt(dist_init).le.(r_cut_ele+r_buff_list)) then
+ ilist_sc=ilist_sc+1
+! this can be substituted by cantor and anti-cantor
+ contlisti(ilist_sc)=i
+ contlistj(ilist_sc)=j
+ endif
+ enddo
+ enddo
+ enddo
+#ifdef DEBUG
+ write (iout,*) "before MPIREDUCE",ilist_sc
+ do i=1,ilist_sc
+ write (iout,*) i,contlisti(i),contlistj(i)
+ enddo
+#endif
+ if (nfgtasks.gt.1)then
+! Global pair count on king; per-task counts feed the Gatherv displs
+ call MPI_Reduce(ilist_sc,g_ilist_sc,1,&
+ MPI_INTEGER,MPI_SUM,king,FG_COMM,IERR)
+ call MPI_Gather(ilist_sc,1,MPI_INTEGER,&
+ i_ilist_sc,1,MPI_INTEGER,king,FG_COMM,IERR)
+ displ(0)=0
+ do i=1,nfgtasks-1,1
+ displ(i)=i_ilist_sc(i-1)+displ(i-1)
+ enddo
+ call MPI_Gatherv(contlisti,ilist_sc,MPI_INTEGER,&
+ newcontlisti,i_ilist_sc,displ,MPI_INTEGER,&
+ king,FG_COMM,IERR)
+ call MPI_Gatherv(contlistj,ilist_sc,MPI_INTEGER,&
+ newcontlistj,i_ilist_sc,displ,MPI_INTEGER,&
+ king,FG_COMM,IERR)
+! Broadcast with MPI_INTEGER, the Fortran datatype: MPI_INT is the
+! C-binding type and is not guaranteed to match Fortran INTEGER.
+ call MPI_Bcast(g_ilist_sc,1,MPI_INTEGER,king,FG_COMM,IERR)
+ call MPI_Bcast(newcontlisti,g_ilist_sc,MPI_INTEGER,king,FG_COMM,IERR)
+ call MPI_Bcast(newcontlistj,g_ilist_sc,MPI_INTEGER,king,FG_COMM,IERR)
+ else
+! Serial fall-back: the local list is already the global list
+ g_ilist_sc=ilist_sc
+ do i=1,ilist_sc
+ newcontlisti(i)=contlisti(i)
+ newcontlistj(i)=contlistj(i)
+ enddo
+ endif
+#ifdef DEBUG
+ write (iout,*) "after MPIREDUCE",g_ilist_sc
+ do i=1,g_ilist_sc
+ write (iout,*) i,newcontlisti(i),newcontlistj(i)
+ enddo
+#endif
+! Partition the global list among fine-grain tasks
+ call int_bounds(g_ilist_sc,g_listscsc_start,g_listscsc_end)
+ return
+ end subroutine make_SCSC_inter_list
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+!-----------------------------------------------------------------------------
+! make_SCp_inter_list: build the side-chain/peptide-group (SC-p) nonbonded
+! interaction list.  For each peptide group i (positioned at the midpoint of
+! CA(i) and CA(i+1)) and each partner site j in this task's range, the pair
+! is stored when the minimum-image distance is within r_cut_ele+r_buff_list;
+! the buffer keeps the list valid between periodic rebuilds.  With
+! nfgtasks>1 the per-task lists are gathered on "king" and broadcast back
+! in newcontlistscpi/newcontlistscpj (module-level arrays) with the global
+! count in g_ilist_scp; int_bounds then sets g_listscp_start/g_listscp_end.
+!-----------------------------------------------------------------------------
+ subroutine make_SCp_inter_list
+ use MD_data, only: itime_mat
+
+ include 'mpif.h'
+ real*8 :: xi,yi,zi,xj,yj,zj,xj_safe,yj_safe,zj_safe,xj_temp,yj_temp,zj_temp
+ real*8 :: dist_init, dist_temp,r_buff_list
+ integer:: contlistscpi(350*nres),contlistscpj(350*nres)
+ integer i,j,itypi,itypj,subchap,xshift,yshift,zshift,iint,ilist_scp,g_ilist_scp
+ integer displ(0:nprocs),i_ilist_scp(0:nprocs),ierr
+! Cutoff buffer; double-precision literal avoids a single-to-double
+! conversion (was 5.0).
+ r_buff_list=5.0d0
+ ilist_scp=0
+ do i=iatscp_s,iatscp_e
+ if (itype(i,1).eq.ntyp1 .or. itype(i+1,1).eq.ntyp1) cycle
+! Peptide-group position: midpoint between CA(i) and CA(i+1)
+ xi=0.5D0*(c(1,i)+c(1,i+1))
+ yi=0.5D0*(c(2,i)+c(2,i+1))
+ zi=0.5D0*(c(3,i)+c(3,i+1))
+ call to_box(xi,yi,zi)
+ do iint=1,nscp_gr(i)
+ do j=iscpstart(i,iint),iscpend(i,iint)
+ itypj=iabs(itype(j,1))
+ if (itypj.eq.ntyp1) cycle
+! Partner site taken as the CA position (Ca-p interactions)
+ xj=c(1,j)
+ yj=c(2,j)
+ zj=c(3,j)
+ call to_box(xj,yj,zj)
+! Minimum-image separation under periodic boundary conditions
+ xj=boxshift(xj-xi,boxxsize)
+ yj=boxshift(yj-yi,boxysize)
+ zj=boxshift(zj-zi,boxzsize)
+ dist_init=xj**2+yj**2+zj**2
+#ifdef DEBUG
+! Reference list built with the bare cutoff (no buffer) on the first
+! run, used below to detect a stale interaction matrix.
+ if ((sqrt(dist_init).le.(r_cut_ele)).and.(ifirstrun.eq.0)) then
+ ilist_scp_first=ilist_scp_first+1
+ contlistscpi_f(ilist_scp_first)=i
+ contlistscpj_f(ilist_scp_first)=j
+ endif
+#endif
+! Accept the pair when within the buffered cutoff
+ if (sqrt(dist_init).le.(r_cut_ele+r_buff_list)) then
+ ilist_scp=ilist_scp+1
+! this can be substituted by cantor and anti-cantor
+ contlistscpi(ilist_scp)=i
+ contlistscpj(ilist_scp)=j
+ endif
+ enddo
+ enddo
+ enddo
+#ifdef DEBUG
+ write (iout,*) "before MPIREDUCE",ilist_scp
+ do i=1,ilist_scp
+ write (iout,*) i,contlistscpi(i),contlistscpj(i)
+ enddo
+#endif
+ if (nfgtasks.gt.1)then
+! Global pair count on king; per-task counts feed the Gatherv displs
+ call MPI_Reduce(ilist_scp,g_ilist_scp,1,&
+ MPI_INTEGER,MPI_SUM,king,FG_COMM,IERR)
+ call MPI_Gather(ilist_scp,1,MPI_INTEGER,&
+ i_ilist_scp,1,MPI_INTEGER,king,FG_COMM,IERR)
+ displ(0)=0
+ do i=1,nfgtasks-1,1
+ displ(i)=i_ilist_scp(i-1)+displ(i-1)
+ enddo
+ call MPI_Gatherv(contlistscpi,ilist_scp,MPI_INTEGER,&
+ newcontlistscpi,i_ilist_scp,displ,MPI_INTEGER,&
+ king,FG_COMM,IERR)
+ call MPI_Gatherv(contlistscpj,ilist_scp,MPI_INTEGER,&
+ newcontlistscpj,i_ilist_scp,displ,MPI_INTEGER,&
+ king,FG_COMM,IERR)
+! Broadcast with MPI_INTEGER, the Fortran datatype: MPI_INT is the
+! C-binding type and is not guaranteed to match Fortran INTEGER.
+ call MPI_Bcast(g_ilist_scp,1,MPI_INTEGER,king,FG_COMM,IERR)
+ call MPI_Bcast(newcontlistscpi,g_ilist_scp,MPI_INTEGER,king,FG_COMM,IERR)
+ call MPI_Bcast(newcontlistscpj,g_ilist_scp,MPI_INTEGER,king,FG_COMM,IERR)
+ else
+! Serial fall-back: the local list is already the global list
+ g_ilist_scp=ilist_scp
+ do i=1,ilist_scp
+ newcontlistscpi(i)=contlistscpi(i)
+ newcontlistscpj(i)=contlistscpj(i)
+ enddo
+ endif
+#ifdef DEBUG
+ write (iout,*) "after MPIREDUCE",g_ilist_scp
+ do i=1,g_ilist_scp
+ write (iout,*) i,newcontlistscpi(i),newcontlistscpj(i)
+ enddo
+
+! if (ifirstrun.eq.0) ifirstrun=1
+! do i=1,ilist_scp_first
+! do j=1,g_ilist_scp
+! if ((newcontlistscpi(j).eq.contlistscpi_f(i)).and.&
+! (newcontlistscpj(j).eq.contlistscpj_f(i))) go to 126
+! enddo
+! print *,itime_mat,"ERROR matrix needs updating"
+! print *,contlistscpi_f(i),contlistscpj_f(i)
+! 126 continue
+! enddo
+#endif
+! Partition the global list among fine-grain tasks
+ call int_bounds(g_ilist_scp,g_listscp_start,g_listscp_end)
+
+ return
+ end subroutine make_SCp_inter_list
+
+!-----------------------------------------------------------------------------
+!-----------------------------------------------------------------------------
+
+
+ subroutine make_pp_inter_list
+ include 'mpif.h'
+ real*8 :: xi,yi,zi,xj,yj,zj,xj_safe,yj_safe,zj_safe,xj_temp,yj_temp,zj_temp
+ real*8 :: xmedj,ymedj,zmedj,sslipi,ssgradlipi,faclipij2,sslipj,ssgradlipj
+ real*8 :: dist_init, dist_temp,r_buff_list,dxi,dyi,dzi,xmedi,ymedi,zmedi
+ real*8 :: dx_normi,dy_normi,dz_normi,dxj,dyj,dzj,dx_normj,dy_normj,dz_normj
+ integer:: contlistppi(250*nres),contlistppj(250*nres)
+! integer :: newcontlistppi(200*nres),newcontlistppj(200*nres)
+ integer i,j,itypi,itypj,subchap,xshift,yshift,zshift,iint,ilist_pp,g_ilist_pp
+ integer displ(0:nprocs),i_ilist_pp(0:nprocs),ierr
+! write(iout,*),"START make_pp",iatel_s,iatel_e,r_cut_ele+r_buff_list
+ ilist_pp=0
+ r_buff_list=5.0
+ do i=iatel_s,iatel_e
+ if (itype(i,1).eq.ntyp1 .or. itype(i+1,1).eq.ntyp1) cycle
+ dxi=dc(1,i)
+ dyi=dc(2,i)
+ dzi=dc(3,i)
+ dx_normi=dc_norm(1,i)
+ dy_normi=dc_norm(2,i)
+ dz_normi=dc_norm(3,i)
+ xmedi=c(1,i)+0.5d0*dxi
+ ymedi=c(2,i)+0.5d0*dyi
+ zmedi=c(3,i)+0.5d0*dzi
+
+ call to_box(xmedi,ymedi,zmedi)
+ call lipid_layer(xmedi,ymedi,zmedi,sslipi,ssgradlipi)
+! write (iout,*) i,j,itype(i,1),itype(j,1)
+! if (itype(j,1).eq.ntyp1.or. itype(j+1,1).eq.ntyp1) cycle
+
+! 1,j)
+ do j=ielstart(i),ielend(i)
+! write (iout,*) i,j,itype(i,1),itype(j,1)
+ if (itype(j,1).eq.ntyp1.or. itype(j+1,1).eq.ntyp1) cycle
+ dxj=dc(1,j)
+ dyj=dc(2,j)
+ dzj=dc(3,j)
+ dx_normj=dc_norm(1,j)
+ dy_normj=dc_norm(2,j)
+ dz_normj=dc_norm(3,j)
+! xj=c(1,j)+0.5D0*dxj-xmedi
+! yj=c(2,j)+0.5D0*dyj-ymedi
+! zj=c(3,j)+0.5D0*dzj-zmedi
+ xj=c(1,j)+0.5D0*dxj
+ yj=c(2,j)+0.5D0*dyj
+ zj=c(3,j)+0.5D0*dzj
+ call to_box(xj,yj,zj)
+! call lipid_layer(xj,yj,zj,sslipj,ssgradlipj)
+! faclipij2=(sslipi+sslipj)/2.0d0*lipscale**2+1.0d0
+ xj=boxshift(xj-xmedi,boxxsize)
+ yj=boxshift(yj-ymedi,boxysize)
+ zj=boxshift(zj-zmedi,boxzsize)
+ dist_init=xj**2+yj**2+zj**2
+ if (sqrt(dist_init).le.(r_cut_ele+r_buff_list)) then
+! Here the list is created
+ ilist_pp=ilist_pp+1
+! this can be substituted by cantor and anti-cantor
+ contlistppi(ilist_pp)=i
+ contlistppj(ilist_pp)=j
+ endif
+! enddo
+ enddo
+ enddo
+#ifdef DEBUG
+ write (iout,*) "before MPIREDUCE",ilist_pp
+ do i=1,ilist_pp
+ write (iout,*) i,contlistppi(i),contlistppj(i)
+ enddo
+#endif
+ if (nfgtasks.gt.1)then
+
+ call MPI_Reduce(ilist_pp,g_ilist_pp,1,&
+ MPI_INTEGER,MPI_SUM,king,FG_COMM,IERR)
+! write(iout,*) "before bcast",g_ilist_sc
+ call MPI_Gather(ilist_pp,1,MPI_INTEGER,&
+ i_ilist_pp,1,MPI_INTEGER,king,FG_COMM,IERR)
+ displ(0)=0
+ do i=1,nfgtasks-1,1
+ displ(i)=i_ilist_pp(i-1)+displ(i-1)
+ enddo
+! write(iout,*) "before gather",displ(0),displ(1)
+ call MPI_Gatherv(contlistppi,ilist_pp,MPI_INTEGER,&
+ newcontlistppi,i_ilist_pp,displ,MPI_INTEGER,&
+ king,FG_COMM,IERR)
+ call MPI_Gatherv(contlistppj,ilist_pp,MPI_INTEGER,&
+ newcontlistppj,i_ilist_pp,displ,MPI_INTEGER,&
+ king,FG_COMM,IERR)
+ call MPI_Bcast(g_ilist_pp,1,MPI_INT,king,FG_COMM,IERR)
+! write(iout,*) "before bcast",g_ilist_sc
+! call MPI_Bcast(g_ilist_sc,1,MPI_INT,king,FG_COMM)
+ call MPI_Bcast(newcontlistppi,g_ilist_pp,MPI_INT,king,FG_COMM,IERR)
+ call MPI_Bcast(newcontlistppj,g_ilist_pp,MPI_INT,king,FG_COMM,IERR)
+
+! call MPI_Bcast(g_ilist_sc,1,MPI_INT,king,FG_COMM)