MPI shield
author Adam Sieradzan <adasko@piasek4.chem.univ.gda.pl>
Wed, 2 May 2018 13:42:15 +0000 (15:42 +0200)
committer Adam Sieradzan <adasko@piasek4.chem.univ.gda.pl>
Wed, 2 May 2018 13:42:15 +0000 (15:42 +0200)
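
Register two contiguous MPI derived types for the shielding transfers: MPI_I50 (maxcontsshi integers, for shield_list columns) and MPI_SHI (impishi = 3*maxcontsshi double precisions, for the gradient arrays). Declare the handles in MPI_data.f90, move the maxcontsshi parameter into energy_data.f90 so it is visible to control.F90, and, when nfgtasks.gt.1, gather fac_shield, shield_list, ishield_list, grad_shield, grad_shield_side and grad_shield_loc across the fine-grain tasks with MPI_Allgatherv in energy.f90.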
source/unres/control.F90
source/unres/data/MPI_data.f90
source/unres/data/energy_data.f90
source/unres/energy.f90

diff --git a/source/unres/control.F90 b/source/unres/control.F90
index f7cba30..4c413ec 100644
             ind_eleint_old_nucl,ind_eleint_nucl,nele_int_tot_vdw_nucl,&
             my_ele_inds_vdw_nucl,my_ele_inde_vdw_nucl,ind_eleint_vdw_nucl,&
             ind_eleint_vdw_old_nucl,nscp_int_tot_nucl,my_scp_inds_nucl,&
-            my_scp_inde_nucl,ind_scpint_nucl,ind_scpint_old_nucl
+            my_scp_inde_nucl,ind_scpint_nucl,ind_scpint_old_nucl,impishi
 !      integer,dimension(5) :: nct_molec,nnt_molec
 !el      allocate(itask_cont_from(0:nfgtasks-1)) !(0:max_fg_procs-1)
 !el      allocate(itask_cont_to(0:nfgtasks-1)) !(0:max_fg_procs-1)
         call MPI_Type_contiguous(18,MPI_DOUBLE_PRECISION,MPI_UYZGRAD,&
           IERROR)
         call MPI_Type_commit(MPI_UYZGRAD,IERROR)
+        call MPI_Type_contiguous(maxcontsshi,MPI_INTEGER,MPI_I50,IERROR)
+        call MPI_Type_commit(MPI_I50,IERROR)
+         impishi=maxcontsshi*3
+        call MPI_Type_contiguous(impishi,MPI_DOUBLE_PRECISION, &
+        MPI_SHI,IERROR)
+        call MPI_Type_commit(MPI_SHI,IERROR)
+
         call MPI_Type_contiguous(2,MPI_DOUBLE_PRECISION,MPI_MU,IERROR)
         call MPI_Type_commit(MPI_MU,IERROR)
         call MPI_Type_contiguous(4,MPI_DOUBLE_PRECISION,MPI_MAT1,IERROR)
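
For readers unfamiliar with the pattern above: MPI_Type_contiguous packs a fixed number of elementary values into one reusable datatype handle, and MPI_Type_commit makes the handle usable in communication calls. Below is a minimal, self-contained sketch of that pattern outside UNRES; names ending in _demo are illustrative, not part of the source.

      program type_contig_demo
      implicit none
      include 'mpif.h'
      integer, parameter :: maxcontsshi_demo = 50
      integer :: ierror, rank, tsize, impishi_demo
      integer :: mpi_i50_demo, mpi_shi_demo

      call MPI_Init(ierror)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror)
! One MPI_I50-like element covers a whole shield_list(:,i) column.
      call MPI_Type_contiguous(maxcontsshi_demo, MPI_INTEGER, &
        mpi_i50_demo, ierror)
      call MPI_Type_commit(mpi_i50_demo, ierror)
! One MPI_SHI-like element covers grad_shield_side(1:3,1:maxcontsshi,i).
      impishi_demo = 3*maxcontsshi_demo
      call MPI_Type_contiguous(impishi_demo, MPI_DOUBLE_PRECISION, &
        mpi_shi_demo, ierror)
      call MPI_Type_commit(mpi_shi_demo, ierror)
      call MPI_Type_size(mpi_shi_demo, tsize, ierror)
      if (rank.eq.0) write(*,*) 'one MPI_SHI element spans', tsize, 'bytes'
! Committed types should be freed once no longer needed.
      call MPI_Type_free(mpi_i50_demo, ierror)
      call MPI_Type_free(mpi_shi_demo, ierror)
      call MPI_Finalize(ierror)
      end program type_contig_demo
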
diff --git a/source/unres/data/MPI_data.f90 b/source/unres/data/MPI_data.f90
index 3ed522b..06b6d0b 100644
@@ -39,7 +39,7 @@
       logical :: yourjob,finished,cgdone
 !      common /types/
       integer :: MPI_UYZ,MPI_UYZGRAD,MPI_MU,MPI_MAT1,MPI_MAT2,&
-       MPI_THET,MPI_GAM
+       MPI_THET,MPI_GAM,MPI_I50,MPI_SHI
       integer,dimension(0:1) :: MPI_ROTAT1,MPI_ROTAT2,MPI_ROTAT_OLD,&
        MPI_PRECOMP11,MPI_PRECOMP12,MPI_PRECOMP22,MPI_PRECOMP23
 !-----------------------------------------------------------------------------
diff --git a/source/unres/data/energy_data.f90 b/source/unres/data/energy_data.f90
index 4077472..5f340cf 100644
@@ -14,6 +14,7 @@
 !-----------------------------------------------------------------------------
 ! Max. number of contacts per residue
       integer :: maxconts
+      integer,parameter :: maxcontsshi=50
 !-----------------------------------------------------------------------------
 ! Max. number of SC contacts
       integer :: maxcont
diff --git a/source/unres/energy.f90 b/source/unres/energy.f90
index e4d0f76..7d21f89 100644
@@ -30,7 +30,7 @@
 ! Maximum number of SC local term fitting function coefficients
       integer,parameter :: maxsccoef=65
 ! Maximum number of local shielding effectors
-      integer,parameter :: maxcontsshi=50
+!      integer,parameter :: maxcontsshi=50
 !-----------------------------------------------------------------------------
 ! commom.calc common/calc/
 !-----------------------------------------------------------------------------
 #ifdef MPI      
       real(kind=8) :: weights_(n_ene) !,time_Bcast,time_Bcastw
 ! shielding effect variables for MPI
-!      real(kind=8)   fac_shieldbuf(maxres),
-!     & grad_shield_locbuf(3,maxcontsshi,-1:maxres),
-!     & grad_shield_sidebuf(3,maxcontsshi,-1:maxres),
-!     & grad_shieldbuf(3,-1:maxres)
-!       integer ishield_listbuf(maxres),
-!     &shield_listbuf(maxcontsshi,maxres)
+      real(kind=8)   fac_shieldbuf(nres), &
+      grad_shield_locbuf(3,maxcontsshi,-1:nres), &
+      grad_shield_sidebuf(3,maxcontsshi,-1:nres), &
+      grad_shieldbuf(3,-1:nres)
+       integer ishield_listbuf(nres), &
+       shield_listbuf(maxcontsshi,nres),k,j,i
 
 !      print*,"ETOTAL Processor",fg_rank," absolute rank",myrank,
 !     & " nfgtasks",nfgtasks
        if (shield_mode.eq.2) then
                  call set_shield_fac2
        endif
+      if (nfgtasks.gt.1) then
+        call MPI_Allgatherv(fac_shield(ivec_start), &
+        ivec_count(fg_rank1), &
+        MPI_DOUBLE_PRECISION,fac_shieldbuf(1),ivec_count(0), &
+        ivec_displ(0), &
+        MPI_DOUBLE_PRECISION,FG_COMM,IERROR)
+        call MPI_Allgatherv(shield_list(1,ivec_start), &
+        ivec_count(fg_rank1), &
+        MPI_I50,shield_listbuf(1,1),ivec_count(0), &
+        ivec_displ(0), &
+        MPI_I50,FG_COMM,IERROR)
+        call MPI_Allgatherv(ishield_list(ivec_start), &
+        ivec_count(fg_rank1), &
+        MPI_INTEGER,ishield_listbuf(1),ivec_count(0), &
+        ivec_displ(0), &
+        MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgatherv(grad_shield(1,ivec_start), &
+        ivec_count(fg_rank1), &
+        MPI_UYZ,grad_shieldbuf(1,1),ivec_count(0), &
+        ivec_displ(0), &
+        MPI_UYZ,FG_COMM,IERROR)
+        call MPI_Allgatherv(grad_shield_side(1,1,ivec_start), &
+        ivec_count(fg_rank1), &
+        MPI_SHI,grad_shield_sidebuf(1,1,1),ivec_count(0), &
+        ivec_displ(0), &
+        MPI_SHI,FG_COMM,IERROR)
+        call MPI_Allgatherv(grad_shield_loc(1,1,ivec_start), &
+        ivec_count(fg_rank1), &
+        MPI_SHI,grad_shield_locbuf(1,1,1),ivec_count(0), &
+        ivec_displ(0), &
+        MPI_SHI,FG_COMM,IERROR)
+        do i=1,nres
+         fac_shield(i)=fac_shieldbuf(i)
+         ishield_list(i)=ishield_listbuf(i)
+         do j=1,3
+         grad_shield(j,i)=grad_shieldbuf(j,i)
+         enddo !j
+         do j=1,ishield_list(i)
+           shield_list(j,i)=shield_listbuf(j,i)
+          do k=1,3
+          grad_shield_loc(k,j,i)=grad_shield_locbuf(k,j,i)
+          grad_shield_side(k,j,i)=grad_shield_sidebuf(k,j,i)
+          enddo !k
+        enddo !j
+       enddo !i
+       endif
+
+
+
+
 !       print *,"AFTER EGB",ipot,evdw
 !mc
 !mc Sep-06: egb takes care of dynamic ss bonds too
 #ifdef TIMING
       time_vec=time_vec+MPI_Wtime()-time01
 #endif
+
+
+
+
 !        print *,"Processor",myrank," left VEC_AND_DERIV"
       if (ipot.lt.6) then
 #ifdef SPLITELE
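
The Allgatherv block added above follows a standard slice-exchange pattern: each fine-grain task computes fac_shield and the related arrays only for its own ivec_start..ivec_start+ivec_count-1 range, MPI_Allgatherv concatenates the per-task slices into the *buf arrays on every task, and the buffers are then copied back into the working arrays. Below is a minimal, runnable sketch of that pattern, with hypothetical _demo names standing in for the ivec_* bookkeeping that UNRES provides.

      program allgatherv_demo
      implicit none
      include 'mpif.h'
      integer, parameter :: nres_demo = 8
      integer :: ierror, rank, nprocs, i, base, rem, istart
      integer, allocatable :: counts(:), displs(:)
      real(kind=8) :: fac(nres_demo), facbuf(nres_demo)

      call MPI_Init(ierror)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierror)

! Block-partition the nres_demo entries over the tasks, as the
! ivec_count/ivec_displ arrays do in UNRES.
      allocate(counts(0:nprocs-1), displs(0:nprocs-1))
      base = nres_demo/nprocs
      rem = mod(nres_demo, nprocs)
      do i = 0, nprocs-1
        counts(i) = base
        if (i .lt. rem) counts(i) = counts(i)+1
      enddo
      displs(0) = 0
      do i = 1, nprocs-1
        displs(i) = displs(i-1)+counts(i-1)
      enddo
      istart = displs(rank)+1

! Each task fills only its own slice ...
      fac = 0.0d0
      do i = istart, istart+counts(rank)-1
        fac(i) = dble(i)
      enddo
! ... and the slices are concatenated on every task.
      call MPI_Allgatherv(fac(istart), counts(rank), &
        MPI_DOUBLE_PRECISION, facbuf, counts, displs, &
        MPI_DOUBLE_PRECISION, MPI_COMM_WORLD, ierror)

      if (rank .eq. 0) write(*,*) facbuf
      call MPI_Finalize(ierror)
      end program allgatherv_demo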