X-Git-Url: http://mmka.chem.univ.gda.pl/gitweb/?a=blobdiff_plain;f=source%2Funres%2Fsrc_Eshel%2FSRC-SURPLUS%2Flagrangian_lesyng.F;fp=source%2Funres%2Fsrc_Eshel%2FSRC-SURPLUS%2Flagrangian_lesyng.F;h=8a9163a4f4a4ba48c07ddb315ab270336eb8738b;hb=7308760ff07636ef6b1ee28d8c3a67a23c14b34b;hp=0000000000000000000000000000000000000000;hpb=9a54ab407f6d0d9d564d52763b3e2136450b9ffc;p=unres.git

diff --git a/source/unres/src_Eshel/SRC-SURPLUS/lagrangian_lesyng.F b/source/unres/src_Eshel/SRC-SURPLUS/lagrangian_lesyng.F
new file mode 100644
index 0000000..8a9163a
--- /dev/null
+++ b/source/unres/src_Eshel/SRC-SURPLUS/lagrangian_lesyng.F
@@ -0,0 +1,726 @@
+      subroutine lagrangian
+c-------------------------------------------------------------------------
+c  This subroutine contains the total Lagrangian from which the
+c  accelerations are obtained.  For numerical gradient checking, the
+c  derivatives of the Lagrangian with respect to the velocities and the
+c  coordinates are calculated separately.
+c-------------------------------------------------------------------------
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+#endif
+      include 'COMMON.VAR'
+      include 'COMMON.CHAIN'
+      include 'COMMON.DERIV'
+      include 'COMMON.GEO'
+      include 'COMMON.LOCAL'
+      include 'COMMON.INTERACT'
+      include 'COMMON.MD'
+      include 'COMMON.IOUNITS'
+      include 'COMMON.CONTROL'
+      include 'COMMON.MUCA'
+      include 'COMMON.TIME1'
+
+      integer i,j,ind
+      double precision zapas(MAXRES6),muca_factor
+      logical lprn /.false./
+      common /cipiszcze/ itime
+
+#ifdef TIMING
+      time00=MPI_Wtime()
+#endif
+      do j=1,3
+        zapas(j)=-gcart(j,0)
+      enddo
+      ind=3
+      if (lprn) then
+        write (iout,*) "Potential forces backbone"
+      endif
+      do i=nnt,nct-1
+        if (lprn) write (iout,'(i5,3e15.5,5x,3e15.5)')
+     &    i,(-gcart(j,i),j=1,3)
+        do j=1,3
+          ind=ind+1
+          zapas(ind)=-gcart(j,i)
+        enddo
+      enddo
+      if (lprn) write (iout,*) "Potential forces sidechain"
+      do i=nnt,nct
+        if (itype(i).ne.10) then
+          if (lprn) write (iout,'(i5,3e15.5,5x,3e15.5)')
+     &      i,(-gxcart(j,i),j=1,3)
+          do j=1,3
+            ind=ind+1
+            zapas(ind)=-gxcart(j,i)
+          enddo
+        endif
+      enddo
+
+c Multiply the generalized force vector by the inverse of the inertia
+c matrix to obtain the accelerations.
+      call ginv_mult(zapas,d_a_work)
+
+      do j=1,3
+        d_a(j,0)=d_a_work(j)
+      enddo
+      ind=3
+      do i=nnt,nct-1
+        do j=1,3
+          ind=ind+1
+          d_a(j,i)=d_a_work(ind)
+        enddo
+      enddo
+      do i=nnt,nct
+        if (itype(i).ne.10) then
+          do j=1,3
+            ind=ind+1
+            d_a(j,i+nres)=d_a_work(ind)
+          enddo
+        endif
+      enddo
+
+      if(lmuca) then
+        imtime=imtime+1
+        if(mucadyn.gt.0) call muca_update(potE)
+        factor=muca_factor(potE)*t_bath*Rb
+
+cd      print *,'lmuca ',factor,potE
+        do j=1,3
+          d_a(j,0)=d_a(j,0)*factor
+        enddo
+        do i=nnt,nct-1
+          do j=1,3
+            d_a(j,i)=d_a(j,i)*factor
+          enddo
+        enddo
+        do i=nnt,nct
+          do j=1,3
+            d_a(j,i+nres)=d_a(j,i+nres)*factor
+          enddo
+        enddo
+
+      endif
+
+      if (lprn) then
+        write(iout,*) 'acceleration 3D'
+        write (iout,'(i3,3f10.5,3x,3f10.5)') 0,(d_a(j,0),j=1,3)
+        do i=nnt,nct-1
+          write (iout,'(i3,3f10.5,3x,3f10.5)') i,(d_a(j,i),j=1,3)
+        enddo
+        do i=nnt,nct
+          write (iout,'(i3,3f10.5,3x,3f10.5)')
+     &      i+nres,(d_a(j,i+nres),j=1,3)
+        enddo
+      endif
+#ifdef TIMING
+      time_lagrangian=time_lagrangian+MPI_Wtime()-time00
+#endif
+      return
+      end
+c------------------------------------------------------------------
+      subroutine setup_MD_matrices
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+      integer ierror
+#endif
+      include 'COMMON.SETUP'
+      include 'COMMON.CONTROL'
+      include 'COMMON.VAR'
+      include 'COMMON.CHAIN'
+      include 'COMMON.DERIV'
+      include 'COMMON.GEO'
+      include 'COMMON.LOCAL'
+      include 'COMMON.INTERACT'
+      include 'COMMON.MD'
+#ifndef LANG0
+      include 'COMMON.LANGEVIN'
+#else
+      include 'COMMON.LANGEVIN.lang0'
+#endif
+      include 'COMMON.IOUNITS'
+      include 'COMMON.TIME1'
+      integer i,j
+      logical lprn /.false./
+      logical osob
+      double precision dtdi,massvec(maxres2),Gcopy(maxres2,maxres2),
+     &  Ghalf(mmaxres2),sqreig(maxres2),invsqreig(maxres2),Gcopytmp,
+     &  Gsqrptmp,Gsqrmtmp,Gvec2tmp,Gvectmp(maxres2,maxres2)
+      double precision work(8*maxres6)
+      integer iwork(maxres6)
+      common /przechowalnia/ Gcopy,Ghalf,invsqreig,Gvectmp
+c
+c Set up the matrix of the (dC,dX)-->(C,X) transformation (A), the
+c inertia matrix (Gmat) and the inverse of the inertia matrix (Ginv)
+c
+c Determine the number of degrees of freedom (dimen) and the number of
+c sites (dimen1)
+      dimen=(nct-nnt+1)+nside
+      dimen1=(nct-nnt)+(nct-nnt+1)
+      dimen3=dimen*3
+#ifdef MPI
+      if (nfgtasks.gt.1) then
+        time00=MPI_Wtime()
+        call MPI_Bcast(5,1,MPI_INTEGER,king,FG_COMM,IERROR)
+        time_Bcast=time_Bcast+MPI_Wtime()-time00
+        call int_bounds(dimen,igmult_start,igmult_end)
+        igmult_start=igmult_start-1
+        call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,
+     &    ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        my_ng_count=igmult_end-igmult_start
+        call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,
+     &    MPI_INTEGER,FG_COMM,IERROR)
+        write (iout,*) 'Processor:',fg_rank,' CG group',kolor,
+     &    ' absolute rank',myrank,' igmult_start',igmult_start,
+     &    ' igmult_end',igmult_end,' count',my_ng_count
+        write (iout,*) "ng_start",(ng_start(i),i=0,nfgtasks-1)
+        write (iout,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
+        call flush(iout)
+      else
+#endif
+        igmult_start=1
+        igmult_end=dimen
+        my_ng_count=dimen
+#ifdef MPI
+      endif
+#endif
+c      write (iout,*) "dimen",dimen," dimen1",dimen1," dimen3",dimen3
+c Zeroing out A and fricmat
+      do i=1,dimen
+        do j=1,dimen
+          A(i,j)=0.0D0
+        enddo
+      enddo
+c Diagonal elements of the dC part of A and the respective friction coefficients
+      ind=1
+      ind1=0
+      do i=nnt,nct-1
+        ind=ind+1
+        ind1=ind1+1
+        coeff=0.25d0*IP
+        massvec(ind1)=mp
+        Gmat(ind,ind)=coeff
+        A(ind1,ind)=0.5d0
+      enddo
+
+c Off-diagonal elements of the dC part of A
+      k=3
+      do i=1,nct-nnt
+        do j=1,i
+          A(i,j)=1.0d0
+        enddo
+      enddo
+c Diagonal elements of the dX part of A and the respective friction coefficients
+      m=nct-nnt
+      m1=nct-nnt+1
+      ind=0
+      ind1=0
+      do i=nnt,nct
+        ind=ind+1
+        ii = ind+m
+        iti=itype(i)
+        massvec(ii)=msc(iti)
+        if (iti.ne.10) then
+          ind1=ind1+1
+          ii1= ind1+m1
+          A(ii,ii1)=1.0d0
+          Gmat(ii1,ii1)=ISC(iti)
+        endif
+      enddo
+c Off-diagonal elements of the dX part of A
+      ind=0
+      k=nct-nnt
+      do i=nnt,nct
+        iti=itype(i)
+        ind=ind+1
+        do j=nnt,i
+          ii = ind
+          jj = j-nnt+1
+          A(k+ii,jj)=1.0d0
+        enddo
+      enddo
+      if (lprn) then
+        write (iout,*)
+        write (iout,*) "Vector massvec"
+        do i=1,dimen1
+          write (iout,*) i,massvec(i)
+        enddo
+        write (iout,'(//a)') "A"
+        call matout(dimen,dimen1,maxres2,maxres2,A)
+      endif
+
+c Calculate the G matrix (store in Gmat)
+      do k=1,dimen
+        do i=1,dimen
+          dtdi=0.0d0
+          do j=1,dimen1
+            dtdi=dtdi+A(j,k)*A(j,i)*massvec(j)
+          enddo
+          Gmat(k,i)=Gmat(k,i)+dtdi
+        enddo
+      enddo
+
+      if (lprn) then
+        write (iout,'(//a)') "Gmat"
+        call matout(dimen,dimen,maxres2,maxres2,Gmat)
+      endif
+      do i=1,dimen
+        do j=1,dimen
+          Ginv(i,j)=0.0d0
+          Gcopy(i,j)=Gmat(i,j)
+        enddo
+        Ginv(i,i)=1.0d0
+      enddo
+c Invert the G matrix
+      call MATINVERT(dimen,maxres2,Gcopy,Ginv,osob)
+      if (lprn) then
+        write (iout,'(//a)') "Ginv"
+        call matout(dimen,dimen,maxres2,maxres2,Ginv)
+      endif
+#ifdef MPI
+      if (nfgtasks.gt.1) then
+        myginv_ng_count=maxres2*my_ng_count
+        call MPI_Allgather(maxres2*igmult_start,1,MPI_INTEGER,
+     &    nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,
+     &    nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
+        if (lprn .and. (me.eq.king .or. .not. out1file) ) then
+         write (iout,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
+         write (iout,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
+         call flush(iout)
+        endif
+c        call MPI_Scatterv(ginv(1,1),nginv_counts(0),
+c     &    nginv_start(0),MPI_DOUBLE_PRECISION,ginv,
+c     &    myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
+c        call MPI_Barrier(FG_COMM,IERR)
+        time00=MPI_Wtime()
+        call MPI_Scatterv(ginv(1,1),nginv_counts(0),
+     &    nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),
+     &    myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
+#ifdef TIMING
+        time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
+#endif
+        do i=1,dimen
+          do j=1,2*my_ng_count
+            ginv(j,i)=gcopy(i,j)
+          enddo
+        enddo
+c        write (iout,*) "Master's chunk of ginv"
+c        call MATOUT2(my_ng_count,dimen,maxres2,maxres2,ginv)
+      endif
+#endif
+      if (osob) then
+        write (iout,*) "The G matrix is singular."
+        stop
+      endif
+c Compute G**(-1/2) and G**(1/2)
+      ind=0
+      do i=1,dimen
+        do j=1,i
+          ind=ind+1
+          Ghalf(ind)=Gmat(i,j)
+        enddo
+      enddo
+      call gldiag(maxres2,dimen,dimen,Ghalf,work,Geigen,Gvec,
+     &  ierr,iwork)
+      if (lprn) then
+        write (iout,'(//a)')
+     &    "Eigenvectors and eigenvalues of the G matrix"
+        call eigout(dimen,dimen,maxres2,maxres2,Gvec,Geigen)
+      endif
+
+      do i=1,dimen
+        sqreig(i)=dsqrt(Geigen(i))
+        invsqreig(i)=1.d0/sqreig(i)
+      enddo
+      do i=1,dimen
+        do j=1,dimen
+          Gvectmp(i,j)=Gvec(j,i)
+        enddo
+      enddo
+
+      do i=1,dimen
+        do j=1,dimen
+          Gsqrptmp=0.0d0
+          Gsqrmtmp=0.0d0
+          Gcopytmp=0.0d0
+          do k=1,dimen
+c            Gvec2tmp=Gvec(i,k)*Gvec(j,k)
+            Gvec2tmp=Gvec(k,i)*Gvec(k,j)
+            Gsqrptmp=Gsqrptmp+Gvec2tmp*sqreig(k)
+            Gsqrmtmp=Gsqrmtmp+Gvec2tmp*invsqreig(k)
+            Gcopytmp=Gcopytmp+Gvec2tmp*Geigen(k)
+          enddo
+          Gsqrp(i,j)=Gsqrptmp
+          Gsqrm(i,j)=Gsqrmtmp
+          Gcopy(i,j)=Gcopytmp
+        enddo
+      enddo
+
+      do i=1,dimen
+        do j=1,dimen
+          Gvec(i,j)=Gvectmp(j,i)
+        enddo
+      enddo
+
+      if (lprn) then
+        write (iout,*) "Comparison of original and restored G"
+        do i=1,dimen
+          do j=1,dimen
+            write (iout,'(2i5,5f10.5)') i,j,Gmat(i,j),Gcopy(i,j),
+     &        Gmat(i,j)-Gcopy(i,j),Gsqrp(i,j),Gsqrm(i,j)
+          enddo
+        enddo
+      endif
+      return
+      end
+c-------------------------------------------------------------------------------
+      SUBROUTINE EIGOUT(NC,NR,LM2,LM3,A,B)
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+      include 'COMMON.IOUNITS'
+      double precision A(LM2,LM3),B(LM2)
+      KA=1
+      KC=6
+    1 KB=MIN0(KC,NC)
+      WRITE(IOUT,600) (I,I=KA,KB)
+      WRITE(IOUT,601) (B(I),I=KA,KB)
+      WRITE(IOUT,602)
+    2 N=0
+      DO 3 I=1,NR
+      WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
+      N=N+1
+      IF(N.LT.10) GO TO 3
+      WRITE(IOUT,602)
+      N=0
+    3 CONTINUE
+    4 IF (KB.EQ.NC) RETURN
+      KA=KC+1
+      KC=KC+6
+      GO TO 1
+  600 FORMAT (// 9H ROOT NO.,I4,9I11)
+  601 FORMAT (/5X,10(1PE11.4))
+  602 FORMAT (2H  )
+  603 FORMAT (I5,10F11.5)
+  604 FORMAT (1H1)
+      END
+c-------------------------------------------------------------------------------
+      SUBROUTINE MATOUT(NC,NR,LM2,LM3,A)
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+      include 'COMMON.IOUNITS'
+      double precision A(LM2,LM3)
+      KA=1
+      KC=6
+    1 KB=MIN0(KC,NC)
+      WRITE(IOUT,600) (I,I=KA,KB)
+      WRITE(IOUT,602)
+    2 N=0
+      DO 3 I=1,NR
+      WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
+      N=N+1
+      IF(N.LT.10) GO TO 3
+      WRITE(IOUT,602)
+      N=0
+    3 CONTINUE
+    4 IF (KB.EQ.NC) RETURN
+      KA=KC+1
+      KC=KC+6
+      GO TO 1
+  600 FORMAT (//5x,9I11)
+  602 FORMAT (2H  )
+  603 FORMAT (I5,10F11.3)
+  604 FORMAT (1H1)
+      END
+c-------------------------------------------------------------------------------
+      SUBROUTINE MATOUT1(NC,NR,LM2,LM3,A)
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+      include 'COMMON.IOUNITS'
+      double precision A(LM2,LM3)
+      KA=1
+      KC=21
+    1 KB=MIN0(KC,NC)
+      WRITE(IOUT,600) (I,I=KA,KB)
+      WRITE(IOUT,602)
+    2 N=0
+      DO 3 I=1,NR
+      WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
+      N=N+1
+      IF(N.LT.3) GO TO 3
+      WRITE(IOUT,602)
+      N=0
+    3 CONTINUE
+    4 IF (KB.EQ.NC) RETURN
+      KA=KC+1
+      KC=KC+21
+      GO TO 1
+  600 FORMAT (//5x,7(3I5,2x))
+  602 FORMAT (2H  )
+  603 FORMAT (I5,7(3F5.1,2x))
+  604 FORMAT (1H1)
+      END
+c-------------------------------------------------------------------------------
+      SUBROUTINE MATOUT2(NC,NR,LM2,LM3,A)
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+      include 'COMMON.IOUNITS'
+      double precision A(LM2,LM3)
+      KA=1
+      KC=12
+    1 KB=MIN0(KC,NC)
+      WRITE(IOUT,600) (I,I=KA,KB)
+      WRITE(IOUT,602)
+    2 N=0
+      DO 3 I=1,NR
+      WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
+      N=N+1
+      IF(N.LT.3) GO TO 3
+      WRITE(IOUT,602)
+      N=0
+    3 CONTINUE
+    4 IF (KB.EQ.NC) RETURN
+      KA=KC+1
+      KC=KC+12
+      GO TO 1
+  600 FORMAT (//5x,4(3I9,2x))
+  602 FORMAT (2H  )
+  603 FORMAT (I5,4(3F9.3,2x))
+  604 FORMAT (1H1)
+      END
+c---------------------------------------------------------------------------
+      SUBROUTINE ginv_mult(z,d_a_tmp)
+      implicit real*8 (a-h,o-z)
+      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+      integer ierr
+#endif
+      include 'COMMON.SETUP'
+      include 'COMMON.TIME1'
+      include 'COMMON.MD'
+      double precision z(dimen3),d_a_tmp(dimen3),temp(maxres6),time00,
+     &  time01
+#ifdef MPI
+      if (nfgtasks.gt.1) then
+        if (fg_rank.eq.0) then
+c The matching BROADCAST for fg processors is called in ERGASTULUM
+          time00=MPI_Wtime()
+          call MPI_Bcast(4,1,MPI_INTEGER,king,FG_COMM,IERROR)
+          time_Bcast=time_Bcast+MPI_Wtime()-time00
+c          print *,"Processor",myrank," BROADCAST iorder in GINV_MULT"
+        endif
+c        write (2,*) "time00",time00
+c        write (2,*) "Before Scatterv"
+c        call flush(2)
+c        write (2,*) "Whole z (for FG master)"
+c        do i=1,dimen
+c          write (2,*) i,z(i)
+c        enddo
+c        call MPI_Barrier(FG_COMM,IERROR)
+        time00=MPI_Wtime()
+        call MPI_Scatterv(z,ng_counts(0),ng_start(0),
+     &    MPI_DOUBLE_PRECISION,
+     &    z,3*my_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
+c        write (2,*) "My chunk of z"
+c        do i=1,3*my_ng_count
+c          write (2,*) i,z(i)
+c        enddo
+c        write (2,*) "After SCATTERV"
+c        call flush(2)
+c        write (2,*) "MPI_Wtime",MPI_Wtime()
+        time_scatter=time_scatter+MPI_Wtime()-time00
+#ifdef TIMING
+        time_scatter_ginvmult=time_scatter_ginvmult+MPI_Wtime()-time00
+#endif
+c        write (2,*) "time_scatter",time_scatter
+c        write (2,*) "dimen",dimen," dimen3",dimen3," my_ng_count",
+c     &    my_ng_count
+c        call flush(2)
+        time01=MPI_Wtime()
+        do k=0,2
+          do i=1,dimen
+            ind=(i-1)*3+k+1
+            temp(ind)=0.0d0
+            do j=1,my_ng_count
+c              write (2,*) "k,i,j,ind",k,i,j,ind,(j-1)*3+k+1,
+c     &          Ginv(i,j),z((j-1)*3+k+1),
+c     &          Ginv(i,j)*z((j-1)*3+k+1)
+c              temp(ind)=temp(ind)+Ginv(i,j)*z((j-1)*3+k+1)
+              temp(ind)=temp(ind)+Ginv(j,i)*z((j-1)*3+k+1)
+            enddo
+          enddo
+        enddo
+        time_ginvmult=time_ginvmult+MPI_Wtime()-time01
+c        write (2,*) "Before REDUCE"
+c        call flush(2)
+c        write (2,*) "z before reduce"
+c        do i=1,dimen
+c          write (2,*) i,temp(i)
+c        enddo
+        time00=MPI_Wtime()
+        call MPI_Reduce(temp(1),d_a_tmp(1),dimen3,
+     &    MPI_DOUBLE_PRECISION,MPI_SUM,king,FG_COMM,IERR)
+        time_reduce=time_reduce+MPI_Wtime()-time00
+c        write (2,*) "After REDUCE"
+c        call flush(2)
+      else
+#endif
+#ifdef TIMING
+        time01=MPI_Wtime()
+#endif
+        do k=0,2
+          do i=1,dimen
+            ind=(i-1)*3+k+1
+            d_a_tmp(ind)=0.0d0
+            do j=1,dimen
+c              write (2,*) "k,i,j,ind",k,i,j,ind,(j-1)*3+k+1
+c              call flush(2)
+c     &          Ginv(i,j),z((j-1)*3+k+1),
+c     &          Ginv(i,j)*z((j-1)*3+k+1)
+              d_a_tmp(ind)=d_a_tmp(ind)
+     &          +Ginv(j,i)*z((j-1)*3+k+1)
+c              d_a_tmp(ind)=d_a_tmp(ind)
+c     &          +Ginv(i,j)*z((j-1)*3+k+1)
+            enddo
+          enddo
+        enddo
+#ifdef TIMING
+        time_ginvmult=time_ginvmult+MPI_Wtime()-time01
+#endif
+#ifdef MPI
+      endif
+#endif
+      return
+      end
+c---------------------------------------------------------------------------
+#ifdef GINV_MULT
+      SUBROUTINE ginv_mult_test(z,d_a_tmp)
+      include 'DIMENSIONS'
+      integer dimen
+c      include 'COMMON.MD'
+      double precision z(dimen),d_a_tmp(dimen)
+      double precision ztmp(dimen/3),dtmp(dimen/3)
+
+c      do i=1,dimen
+c        d_a_tmp(i)=0.0d0
+c        do j=1,dimen
+c          d_a_tmp(i)=d_a_tmp(i)+Ginv(i,j)*z(j)
+c        enddo
+c      enddo
+c
+c      return
+
+!ibm* unroll(3)
+      do k=0,2
+        do j=1,dimen/3
+          ztmp(j)=z((j-1)*3+k+1)
+        enddo
+
+        call alignx(16,ztmp(1))
+        call alignx(16,dtmp(1))
+        call alignx(16,Ginv(1,1))
+
+        do i=1,dimen/3
+          dtmp(i)=0.0d0
+          do j=1,dimen/3
+            dtmp(i)=dtmp(i)+Ginv(i,j)*ztmp(j)
+          enddo
+        enddo
+        do i=1,dimen/3
+          ind=(i-1)*3+k+1
+          d_a_tmp(ind)=dtmp(i)
+        enddo
+      enddo
+      return
+      end
+#endif
+c---------------------------------------------------------------------------
+      SUBROUTINE fricmat_mult(z,d_a_tmp)
+      include 'DIMENSIONS'
+#ifdef MPI
+      include 'mpif.h'
+      integer IERROR
+#endif
+      include 'COMMON.MD'
+      include 'COMMON.IOUNITS'
+      include 'COMMON.SETUP'
+      include 'COMMON.TIME1'
+#ifndef LANG0
+      include 'COMMON.LANGEVIN'
+#else
+      include 'COMMON.LANGEVIN.lang0'
+#endif
+      double precision z(dimen3),d_a_tmp(dimen3),temp(maxres6),time00,
+     &  time01
+#ifdef MPI
+      if (nfgtasks.gt.1) then
+        if (fg_rank.eq.0) then
+c The matching BROADCAST for fg processors is called in ERGASTULUM
+          time00=MPI_Wtime()
+          call MPI_Bcast(9,1,MPI_INTEGER,king,FG_COMM,IERROR)
+          time_Bcast=time_Bcast+MPI_Wtime()-time00
+c          print *,"Processor",myrank," BROADCAST iorder in FRICMAT_MULT"
+        endif
+c        call MPI_Barrier(FG_COMM,IERROR)
+        time00=MPI_Wtime()
+        call MPI_Scatterv(z,ng_counts(0),ng_start(0),
+     &    MPI_DOUBLE_PRECISION,
+     &    z,3*my_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
+c        write (2,*) "My chunk of z"
+c        do i=1,3*my_ng_count
+c          write (2,*) i,z(i)
+c        enddo
+        time_scatter=time_scatter+MPI_Wtime()-time00
+#ifdef TIMING
+        time_scatter_fmatmult=time_scatter_fmatmult+MPI_Wtime()-time00
+#endif
+        time01=MPI_Wtime()
+        do k=0,2
+          do i=1,dimen
+            ind=(i-1)*3+k+1
+            temp(ind)=0.0d0
+            do j=1,my_ng_count
+              temp(ind)=temp(ind)-fricmat(j,i)*z((j-1)*3+k+1)
+            enddo
+          enddo
+        enddo
+        time_fricmatmult=time_fricmatmult+MPI_Wtime()-time01
+c        write (2,*) "Before REDUCE"
+c        write (2,*) "d_a_tmp before reduce"
+c        do i=1,dimen3
+c          write (2,*) i,temp(i)
+c        enddo
+c        call flush(2)
+        time00=MPI_Wtime()
+        call MPI_Reduce(temp(1),d_a_tmp(1),dimen3,
+     &    MPI_DOUBLE_PRECISION,MPI_SUM,king,FG_COMM,IERR)
+        time_reduce=time_reduce+MPI_Wtime()-time00
+c        write (2,*) "After REDUCE"
+c        call flush(2)
+      else
+#endif
+#ifdef TIMING
+        time01=MPI_Wtime()
+#endif
+        do k=0,2
+          do i=1,dimen
+            ind=(i-1)*3+k+1
+            d_a_tmp(ind)=0.0d0
+            do j=1,dimen
+              d_a_tmp(ind)=d_a_tmp(ind)
+     &          -fricmat(j,i)*z((j-1)*3+k+1)
+            enddo
+          enddo
+        enddo
+#ifdef TIMING
+        time_fricmatmult=time_fricmatmult+MPI_Wtime()-time01
+#endif
+#ifdef MPI
+      endif
+#endif
+c      write (iout,*) "Vector d_a"
+c      do i=1,dimen3
+c        write (2,*) i,d_a_tmp(i)
+c      enddo
+      return
+      end
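
Note (editorial addition; not part of the commit above): setup_MD_matrices obtains G**(1/2) and G**(-1/2) by diagonalizing the inertia matrix, G = V diag(lambda) V**T, and rescaling the eigenvalues, which is also how the "restored" matrix printed under the lprn flag is rebuilt. The standalone sketch below illustrates just that construction. It assumes LAPACK's dsyev is available in place of the UNRES gldiag routine, and the 3x3 matrix g is an arbitrary symmetric positive-definite stand-in for the real G; all names in it are illustrative only and do not come from the UNRES sources.

      program ghalf_sketch
c Illustrative sketch only: build G**(1/2) and G**(-1/2) from the
c eigendecomposition G = V*diag(lambda)*V**T, as setup_MD_matrices does
c with gldiag.  LAPACK dsyev stands in for gldiag (assumption), and g
c is an arbitrary symmetric positive-definite stand-in for the inertia
c matrix.
      implicit none
      integer n,lwork
      parameter (n=3,lwork=64)
      double precision g(n,n),v(n,n),eig(n),work(lwork)
      double precision gsqrp(n,n),gsqrm(n,n),gback(n,n),s
      integer i,j,k,info
      data g /4.0d0,1.0d0,0.5d0,
     &        1.0d0,3.0d0,0.2d0,
     &        0.5d0,0.2d0,2.0d0/
c dsyev overwrites its input with the eigenvectors, so diagonalize a copy
      do i=1,n
        do j=1,n
          v(i,j)=g(i,j)
        enddo
      enddo
      call dsyev('V','U',n,v,n,eig,work,lwork,info)
      if (info.ne.0) stop 'dsyev failed'
c G**(1/2)  = V diag(sqrt(lambda)) V**T
c G**(-1/2) = V diag(1/sqrt(lambda)) V**T
      do i=1,n
        do j=1,n
          gsqrp(i,j)=0.0d0
          gsqrm(i,j)=0.0d0
          gback(i,j)=0.0d0
          do k=1,n
            s=v(i,k)*v(j,k)
            gsqrp(i,j)=gsqrp(i,j)+s*dsqrt(eig(k))
            gsqrm(i,j)=gsqrm(i,j)+s/dsqrt(eig(k))
            gback(i,j)=gback(i,j)+s*eig(k)
          enddo
        enddo
      enddo
c gback should reproduce g; report the largest deviation as a check
      s=0.0d0
      do i=1,n
        do j=1,n
          s=max(s,dabs(gback(i,j)-g(i,j)))
        enddo
      enddo
      write (*,'(a,1pe12.4)') ' max |V L V**T - G| = ',s
      end

Rebuilding G from the scaled eigenvectors (gback above) and reporting the largest deviation mirrors the "Comparison of original and restored G" check that setup_MD_matrices performs when lprn is enabled.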