2 !-----------------------------------------------------------------------------
9 !-----------------------------------------------------------------------------
12 !-----------------------------------------------------------------------------
14 !-----------------------------------------------------------------------------
16 !-----------------------------------------------------------------------------
18 !-------------------------------------------------------------------------
19 ! This subroutine contains the total lagrangian from which the accelerations
20 ! are obtained. For numerical gradient checking, the derivative of the
21 ! lagrangian in the velocities and coordinates is calculated separately
22 !-------------------------------------------------------------------------
! NOTE(review): this chunk appears to be an excerpt -- the "subroutine
! lagrangian" header and several loop statements are not visible here, so
! the comments below describe only what the visible lines demonstrate.
23 ! implicit real*8 (a-h,o-z)
24 ! include 'DIMENSIONS'
27 use geometry_data, only: nres
28 use control_data !el, only: mucadyn,lmuca
31 real(kind=8) :: time00
33 ! include 'COMMON.VAR'
34 ! include 'COMMON.CHAIN'
35 ! include 'COMMON.DERIV'
36 ! include 'COMMON.GEO'
37 ! include 'COMMON.LOCAL'
38 ! include 'COMMON.INTERACT'
40 ! include 'COMMON.IOUNITS'
41 ! include 'COMMON.CONTROL'
42 ! include 'COMMON.MUCA'
43 ! include 'COMMON.TIME1'
45 integer :: i,j,ind,itime
! zapas packs the negated Cartesian gradient (i.e. the force vector)
! into a 1-D work array of length 6*nres.
46 real(kind=8) :: zapas(6*nres) !,muca_factor !maxres6=6*maxres
47 logical :: lprn = .false.
48 !el common /cipiszcze/ itime
59 write (iout,*) "Potential forces backbone"
62 if (lprn) write (iout,'(i5,3e15.5,5x,3e15.5)') &
! Backbone forces: minus the Cartesian energy gradient gcart.
66 zapas(ind)=-gcart(j,i)
69 if (lprn) write (iout,*) "Potential forces sidechain"
! Skip residues of type 10 (presumably glycine -- confirm against the
! residue-type tables) and the dummy type ntyp1, which carry no side chain.
71 if (itype(i).ne.10 .and. itype(i).ne.ntyp1) then
72 if (lprn) write (iout,'(i5,3e15.5,5x,3e15.5)') &
! Side-chain forces: minus the side-chain Cartesian gradient gxcart.
76 zapas(ind)=-gxcart(j,i)
! Multiply the packed force vector by the inverse inertia matrix
! (see ginv_mult below) to obtain the accelerations in d_a_work.
81 call ginv_mult(zapas,d_a_work)
! Unpack the acceleration work array back into d_a (backbone part, then
! the side-chain part stored at offset nres).
90 d_a(j,i)=d_a_work(ind)
94 if (itype(i).ne.10 .and. itype(i).ne.ntyp1) then
97 d_a(j,i+nres)=d_a_work(ind)
! Multicanonical (MUCA) dynamics: rescale all accelerations by the
! energy-dependent factor muca_factor(potE)*t_bath*Rb.
104 if(mucadyn.gt.0) call muca_update(potE)
105 factor=muca_factor(potE)*t_bath*Rb
107 !d print *,'lmuca ',factor,potE
109 d_a(j,0)=d_a(j,0)*factor
113 d_a(j,i)=d_a(j,i)*factor
118 d_a(j,i+nres)=d_a(j,i+nres)*factor
! Debug printout of the resulting 3-D accelerations.
125 write(iout,*) 'acceleration 3D'
126 write (iout,'(i3,3f10.5,3x,3f10.5)') 0,(d_a(j,0),j=1,3)
128 write (iout,'(i3,3f10.5,3x,3f10.5)') i,(d_a(j,i),j=1,3)
131 write (iout,'(i3,3f10.5,3x,3f10.5)') &
132 i+nres,(d_a(j,i+nres),j=1,3)
! Accumulate wall-clock time spent in this routine.
136 time_lagrangian=time_lagrangian+MPI_Wtime()-time00
139 end subroutine lagrangian
140 !-----------------------------------------------------------------------------
141 subroutine setup_MD_matrices
!-----------------------------------------------------------------------------
! Builds the matrices required by the MD integrator:
!   A          - (dC,dX) --> (C,X) transformation matrix
!   Gmat       - inertia matrix, accumulated as A^T * diag(massvec) * A
!                (see the dtdi accumulation below)
!   Ginv       - inverse of Gmat (via MATINVERT)
!   Gsqrp/Gsqrm- G**(1/2) and G**(-1/2), built from the eigendecomposition
!                of Gmat returned by gldiag
! In the parallel case (nfgtasks>1) it also sets up the scatter/gather
! bookkeeping arrays ng_start/ng_counts and nginv_start/nginv_counts.
! NOTE(review): this is an excerpt; several loops and assignments
! (e.g. the one setting nres2, presumably nres2=2*nres) are not visible.
!-----------------------------------------------------------------------------
143 use geometry_data, only: nres,nside
147 use geometry, only:int_bounds
149 ! implicit real*8 (a-h,o-z)
150 ! include 'DIMENSIONS'
154 real(kind=8) :: time00
156 ! include 'COMMON.SETUP'
157 ! include 'COMMON.VAR'
158 ! include 'COMMON.CHAIN'
159 ! include 'COMMON.DERIV'
160 ! include 'COMMON.GEO'
161 ! include 'COMMON.LOCAL'
162 ! include 'COMMON.INTERACT'
163 ! include 'COMMON.MD'
165 ! include 'COMMON.LANGEVIN'
167 ! include 'COMMON.LANGEVIN.lang0'
169 ! include 'COMMON.IOUNITS'
170 ! include 'COMMON.TIME1'
171 logical :: lprn = .false.
! massvec holds the per-coordinate masses used to weight A^T A below.
174 real(kind=8),dimension(2*nres) :: massvec,sqreig !(maxres2) maxres2=2*maxres
175 !el real(kind=8),dimension(:),allocatable :: Ghalf
176 !el real(kind=8),dimension(2*nres*(2*nres+1)/2) :: Ghalf !(mmaxres2) (mmaxres2=(maxres2*(maxres2+1)/2))
177 !el real(kind=8),dimension(2*nres,2*nres) :: Gcopy !(maxres2,maxres2)
178 !el real(kind=8),dimension(:,:),allocatable :: Gcopy
179 real(kind=8),dimension(8*6*nres) :: work !(8*maxres6)
180 integer,dimension(6*nres) :: iwork !(maxres6) maxres6=6*maxres
181 !el common /przechowalnia/ Gcopy,Ghalf
182 real(kind=8) :: coeff
183 integer :: i,j,ind,ind1,k,ii,jj,m,m1,ii1,iti,nres2,ierr
! Scratch copies: Gcopy (full square) and Ghalf (packed upper triangle).
186 if(.not.allocated(Gcopy)) allocate(Gcopy(nres2,nres2)) !(maxres2,maxres2)
187 if(.not.allocated(Ghalf)) allocate(Ghalf(nres2*(nres2+1)/2)) !mmaxres2=(maxres2*(maxres+1)/2)
189 ! Set up the matrix of the (dC,dX)-->(C,X) transformation (A), the
190 ! inertia matrix (Gmat) and the inverse of the inertia matrix (Ginv)
192 ! Determine the number of degrees of freedom (dimen) and the number of
! dimen counts backbone sites nnt..nct plus the nside side chains.
194 dimen=(nct-nnt+1)+nside
195 dimen1=(nct-nnt)+(nct-nnt+1)
! Parallel setup: broadcast the work-order code (5) to the fine-grain
! tasks and compute each task's slice of the G-matrix rows.
198 if (nfgtasks.gt.1) then
200 call MPI_Bcast(5,1,MPI_INTEGER,king,FG_COMM,IERROR)
201 time_Bcast=time_Bcast+MPI_Wtime()-time00
202 call int_bounds(dimen,igmult_start,igmult_end)
203 igmult_start=igmult_start-1
! Gather every task's start offset and count (in units of 3 coordinates).
204 call MPI_Allgather(3*igmult_start,1,MPI_INTEGER,&
205 ng_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
206 my_ng_count=igmult_end-igmult_start
207 call MPI_Allgather(3*my_ng_count,1,MPI_INTEGER,ng_counts(0),1,&
208 MPI_INTEGER,FG_COMM,IERROR)
209 write (iout,*) 'Processor:',fg_rank,' CG group',kolor,&
210 ' absolute rank',myrank,' igmult_start',igmult_start,&
211 ' igmult_end',igmult_end,' count',my_ng_count
212 write (iout,*) "ng_start",(ng_start(i),i=0,nfgtasks-1)
213 write (iout,*) "ng_counts",(ng_counts(i),i=0,nfgtasks-1)
223 ! write (iout,*) "dimen",dimen," dimen1",dimen1," dimen3",dimen3
224 ! Zeroing out A and fricmat
230 ! Diagonal elements of the dC part of A and the respective friction coefficients
242 ! Off-diagonal elements of the dC part of A
249 ! Diagonal elements of the dX part of A and the respective friction coefficients
! Side-chain masses and moments of inertia by residue type iti.
259 massvec(ii)=msc(iabs(iti))
260 if (iti.ne.10 .and. iti.ne.ntyp1) then
264 Gmat(ii1,ii1)=ISC(iabs(iti))
267 ! Off-diagonal elements of the dX part of A
281 write (iout,*) "Vector massvec"
283 write (iout,*) i,massvec(i)
285 write (iout,'(//a)') "A"
286 call matout(dimen,dimen1,nres2,nres2,A)
289 ! Calculate the G matrix (store in Gmat)
! Accumulate Gmat(k,i) += sum_j A(j,k)*massvec(j)*A(j,i), i.e. A^T M A.
294 dtdi=dtdi+A(j,k)*A(j,i)*massvec(j)
296 Gmat(k,i)=Gmat(k,i)+dtdi
301 write (iout,'(//a)') "Gmat"
302 call matout(dimen,dimen,nres2,nres2,Gmat)
311 ! Invert the G matrix
! osob presumably flags a singular matrix -- see the message below.
312 call MATINVERT(dimen,nres2,Gcopy,Ginv,osob)
314 write (iout,'(//a)') "Ginv"
315 call matout(dimen,dimen,nres2,nres2,Ginv)
! Parallel case: publish each task's slice of Ginv (row counts scaled
! by nres2, the leading dimension) and scatter Ginv chunks into gcopy.
318 if (nfgtasks.gt.1) then
319 myginv_ng_count=nres2*my_ng_count
320 call MPI_Allgather(nres2*igmult_start,1,MPI_INTEGER,&
321 nginv_start(0),1,MPI_INTEGER,FG_COMM,IERROR)
322 call MPI_Allgather(myginv_ng_count,1,MPI_INTEGER,&
323 nginv_counts(0),1,MPI_INTEGER,FG_COMM,IERROR)
324 write (iout,*) "nginv_start",(nginv_start(i),i=0,nfgtasks-1)
325 write (iout,*) "nginv_counts",(nginv_counts(i),i=0,nfgtasks-1)
327 ! call MPI_Scatterv(ginv(1,1),nginv_counts(0),
328 ! & nginv_start(0),MPI_DOUBLE_PRECISION,ginv,
329 ! & myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
330 ! call MPI_Barrier(FG_COMM,IERR)
332 call MPI_Scatterv(ginv(1,1),nginv_counts(0),&
333 nginv_start(0),MPI_DOUBLE_PRECISION,gcopy(1,1),&
334 myginv_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
336 time_scatter_ginv=time_scatter_ginv+MPI_Wtime()-time00
343 ! write (iout,*) "Master's chunk of ginv"
344 ! call MATOUT2(my_ng_count,dimen,maxres2,maxres2,ginv)
348 write (iout,*) "The G matrix is singular."
351 ! Compute G**(-1/2) and G**(1/2)
! gldiag diagonalizes the packed matrix Ghalf, returning eigenvalues in
! Geigen and eigenvectors in Gvec.
359 call gldiag(nres2,dimen,dimen,Ghalf,work,Geigen,Gvec,&
362 write (iout,'(//a)') &
363 "Eigenvectors and eigenvalues of the G matrix"
364 call eigout(dimen,dimen,nres2,nres2,Gvec,Geigen)
367 sqreig(i)=dsqrt(Geigen(i))
! Rebuild G**(1/2) (Gsqrp), G**(-1/2) (Gsqrm) and a restored copy of G
! (Gcopy) from the spectral decomposition: V * f(lambda) * V^T.
375 Gsqrp(i,j)=Gsqrp(i,j)+Gvec(i,k)*Gvec(j,k)*sqreig(k)
376 Gsqrm(i,j)=Gsqrm(i,j)+Gvec(i,k)*Gvec(j,k)/sqreig(k)
377 Gcopy(i,j)=Gcopy(i,j)+Gvec(i,k)*Gvec(j,k)*Geigen(k)
! Sanity check: compare the original Gmat with the restored Gcopy.
382 write (iout,*) "Comparison of original and restored G"
385 write (iout,'(2i5,5f10.5)') i,j,Gmat(i,j),Gcopy(i,j),&
386 Gmat(i,j)-Gcopy(i,j),Gsqrp(i,j),Gsqrm(i,j)
392 end subroutine setup_MD_matrices
393 !-----------------------------------------------------------------------------
394 subroutine EIGOUT(NC,NR,LM2,LM3,A,B)
! Print helper (legacy fixed-format style): writes the eigenvalues B and
! the NR x NC eigenvector matrix A to unit IOUT in panels of up to 10
! columns (KA..KB), with a "ROOT NO." header per panel.
! LM2/LM3 are the declared leading dimensions of A.
! NOTE(review): excerpt -- the panel loop control (KA/KB/KC updates) is
! not visible here.
396 ! implicit real*8 (a-h,o-z)
397 ! include 'DIMENSIONS'
398 ! include 'COMMON.IOUNITS'
399 integer :: LM2,LM3,NC,NR,KA,KC,KB,I,J,N
400 real(kind=8) :: A(LM2,LM3),B(LM2)
404 WRITE(IOUT,600) (I,I=KA,KB)
405 WRITE(IOUT,601) (B(I),I=KA,KB)
409 WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
! Stop once the last column panel has been printed.
415 4 IF (KB.EQ.NC) RETURN
! 9H ROOT NO. is a Hollerith literal for the string " ROOT NO.".
419 600 FORMAT (// 9H ROOT NO.,I4,9I11)
420 601 FORMAT (/5X,10(1PE11.4))
422 603 FORMAT (I5,10F11.5)
424 end subroutine EIGOUT
425 !-----------------------------------------------------------------------------
426 subroutine MATOUT(NC,NR,LM2,LM3,A)
! Print helper: writes the NR x NC matrix A to unit IOUT in panels of up
! to 10 columns, F11.3 format. LM2/LM3 are the declared dimensions of A.
! NOTE(review): excerpt -- the panel loop control is not visible here.
428 ! implicit real*8 (a-h,o-z)
429 ! include 'DIMENSIONS'
430 ! include 'COMMON.IOUNITS'
431 integer :: LM2,LM3,NC,NR,KA,KC,KB,I,J,N
432 real(kind=8) :: A(LM2,LM3)
436 WRITE(IOUT,600) (I,I=KA,KB)
440 WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
! Stop once the last column panel has been printed.
446 4 IF (KB.EQ.NC) RETURN
450 600 FORMAT (//5x,9I11)
452 603 FORMAT (I5,10F11.3)
454 end subroutine MATOUT
455 !-----------------------------------------------------------------------------
456 subroutine MATOUT1(NC,NR,LM2,LM3,A)
! Print helper, compact variant: writes the NR x NC matrix A to IOUT in
! panels of 7 groups of 3 narrow (F5.1) columns per line.
! NOTE(review): excerpt -- the panel loop control is not visible here.
458 ! implicit real*8 (a-h,o-z)
459 ! include 'DIMENSIONS'
460 ! include 'COMMON.IOUNITS'
461 integer :: LM2,LM3,NC,NR,KA,KC,KB,I,J,N
462 real(kind=8) :: A(LM2,LM3)
466 WRITE(IOUT,600) (I,I=KA,KB)
470 WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
! Stop once the last column panel has been printed.
476 4 IF (KB.EQ.NC) RETURN
480 600 FORMAT (//5x,7(3I5,2x))
482 603 FORMAT (I5,7(3F5.1,2x))
484 end subroutine MATOUT1
485 !-----------------------------------------------------------------------------
486 subroutine MATOUT2(NC,NR,LM2,LM3,A)
! Print helper, medium-width variant: writes the NR x NC matrix A to
! IOUT in panels of 4 groups of 3 F9.3 columns per line.
! NOTE(review): excerpt -- the panel loop control is not visible here.
488 ! implicit real*8 (a-h,o-z)
489 ! include 'DIMENSIONS'
490 ! include 'COMMON.IOUNITS'
491 integer :: I,J,KA,KC,KB,N
492 integer :: LM2,LM3,NC,NR
493 real(kind=8) :: A(LM2,LM3)
497 WRITE(IOUT,600) (I,I=KA,KB)
501 WRITE(IOUT,603) I,(A(I,J),J=KA,KB)
! Stop once the last column panel has been printed.
507 4 IF (KB.EQ.NC) RETURN
511 600 FORMAT (//5x,4(3I9,2x))
513 603 FORMAT (I5,4(3F9.3,2x))
515 end subroutine MATOUT2
516 !-----------------------------------------------------------------------------
517 subroutine ginv_mult(z,d_a_tmp)
! Computes d_a_tmp = Ginv * z, i.e. maps the packed force vector z onto
! accelerations using the inverse inertia matrix Ginv (set up in
! setup_MD_matrices).
! Parallel path (nfgtasks>1): the FG master broadcasts work-order code 4,
! z is scattered to the tasks (z1 = local chunk), each task forms its
! partial product in temp, and MPI_Reduce(MPI_SUM) assembles the result
! on the master. Serial path: plain accumulation into d_a_tmp.
! NOTE(review): excerpt -- the loop headers around the accumulation
! statements are not visible here.
519 use geometry_data, only: nres
522 ! implicit real*8 (a-h,o-z)
523 ! include 'DIMENSIONS'
526 integer :: ierr,ierror
528 ! include 'COMMON.SETUP'
529 ! include 'COMMON.TIME1'
530 ! include 'COMMON.MD'
531 real(kind=8),dimension(dimen3) :: z,z1,d_a_tmp
532 real(kind=8),dimension(6*nres) :: temp !(maxres6) maxres6=6*maxres
533 real(kind=8) :: time00,time01
536 if (nfgtasks.gt.1) then
537 if (fg_rank.eq.0) then
538 ! The matching BROADCAST for fg processors is called in ERGASTULUM
540 call MPI_Bcast(4,1,MPI_INTEGER,king,FG_COMM,IERROR)
541 time_Bcast=time_Bcast+MPI_Wtime()-time00
542 ! print *,"Processor",myrank," BROADCAST iorder in GINV_MULT"
544 ! write (2,*) "time00",time00
545 ! write (2,*) "Before Scatterv"
547 ! write (2,*) "Whole z (for FG master)"
551 ! call MPI_Barrier(FG_COMM,IERROR)
553 !elwrite(iout,*) "do tej pory jest OK, MPI_Scatterv w ginv_mult"
! Scatter z by the per-task offsets/counts computed in setup_MD_matrices.
554 call MPI_Scatterv(z,ng_counts(0),ng_start(0),&
555 MPI_DOUBLE_PRECISION,&
556 z1,3*my_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
557 ! write (2,*) "My chunk of z"
558 ! do i=1,3*my_ng_count
561 ! write (2,*) "After SCATTERV"
563 ! write (2,*) "MPI_Wtime",MPI_Wtime()
564 time_scatter=time_scatter+MPI_Wtime()-time00
566 time_scatter_ginvmult=time_scatter_ginvmult+MPI_Wtime()-time00
568 ! write (2,*) "time_scatter",time_scatter
569 ! write (2,*) "dimen",dimen," dimen3",dimen3," my_ng_count",
578 ! write (2,*) "k,i,j,ind",k,i,j,ind,(j-1)*3+k+1,
579 ! & Ginv(i,j),z((j-1)*3+k+1),
580 ! & Ginv(i,j)*z((j-1)*3+k+1)
581 ! temp(ind)=temp(ind)+Ginv(i,j)*z((j-1)*3+k+1)
! Note the transposed indexing Ginv(j,i) (cf. the commented-out Ginv(i,j)
! version above) -- presumably valid because the inertia matrix and hence
! Ginv are symmetric, while giving column-major-friendly access; confirm.
582 temp(ind)=temp(ind)+Ginv(j,i)*z1((j-1)*3+k+1)
586 time_ginvmult=time_ginvmult+MPI_Wtime()-time01
587 ! write (2,*) "Before REDUCE"
589 ! write (2,*) "z before reduce"
591 ! write (2,*) i,temp(i)
! Sum the per-task partial products into d_a_tmp on the master (king).
594 call MPI_Reduce(temp(1),d_a_tmp(1),dimen3,MPI_DOUBLE_PRECISION,&
595 MPI_SUM,king,FG_COMM,IERR)
596 time_reduce=time_reduce+MPI_Wtime()-time00
597 ! write (2,*) "After REDUCE"
! Serial path: accumulate the full product directly into d_a_tmp.
609 ! write (2,*) "k,i,j,ind",k,i,j,ind,(j-1)*3+k+1
611 ! & Ginv(i,j),z((j-1)*3+k+1),
612 ! & Ginv(i,j)*z((j-1)*3+k+1)
613 d_a_tmp(ind)=d_a_tmp(ind) &
614 +Ginv(j,i)*z((j-1)*3+k+1)
615 ! d_a_tmp(ind)=d_a_tmp(ind)
616 ! & +Ginv(i,j)*z((j-1)*3+k+1)
621 time_ginvmult=time_ginvmult+MPI_Wtime()-time01
627 end subroutine ginv_mult
628 !-----------------------------------------------------------------------------
630 subroutine ginv_mult_test(z,d_a_tmp)
! Experimental/benchmark variant of ginv_mult: repacks z into a
! contiguous ztmp, accumulates dtmp = Ginv * ztmp, then (presumably)
! unpacks dtmp into d_a_tmp -- the unpacking loop is not visible in this
! excerpt.
632 ! include 'DIMENSIONS'
634 ! include 'COMMON.MD'
635 real(kind=8),dimension(dimen) :: z,d_a_tmp
636 real(kind=8),dimension(dimen/3) :: ztmp,dtmp
641 ! d_a_tmp(i)=d_a_tmp(i)+Ginv(i,j)*z(j)
650 ztmp(j)=z((j-1)*3+k+1)
! alignx looks like a compiler-specific alignment hint (16-byte) for
! vectorization -- TODO confirm which compiler provides it.
653 call alignx(16,ztmp(1))
654 call alignx(16,dtmp(1))
655 call alignx(16,Ginv(1,1))
660 dtmp(i)=dtmp(i)+Ginv(i,j)*ztmp(j)
669 end subroutine ginv_mult_test
671 !-----------------------------------------------------------------------------
672 subroutine fricmat_mult(z,d_a_tmp)
! Computes d_a_tmp = -fricmat * z (friction contribution, note the minus
! sign on the accumulation below). The MPI structure mirrors ginv_mult:
! the FG master broadcasts work-order code 9, z is scattered (z1 = local
! chunk), each task accumulates its partial product in temp, and
! MPI_Reduce(MPI_SUM) assembles the result on the master. Serial path:
! direct accumulation into d_a_tmp.
! NOTE(review): excerpt -- the loop headers around the accumulation
! statements are not visible here.
674 use geometry_data, only: nres
677 ! include 'DIMENSIONS'
680 integer :: IERROR,ierr
682 ! include 'COMMON.MD'
683 ! include 'COMMON.IOUNITS'
684 ! include 'COMMON.SETUP'
685 ! include 'COMMON.TIME1'
687 ! include 'COMMON.LANGEVIN'
689 ! include 'COMMON.LANGEVIN.lang0'
691 real(kind=8),dimension(dimen3) :: z,z1,d_a_tmp
692 real(kind=8),dimension(6*nres) :: temp !(maxres6) maxres6=6*maxres
693 real(kind=8) :: time00,time01
694 integer :: i,j,k,ind,nres2
696 !el if(.not.allocated(fricmat)) allocate(fricmat(nres2,nres2))
699 if (nfgtasks.gt.1) then
700 if (fg_rank.eq.0) then
701 ! The matching BROADCAST for fg processors is called in ERGASTULUM
703 call MPI_Bcast(9,1,MPI_INTEGER,king,FG_COMM,IERROR)
704 time_Bcast=time_Bcast+MPI_Wtime()-time00
705 ! print *,"Processor",myrank," BROADCAST iorder in FRICMAT_MULT"
707 ! call MPI_Barrier(FG_COMM,IERROR)
! Scatter z by the per-task offsets/counts computed in setup_MD_matrices.
709 call MPI_Scatterv(z,ng_counts(0),ng_start(0),&
710 MPI_DOUBLE_PRECISION,&
711 z1,3*my_ng_count,MPI_DOUBLE_PRECISION,king,FG_COMM,IERR)
712 ! write (2,*) "My chunk of z"
713 ! do i=1,3*my_ng_count
716 time_scatter=time_scatter+MPI_Wtime()-time00
718 time_scatter_fmatmult=time_scatter_fmatmult+MPI_Wtime()-time00
! Transposed indexing fricmat(j,i), as in ginv_mult; minus sign applies
! the friction force convention.
726 temp(ind)=temp(ind)-fricmat(j,i)*z1((j-1)*3+k+1)
730 time_fricmatmult=time_fricmatmult+MPI_Wtime()-time01
731 ! write (2,*) "Before REDUCE"
732 ! write (2,*) "d_a_tmp before reduce"
734 ! write (2,*) i,temp(i)
! Sum the per-task partial products into d_a_tmp on the master (king).
738 call MPI_Reduce(temp(1),d_a_tmp(1),dimen3,MPI_DOUBLE_PRECISION,&
739 MPI_SUM,king,FG_COMM,IERR)
740 time_reduce=time_reduce+MPI_Wtime()-time00
741 ! write (2,*) "After REDUCE"
! Serial path: accumulate the full product directly into d_a_tmp.
753 d_a_tmp(ind)=d_a_tmp(ind) &
754 -fricmat(j,i)*z((j-1)*3+k+1)
759 time_fricmatmult=time_fricmatmult+MPI_Wtime()-time01
764 ! write (iout,*) "Vector d_a"
766 ! write (2,*) i,d_a_tmp(i)
769 end subroutine fricmat_mult
770 !-----------------------------------------------------------------------------
771 !-----------------------------------------------------------------------------