mirror of https://github.com/QuantumPackage/qp2.git synced 2024-11-15 02:23:51 +01:00

Improved AosxAos representations of 1e-Jastrow

AbdAmmar 2024-01-24 19:25:17 +01:00
parent bb8dd171b8
commit 9b2ba694d9
3 changed files with 149 additions and 144 deletions

View File

@@ -65,5 +65,8 @@ are defined by the tables `j1e_coef` and `j1e_expo`, respectively.
<img src="https://latex.codecogs.com/png.image?%5Cinline%20%5Clarge%20%5Cdpi%7B200%7D%5Cbg%7Bwhite%7Du_%7B1e%7D(%5Cmathbf%7Br%7D_1)=-%5Cfrac%7BN-1%7D%7B2N%7D%5C,%5Csum_%7B%5Csigma%7D%5C,%5Cint%20d%5Cmathbf%7Br%7D_2%5C,%5Crho%5E%7B%5Csigma%7D(%5Cmathbf%7Br%7D_2)%5C,u_%7B2e%7D(%5Cmathbf%7Br%7D_1,%5Cmathbf%7Br%7D_2)">
</p>
- if `j1e_type` is **Charge_Harmonizer_AO**: The one-electron Jastrow factor **Charge_Harmonizer** is fitted by the atomic orbitals
- if `j1e_type` is **Charge_Harmonizer_AO**: The one-electron Jastrow factor **Charge_Harmonizer** is fitted by the product of atomic orbitals:
<p align="center">
<img src="https://latex.codecogs.com/png.image?\inline&space;\large&space;\dpi{300}\bg{white}&space;u_{1e}(\mathbf{r})=\sum_{\alpha,\beta}C_{\alpha,\beta}\chi_{\alpha}(\mathbf{r})\chi_{\beta}(\mathbf{r})">
</p>
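As an aside (not part of this commit), here is a minimal sketch of how the fitted one-electron Jastrow can be evaluated at a single grid point. It assumes the flattened storage convention used by `coef_fit2` in the provider below, ij = (i-1)*ao_num + j, and AO values tabulated per grid point as in `aos_in_r_array`; the helper name and its explicit argument list are invented for illustration only.

    ! Hypothetical helper: u_1e(r_ipoint) = \sum_{i,j} C_{ij} \chi_i(r) \chi_j(r)
    double precision function u1e_fit_at_point(ipoint, ao_num, n_points, coef_fit2, aos_in_r)
      implicit none
      integer         , intent(in) :: ipoint, ao_num, n_points
      double precision, intent(in) :: coef_fit2(ao_num*ao_num)  ! fitted C_{ij}, flattened as (i-1)*ao_num + j
      double precision, intent(in) :: aos_in_r(ao_num,n_points) ! \chi_i evaluated at each grid point
      integer                      :: i, j, ij
      u1e_fit_at_point = 0.d0
      do i = 1, ao_num
        do j = 1, ao_num
          ij = (i-1)*ao_num + j
          u1e_fit_at_point = u1e_fit_at_point &
                           + coef_fit2(ij) * aos_in_r(i,ipoint) * aos_in_r(j,ipoint)
        enddo
      enddo
    end function u1e_fit_at_point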

View File

@@ -231,96 +231,22 @@ END_PROVIDER
! !$OMP END PARALLEL
!
! deallocate(coef_fit)
!
! elseif(j1e_type .eq. "Charge_Harmonizer_AO2") then
!
! ! \grad_1 \sum_{\eta,\beta} C_{\eta,\beta} \chi_{\eta} \chi_{\beta}
! ! where
! ! \chi_{\eta} are the AOs
! ! C_{\eta,\beta} are fitted to mimic (j1e_type .eq. "Charge_Harmonizer")
! !
! ! The - sign is in the parameters C_{\eta,\beta}
!
! PROVIDE aos_grad_in_r_array
!
! allocate(coef_fit2(ao_num*ao_num))
!
! if(mpi_master) then
! call ezfio_has_jastrow_j1e_coef_ao2(exists)
! endif
! IRP_IF MPI_DEBUG
! print *, irp_here, mpi_rank
! call MPI_BARRIER(MPI_COMM_WORLD, ierr)
! IRP_ENDIF
! IRP_IF MPI
! call MPI_BCAST(coef_fit2, ao_num*ao_num, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
! if (ierr /= MPI_SUCCESS) then
! stop 'Unable to read j1e_coef_ao2 with MPI'
! endif
! IRP_ENDIF
! if(exists) then
! if(mpi_master) then
! write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef_ao2 ] <<<<< ..'
! call ezfio_get_jastrow_j1e_coef_ao2(coef_fit2)
! IRP_IF MPI
! call MPI_BCAST(coef_fit2, ao_num*ao_num, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
! if (ierr /= MPI_SUCCESS) then
! stop 'Unable to read j1e_coef_ao2 with MPI'
! endif
! IRP_ENDIF
! endif
! else
!
! call get_j1e_coef_fit_ao2(ao_num*ao_num, coef_fit2)
! call ezfio_set_jastrow_j1e_coef_ao2(coef_fit2)
!
! endif
!
! !$OMP PARALLEL &
! !$OMP DEFAULT (NONE) &
! !$OMP PRIVATE (i, j, ij, ipoint, c) &
! !$OMP SHARED (n_points_final_grid, ao_num, &
! !$OMP aos_grad_in_r_array, coef_fit2, &
! !$OMP aos_in_r_array, j1e_gradx, j1e_grady, j1e_gradz)
! !$OMP DO SCHEDULE (static)
! do ipoint = 1, n_points_final_grid
!
! j1e_gradx(ipoint) = 0.d0
! j1e_grady(ipoint) = 0.d0
! j1e_gradz(ipoint) = 0.d0
!
! do i = 1, ao_num
! do j = 1, ao_num
! ij = (i-1)*ao_num + j
!
! c = coef_fit2(ij)
!
! j1e_gradx(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,1) + aos_grad_in_r_array(i,ipoint,1) * aos_in_r_array(j,ipoint))
! j1e_grady(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,2) + aos_grad_in_r_array(i,ipoint,2) * aos_in_r_array(j,ipoint))
! j1e_gradz(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,3) + aos_grad_in_r_array(i,ipoint,3) * aos_in_r_array(j,ipoint))
! enddo
! enddo
! enddo
! !$OMP END DO
! !$OMP END PARALLEL
!
! deallocate(coef_fit2)
elseif(j1e_type .eq. "Charge_Harmonizer_AO") then
! \sum_{\eta} \vec{C}_{\eta} \chi_{\eta}
! \grad_1 \sum_{\eta,\beta} C_{\eta,\beta} \chi_{\eta} \chi_{\beta}
! where
! \chi_{\eta} are the AOs
! \vec{C}_{\eta} are fitted to mimic (j1e_type .eq. "Charge_Harmonizer")
! C_{\eta,\beta} are fitted to mimic (j1e_type .eq. "Charge_Harmonizer")
!
! The - sign is in the parameters \vec{C}_{\eta}
! The - sign is in the parameters C_{\eta,\beta}
PROVIDE aos_grad_in_r_array
allocate(coef_fit3(ao_num,3))
allocate(coef_fit2(ao_num*ao_num))
if(mpi_master) then
call ezfio_has_jastrow_j1e_coef_ao3(exists)
call ezfio_has_jastrow_j1e_coef_ao2(exists)
endif
IRP_IF MPI_DEBUG
print *, irp_here, mpi_rank
@@ -328,34 +254,34 @@ END_PROVIDER
IRP_ENDIF
IRP_IF MPI
include 'mpif.h'
call MPI_BCAST(coef_fit3, (ao_num*3), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(coef_fit2, ao_num*ao_num, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
if (ierr /= MPI_SUCCESS) then
stop 'Unable to read j1e_coef_ao3 with MPI'
stop 'Unable to read j1e_coef_ao2 with MPI'
endif
IRP_ENDIF
if(exists) then
if(mpi_master) then
write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef_ao3 ] <<<<< ..'
call ezfio_get_jastrow_j1e_coef_ao3(coef_fit3)
write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef_ao2 ] <<<<< ..'
call ezfio_get_jastrow_j1e_coef_ao2(coef_fit2)
IRP_IF MPI
call MPI_BCAST(coef_fit3, (ao_num*3), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(coef_fit2, ao_num*ao_num, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
if (ierr /= MPI_SUCCESS) then
stop 'Unable to read j1e_coef_ao3 with MPI'
stop 'Unable to read j1e_coef_ao2 with MPI'
endif
IRP_ENDIF
endif
else
call get_j1e_coef_fit_ao3(ao_num, coef_fit3)
call ezfio_set_jastrow_j1e_coef_ao3(coef_fit3)
call get_j1e_coef_fit_ao2(ao_num*ao_num, coef_fit2)
call ezfio_set_jastrow_j1e_coef_ao2(coef_fit2)
endif
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, ipoint, cx, cy, cz) &
!$OMP PRIVATE (i, j, ij, ipoint, c) &
!$OMP SHARED (n_points_final_grid, ao_num, &
!$OMP aos_grad_in_r_array, coef_fit3, &
!$OMP aos_grad_in_r_array, coef_fit2, &
!$OMP aos_in_r_array, j1e_gradx, j1e_grady, j1e_gradz)
!$OMP DO SCHEDULE (static)
do ipoint = 1, n_points_final_grid
@@ -363,20 +289,95 @@ END_PROVIDER
j1e_gradx(ipoint) = 0.d0
j1e_grady(ipoint) = 0.d0
j1e_gradz(ipoint) = 0.d0
do i = 1, ao_num
cx = coef_fit3(i,1)
cy = coef_fit3(i,2)
cz = coef_fit3(i,3)
j1e_gradx(ipoint) += cx * aos_in_r_array(i,ipoint)
j1e_grady(ipoint) += cy * aos_in_r_array(i,ipoint)
j1e_gradz(ipoint) += cz * aos_in_r_array(i,ipoint)
do i = 1, ao_num
do j = 1, ao_num
ij = (i-1)*ao_num + j
c = coef_fit2(ij)
j1e_gradx(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,1) + aos_grad_in_r_array(i,ipoint,1) * aos_in_r_array(j,ipoint))
j1e_grady(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,2) + aos_grad_in_r_array(i,ipoint,2) * aos_in_r_array(j,ipoint))
j1e_gradz(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,3) + aos_grad_in_r_array(i,ipoint,3) * aos_in_r_array(j,ipoint))
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(coef_fit3)
deallocate(coef_fit2)
! elseif(j1e_type .eq. "Charge_Harmonizer_AO3") then
!
! ! \sum_{\eta} \vec{C}_{\eta} \chi_{\eta}
! ! where
! ! \chi_{\eta} are the AOs
! ! \vec{C}_{\eta} are fitted to mimic (j1e_type .eq. "Charge_Harmonizer")
! !
! ! The - sign is in the parameters \vec{C}_{\eta}
!
! PROVIDE aos_grad_in_r_array
!
! allocate(coef_fit3(ao_num,3))
!
! if(mpi_master) then
! call ezfio_has_jastrow_j1e_coef_ao3(exists)
! endif
! IRP_IF MPI_DEBUG
! print *, irp_here, mpi_rank
! call MPI_BARRIER(MPI_COMM_WORLD, ierr)
! IRP_ENDIF
! IRP_IF MPI
! !include 'mpif.h'
! call MPI_BCAST(coef_fit3, (ao_num*3), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
! if (ierr /= MPI_SUCCESS) then
! stop 'Unable to read j1e_coef_ao3 with MPI'
! endif
! IRP_ENDIF
! if(exists) then
! if(mpi_master) then
! write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef_ao3 ] <<<<< ..'
! call ezfio_get_jastrow_j1e_coef_ao3(coef_fit3)
! IRP_IF MPI
! call MPI_BCAST(coef_fit3, (ao_num*3), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
! if (ierr /= MPI_SUCCESS) then
! stop 'Unable to read j1e_coef_ao3 with MPI'
! endif
! IRP_ENDIF
! endif
! else
!
! call get_j1e_coef_fit_ao3(ao_num, coef_fit3)
! call ezfio_set_jastrow_j1e_coef_ao3(coef_fit3)
!
! endif
!
! !$OMP PARALLEL &
! !$OMP DEFAULT (NONE) &
! !$OMP PRIVATE (i, ipoint, cx, cy, cz) &
! !$OMP SHARED (n_points_final_grid, ao_num, &
! !$OMP aos_grad_in_r_array, coef_fit3, &
! !$OMP aos_in_r_array, j1e_gradx, j1e_grady, j1e_gradz)
! !$OMP DO SCHEDULE (static)
! do ipoint = 1, n_points_final_grid
!
! j1e_gradx(ipoint) = 0.d0
! j1e_grady(ipoint) = 0.d0
! j1e_gradz(ipoint) = 0.d0
! do i = 1, ao_num
! cx = coef_fit3(i,1)
! cy = coef_fit3(i,2)
! cz = coef_fit3(i,3)
!
! j1e_gradx(ipoint) += cx * aos_in_r_array(i,ipoint)
! j1e_grady(ipoint) += cy * aos_in_r_array(i,ipoint)
! j1e_gradz(ipoint) += cz * aos_in_r_array(i,ipoint)
! enddo
! enddo
! !$OMP END DO
! !$OMP END PARALLEL
!
! deallocate(coef_fit3)
else

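For reference, the `Charge_Harmonizer_AO` branch above accumulates the gradient of the AO-product fit through the product rule,

    \nabla_1 u_{1e}(\mathbf{r}_1) = \sum_{\eta,\beta} C_{\eta,\beta} [ \chi_{\eta}(\mathbf{r}_1) \nabla_1\chi_{\beta}(\mathbf{r}_1) + \chi_{\beta}(\mathbf{r}_1) \nabla_1\chi_{\eta}(\mathbf{r}_1) ],

which is exactly what the x/y/z loops over `aos_in_r_array` and `aos_grad_in_r_array` compute, with C_{\eta,\beta} stored flattened in `coef_fit2` as ij = (\eta-1)*ao_num + \beta and the minus sign absorbed into the fitted coefficients, as noted in the comments.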
View File

@@ -80,24 +80,33 @@ subroutine get_j1e_coef_fit_ao(dim_fit, coef_fit)
allocate(A_inv(ao_num,ao_num))
call get_inverse(A, ao_num, ao_num, A_inv, ao_num)
deallocate(A)
! coef_fit = A_inv x b
call dgemv("N", ao_num, ao_num, 1.d0, A_inv, ao_num, b, 1, 0.d0, coef_fit, 1)
!integer :: j, k
!double precision :: tmp
!print *, ' check A_inv'
!do i = 1, ao_num
! tmp = 0.d0
! do j = 1, ao_num
! tmp += ao_overlap(i,j) * coef_fit(j)
! enddo
! tmp = tmp - b(i)
! print*, i, tmp
!enddo
integer :: j
double precision :: tmp, acc, nrm
deallocate(A_inv, b)
acc = 0.d0
nrm = 0.d0
print *, ' check A_inv'
do i = 1, ao_num
tmp = 0.d0
do j = 1, ao_num
tmp += ao_overlap(i,j) * coef_fit(j)
enddo
tmp = tmp - b(i)
if(dabs(tmp) .gt. 1d-8) then
print*, ' problem found in fitting 1e-Jastrow'
print*, i, tmp
endif
acc += dabs(tmp)
nrm += dabs(b(i))
enddo
print *, ' Relative Error (%) =', 100.d0*acc/nrm
deallocate(A, A_inv, b)
call wall_time(t1)
print*, ' END after (min) ', (t1-t0)/60.d0
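In other words, the check added above validates the fitted coefficients against the linear system they are meant to solve: with S the AO overlap matrix (`ao_overlap`) and b the right-hand side assembled earlier in the routine, it forms the residual

    r_i = \sum_j S_{ij} c_j - b_i,    Relative Error (%) = 100 * (\sum_i |r_i|) / (\sum_i |b_i|),

and flags any component with |r_i| > 1d-8 as a problem in the 1e-Jastrow fit.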
@@ -128,7 +137,7 @@ subroutine get_j1e_coef_fit_ao2(dim_fit, coef_fit)
PROVIDE mo_coef
call wall_time(t0)
print*, ' PROVIDING the representation of 1e-Jastrow in AOs x AOx ... '
print*, ' PROVIDING the representation of 1e-Jastrow in AOs x AOs ... '
! --- --- ---
! get u1e(r)
@@ -188,10 +197,10 @@ subroutine get_j1e_coef_fit_ao2(dim_fit, coef_fit)
!$OMP END DO
!$OMP END PARALLEL
print *, ' A'
do ij = 1, ao_num*ao_num
write(*, '(100000(f15.7))') (A(ij,kl), kl = 1, ao_num*ao_num)
enddo
! print *, ' A'
! do ij = 1, ao_num*ao_num
! write(*, '(100000(f15.7))') (A(ij,kl), kl = 1, ao_num*ao_num)
! enddo
! --- --- ---
! get b
@@ -223,44 +232,35 @@ subroutine get_j1e_coef_fit_ao2(dim_fit, coef_fit)
! solve Ax = b
allocate(A_inv(ao_num*ao_num,ao_num*ao_num))
call get_inverse(A, ao_num*ao_num, ao_num*ao_num, A_inv, ao_num*ao_num)
integer :: mn
print *, ' check A_inv'
do ij = 1, ao_num*ao_num
do kl = 1, ao_num*ao_num
tmp = 0.d0
do mn = 1, ao_num*ao_num
tmp += A(ij,mn) * A_inv(mn,kl)
enddo
print*, ij, kl, tmp
enddo
enddo
!call get_inverse(A, ao_num*ao_num, ao_num*ao_num, A_inv, ao_num*ao_num)
call get_pseudo_inverse(A, ao_num*ao_num, ao_num*ao_num, ao_num*ao_num, A_inv, ao_num*ao_num, 5d-8)
! coef_fit = A_inv x b
!call dgemv("N", ao_num*ao_num, ao_num*ao_num, 1.d0, A_inv, ao_num*ao_num, b, 1, 0.d0, coef_fit(1,1), 1)
do ij = 1, ao_num*ao_num
coef_fit(ij) = 0.d0
do kl = 1, ao_num*ao_num
coef_fit(ij) += A_inv(ij,kl) * b(kl)
enddo
enddo
call dgemv("N", ao_num*ao_num, ao_num*ao_num, 1.d0, A_inv, ao_num*ao_num, b, 1, 0.d0, coef_fit, 1)
double precision :: tmp
print *, ' check A_inv'
integer :: mn
double precision :: tmp, acc, nrm
acc = 0.d0
nrm = 0.d0
do ij = 1, ao_num*ao_num
tmp = 0.d0
do kl = 1, ao_num*ao_num
tmp += A(ij,kl) * coef_fit(kl)
enddo
tmp = tmp - b(ij)
print*, ij, tmp
enddo
if(dabs(tmp) .gt. 1d-7) then
print*, ' problem found in fitting 1e-Jastrow'
print*, ij, tmp
endif
deallocate(A)
deallocate(A_inv, b)
acc += dabs(tmp)
nrm += dabs(b(ij))
enddo
print *, ' Relative Error (%) =', 100.d0*acc/nrm
deallocate(A, A_inv, b)
call wall_time(t1)
print*, ' END after (min) ', (t1-t0)/60.d0
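A note on the solver change above: the exact inversion of the ao_num^2 x ao_num^2 matrix A is replaced by a pseudo-inverse with a singular-value cutoff of 5d-8, so the coefficients are obtained as c = A^+ b via `dgemv`. This is the standard remedy when the AO-product basis {\chi_i \chi_j} makes A (nearly) singular, which is presumably why the plain `get_inverse` call was commented out. The same relative-error metric as in `get_j1e_coef_fit_ao` is then printed, 100 * (\sum_{ij} |(A c - b)_{ij}|) / (\sum_{ij} |b_{ij}|), with individual residuals above 1d-7 reported.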
@@ -373,6 +373,7 @@ subroutine get_j1e_coef_fit_ao3(dim_fit, coef_fit)
enddo
tmp = tmp - b(i,d)
if(dabs(tmp) .gt. 1d-8) then
print*, ' problem found in fitting 1e-Jastrow'
print*, d, i, tmp
endif