diff --git a/README.md b/README.md index b03f2ecc..5a35f63d 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,7 @@ +**Important**: The Intel ifx compiler cannot produce correct +executables for Quantum Package. Please keep using ifort as long as you can, and +consider switching to gfortran in the long term. + # Quantum Package 2.2 diff --git a/config/gfortran_mkl.cfg b/config/gfortran_mkl.cfg new file mode 100644 index 00000000..f2787d63 --- /dev/null +++ b/config/gfortran_mkl.cfg @@ -0,0 +1,62 @@ +# Common flags +############## +# +# -ffree-line-length-none : Needed for IRPF90 which produces long lines +# -lblas -llapack : Link with libblas and liblapack libraries provided by the system +# -I . : Include the current directory (Mandatory) +# +# --ninja : Allow the use of ninja. (Mandatory) +# --align=32 : Align all provided arrays on a 32-byte boundary +# +# +[COMMON] +FC : gfortran -ffree-line-length-none -I . -mavx -g -fPIC -std=legacy +LAPACK_LIB : -I${MKLROOT}/include -L${MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_gf_lp64 -lmkl_core -lpthread -lm -ldl -lmkl_gnu_thread -lgomp -fopenmp +IRPF90 : irpf90 +IRPF90_FLAGS : --ninja --align=32 -DSET_NESTED + +# Global options +################ +# +# 1 : Activate +# 0 : Deactivate +# +[OPTION] +MODE : OPT ; [ OPT | PROFILE | DEBUG ] : Chooses the section below +CACHE : 0 ; Enable cache_compile.py +OPENMP : 1 ; Append OpenMP flags + +# Optimization flags +#################### +# +# -Ofast : Disregard strict standards compliance. Enables all -O3 optimizations. +# It also enables optimizations that are not valid +# for all standard-compliant programs. It turns on +# -ffast-math and the Fortran-specific +# -fno-protect-parens and -fstack-arrays. +[OPT] +FCFLAGS : -Ofast -mavx + +# Profiling flags +################# +# +[PROFILE] +FC : -p -g +FCFLAGS : -Ofast + +# Debugging flags +################# +# +# -fcheck=all : Checks uninitialized variables, array subscripts, etc...
+# -g : Extra debugging information +# +[DEBUG] +FCFLAGS : -fcheck=all -g + +# OpenMP flags +################# +# +[OPENMP] +FC : -fopenmp +IRPF90_FLAGS : --openmp + diff --git a/config/ifort_2021_avx.cfg b/config/ifort_2021_avx.cfg index 6c34cf47..55fe0ee7 100644 --- a/config/ifort_2021_avx.cfg +++ b/config/ifort_2021_avx.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : ifort -fpic +FC : ifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 -DINTEL diff --git a/config/ifort_2021_avx_mpi.cfg b/config/ifort_2021_avx_mpi.cfg index 4c893c73..362f482a 100644 --- a/config/ifort_2021_avx_mpi.cfg +++ b/config/ifort_2021_avx_mpi.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : mpiifort -fpic +FC : mpiifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 -DMPI -DINTEL diff --git a/config/ifort_2021_avx_notz.cfg b/config/ifort_2021_avx_notz.cfg index 1fa595d7..3cd80236 100644 --- a/config/ifort_2021_avx_notz.cfg +++ b/config/ifort_2021_avx_notz.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : ifort -fpic +FC : ifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 --define=WITHOUT_TRAILZ --define=WITHOUT_SHIFTRL diff --git a/config/ifort_2021_debug.cfg b/config/ifort_2021_debug.cfg index 80802f33..2e30642c 100644 --- a/config/ifort_2021_debug.cfg +++ b/config/ifort_2021_debug.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : ifort -fpic +FC : ifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 --assert -DINTEL diff --git a/config/ifort_2021_mpi_rome.cfg b/config/ifort_2021_mpi_rome.cfg index e47a466e..b7341388 100644 --- a/config/ifort_2021_mpi_rome.cfg +++ b/config/ifort_2021_mpi_rome.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : mpiifort -fpic +FC : mpiifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 -DINTEL diff --git a/config/ifort_2021_rome.cfg b/config/ifort_2021_rome.cfg index 504438c9..1d2d8c77 100644 --- a/config/ifort_2021_rome.cfg +++ b/config/ifort_2021_rome.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : ifort -fpic +FC : ifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 -DINTEL diff --git a/config/ifort_2021_sse4.cfg b/config/ifort_2021_sse4.cfg index 07c3ebb8..e43147ba 100644 --- a/config/ifort_2021_sse4.cfg +++ b/config/ifort_2021_sse4.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : ifort -fpic +FC : ifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 -DINTEL diff --git a/config/ifort_2021_sse4_mpi.cfg b/config/ifort_2021_sse4_mpi.cfg index f3fa0eaa..1914988b 100644 --- a/config/ifort_2021_sse4_mpi.cfg +++ b/config/ifort_2021_sse4_mpi.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : mpiifort -fpic +FC : mpiifort -fpic -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=32 -DMPI -DINTEL diff --git a/config/ifort_2021_xHost.cfg 
b/config/ifort_2021_xHost.cfg index 9170b059..0dfce550 100644 --- a/config/ifort_2021_xHost.cfg +++ b/config/ifort_2021_xHost.cfg @@ -6,7 +6,7 @@ # --align=32 : Align all provided arrays on a 32-byte boundary # [COMMON] -FC : ifort -fpic -diag-disable 5462 +FC : ifort -fpic -diag-disable=5462 -diag-disable=10448 LAPACK_LIB : -mkl=parallel IRPF90 : irpf90 IRPF90_FLAGS : --ninja --align=64 -DINTEL diff --git a/ocaml/Zmatrix.ml b/ocaml/Zmatrix.ml index 9e6ab2f8..6427f734 100644 --- a/ocaml/Zmatrix.ml +++ b/ocaml/Zmatrix.ml @@ -58,17 +58,32 @@ let int_of_atom_id : atom_id -> int = fun x -> x let float_of_distance : float StringMap.t -> distance -> float = fun map -> function | Value x -> x - | Label s -> StringMap.find s map + | Label s -> begin + try StringMap.find s map with + | Not_found -> + Printf.sprintf "Zmatrix error: distance %s undefined" s + |> failwith + end let float_of_angle : float StringMap.t -> angle -> float = fun map -> function | Value x -> x - | Label s -> StringMap.find s map + | Label s -> begin + try StringMap.find s map with + | Not_found -> + Printf.sprintf "Zmatrix error: angle %s undefined" s + |> failwith + end let float_of_dihedral : float StringMap.t -> dihedral -> float = fun map -> function | Value x -> x - | Label s -> StringMap.find s map + | Label s -> begin + try StringMap.find s map with + | Not_found -> + Printf.sprintf "Zmatrix error: dihedral %s undefined" s + |> failwith + end type line = diff --git a/plugins/local/ao_many_one_e_ints/NEED b/plugins/local/ao_many_one_e_ints/NEED index c57219cd..6e16c74a 100644 --- a/plugins/local/ao_many_one_e_ints/NEED +++ b/plugins/local/ao_many_one_e_ints/NEED @@ -4,3 +4,4 @@ becke_numerical_grid mo_one_e_ints dft_utils_in_r tc_keywords +hamiltonian diff --git a/plugins/local/ao_many_one_e_ints/ao_erf_gauss.irp.f b/plugins/local/ao_many_one_e_ints/ao_erf_gauss.irp.f index 823536cc..46124c44 100644 --- a/plugins/local/ao_many_one_e_ints/ao_erf_gauss.irp.f +++ b/plugins/local/ao_many_one_e_ints/ao_erf_gauss.irp.f @@ -98,7 +98,7 @@ double precision function phi_j_erf_mu_r_phi(i, j, mu_in, C_center) enddo enddo -end function phi_j_erf_mu_r_phi +end ! --- @@ -201,7 +201,7 @@ subroutine erf_mu_gauss_ij_ao(i, j, mu, C_center, delta, gauss_ints) enddo enddo -end subroutine erf_mu_gauss_ij_ao +end ! --- @@ -266,7 +266,7 @@ subroutine NAI_pol_x_mult_erf_ao(i_ao, j_ao, mu_in, C_center, ints) enddo enddo -end subroutine NAI_pol_x_mult_erf_ao +end ! --- @@ -340,7 +340,7 @@ subroutine NAI_pol_x_mult_erf_ao_v0(i_ao, j_ao, mu_in, C_center, LD_C, ints, LD_ deallocate(integral) -end subroutine NAI_pol_x_mult_erf_ao_v0 +end ! --- @@ -420,7 +420,7 @@ subroutine NAI_pol_x_mult_erf_ao_v(i_ao, j_ao, mu_in, C_center, LD_C, ints, LD_i deallocate(integral) -end subroutine NAI_pol_x_mult_erf_ao_v +end ! --- @@ -479,7 +479,7 @@ double precision function NAI_pol_x_mult_erf_ao_x(i_ao, j_ao, mu_in, C_center) enddo enddo -end function NAI_pol_x_mult_erf_ao_x +end ! --- @@ -538,7 +538,7 @@ double precision function NAI_pol_x_mult_erf_ao_y(i_ao, j_ao, mu_in, C_center) enddo enddo -end function NAI_pol_x_mult_erf_ao_y +end ! --- @@ -597,7 +597,7 @@ double precision function NAI_pol_x_mult_erf_ao_z(i_ao, j_ao, mu_in, C_center) enddo enddo -end function NAI_pol_x_mult_erf_ao_z +end ! --- @@ -667,7 +667,7 @@ double precision function NAI_pol_x_mult_erf_ao_with1s_x(i_ao, j_ao, beta, B_cen enddo enddo -end function NAI_pol_x_mult_erf_ao_with1s_x +end ! 
--- @@ -737,7 +737,7 @@ double precision function NAI_pol_x_mult_erf_ao_with1s_y(i_ao, j_ao, beta, B_cen enddo enddo -end function NAI_pol_x_mult_erf_ao_with1s_y +end ! --- @@ -807,7 +807,7 @@ double precision function NAI_pol_x_mult_erf_ao_with1s_z(i_ao, j_ao, beta, B_cen enddo enddo -end function NAI_pol_x_mult_erf_ao_with1s_z +end ! --- @@ -880,7 +880,7 @@ subroutine NAI_pol_x_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_cen enddo enddo -end subroutine NAI_pol_x_mult_erf_ao_with1s +end ! --- @@ -967,7 +967,7 @@ subroutine NAI_pol_x_mult_erf_ao_with1s_v0(i_ao, j_ao, beta, B_center, LD_B, mu_ deallocate(integral) -end subroutine NAI_pol_x_mult_erf_ao_with1s_v0 +end ! --- @@ -1057,7 +1057,7 @@ subroutine NAI_pol_x_mult_erf_ao_with1s_v(i_ao, j_ao, beta, B_center, LD_B, mu_i deallocate(integral) -end subroutine NAI_pol_x_mult_erf_ao_with1s_v +end ! --- @@ -1175,7 +1175,7 @@ subroutine NAI_pol_x2_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_ce enddo enddo -end subroutine NAI_pol_x2_mult_erf_ao_with1s +end ! --- @@ -1241,7 +1241,7 @@ subroutine NAI_pol_x2_mult_erf_ao(i_ao, j_ao, mu_in, C_center, ints) enddo enddo -end subroutine NAI_pol_x2_mult_erf_ao +end ! --- @@ -1320,7 +1320,7 @@ subroutine NAI_pol_012_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_c enddo enddo -end subroutine NAI_pol_012_mult_erf_ao_with1s +end ! --- @@ -1328,7 +1328,7 @@ subroutine NAI_pol_012_mult_erf_ao(i_ao, j_ao, mu_in, C_center, ints) BEGIN_DOC ! - ! Computes the following integral : + ! Computes the following integrals : ! ! int(1) = $\int_{-\infty}^{infty} dr x^0 * \chi_i(r) \chi_j(r) \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. ! @@ -1395,7 +1395,7 @@ subroutine NAI_pol_012_mult_erf_ao(i_ao, j_ao, mu_in, C_center, ints) enddo enddo -end subroutine NAI_pol_012_mult_erf_ao +end ! --- diff --git a/plugins/local/ao_many_one_e_ints/ao_gaus_gauss.irp.f b/plugins/local/ao_many_one_e_ints/ao_gaus_gauss.irp.f index d2115d9e..1e4f340c 100644 --- a/plugins/local/ao_many_one_e_ints/ao_gaus_gauss.irp.f +++ b/plugins/local/ao_many_one_e_ints/ao_gaus_gauss.irp.f @@ -152,7 +152,7 @@ double precision function overlap_gauss_r12_ao(D_center, delta, i, j) enddo enddo -end function overlap_gauss_r12_ao +end ! -- @@ -199,7 +199,7 @@ double precision function overlap_abs_gauss_r12_ao(D_center, delta, i, j) enddo enddo -end function overlap_gauss_r12_ao +end ! -- @@ -257,7 +257,7 @@ subroutine overlap_gauss_r12_ao_v(D_center, LD_D, delta, i, j, resv, LD_resv, n_ deallocate(analytical_j) -end subroutine overlap_gauss_r12_ao_v +end ! --- @@ -327,7 +327,7 @@ double precision function overlap_gauss_r12_ao_with1s(B_center, beta, D_center, enddo enddo -end function overlap_gauss_r12_ao_with1s +end ! --- @@ -420,7 +420,86 @@ subroutine overlap_gauss_r12_ao_with1s_v(B_center, beta, D_center, LD_D, delta, deallocate(fact_g, G_center, analytical_j) -end subroutine overlap_gauss_r12_ao_with1s_v +end + +! --- + +subroutine overlap_gauss_r12_ao_012(D_center, delta, i, j, ints) + + BEGIN_DOC + ! + ! Computes the following integrals : + ! + ! ints(1) = $\int_{-\infty}^{infty} dr x^0 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! + ! ints(2) = $\int_{-\infty}^{infty} dr x^1 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! ints(3) = $\int_{-\infty}^{infty} dr y^1 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! ints(4) = $\int_{-\infty}^{infty} dr z^1 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! + ! 
ints(5) = $\int_{-\infty}^{infty} dr x^2 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! ints(6) = $\int_{-\infty}^{infty} dr y^2 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! ints(7) = $\int_{-\infty}^{infty} dr z^2 * \chi_i(r) \chi_j(r) e^{-\delta (r - D_center)^2} + ! + END_DOC + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: i, j + double precision, intent(in) :: delta, D_center(3) + double precision, intent(out) :: ints(7) + + integer :: k, l, m + integer :: power_A(3), power_B(3), power_A1(3), power_A2(3) + double precision :: A_center(3), B_center(3), alpha, beta, coef1, coef + double precision :: integral0, integral1, integral2 + + double precision, external :: overlap_gauss_r12 + + ints = 0.d0 + + if(ao_overlap_abs(j,i).lt.1.d-12) then + return + endif + + power_A(1:3) = ao_power(i,1:3) + power_B(1:3) = ao_power(j,1:3) + + A_center(1:3) = nucl_coord(ao_nucl(i),1:3) + B_center(1:3) = nucl_coord(ao_nucl(j),1:3) + + do l = 1, ao_prim_num(i) + alpha = ao_expo_ordered_transp (l,i) + coef1 = ao_coef_normalized_ordered_transp(l,i) + + do k = 1, ao_prim_num(j) + beta = ao_expo_ordered_transp(k,j) + coef = coef1 * ao_coef_normalized_ordered_transp(k,j) + + if(dabs(coef) .lt. 1d-12) cycle + + integral0 = overlap_gauss_r12(D_center, delta, A_center, B_center, power_A, power_B, alpha, beta) + + ints(1) += coef * integral0 + + do m = 1, 3 + power_A1 = power_A + power_A1(m) += 1 + integral1 = overlap_gauss_r12(D_center, delta, A_center, B_center, power_A1, power_B, alpha, beta) + ints(1+m) += coef * (integral1 + A_center(m)*integral0) + + power_A2 = power_A + power_A2(m) += 2 + integral2 = overlap_gauss_r12(D_center, delta, A_center, B_center, power_A2, power_B, alpha, beta) + ints(4+m) += coef * (integral2 + A_center(m) * (2.d0*integral1 + A_center(m)*integral0)) + enddo + + enddo ! k + enddo ! l + + return +end ! --- diff --git a/plugins/local/ao_many_one_e_ints/grad2_jmu_manu.irp.f b/plugins/local/ao_many_one_e_ints/grad2_jmu_manu.irp.f index 14170ede..5879d83f 100644 --- a/plugins/local/ao_many_one_e_ints/grad2_jmu_manu.irp.f +++ b/plugins/local/ao_many_one_e_ints/grad2_jmu_manu.irp.f @@ -1,11 +1,11 @@ ! --- -BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, int2_grad1u2_grad2u2_env2_test, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [1 - erf(mu r12)]^2 + ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 [1 - erf(mu r12)]^2 ! END_DOC @@ -15,30 +15,30 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test, (ao_num, ao_n double precision :: coef, beta, B_center(3) double precision :: tmp double precision :: wall0, wall1 - double precision :: int_gauss, dsqpi_3_2, int_j1b + double precision :: int_gauss, dsqpi_3_2, int_env double precision :: factor_ij_1s, beta_ij, center_ij_1s(3), sq_pi_3_2 double precision, allocatable :: int_fit_v(:) double precision, external :: overlap_gauss_r12_ao double precision, external :: overlap_gauss_r12_ao_with1s - print*, ' providing int2_grad1u2_grad2u2_j1b2_test ...' + print*, ' providing int2_grad1u2_grad2u2_env2_test ...' 
sq_pi_3_2 = (dacos(-1.d0))**(1.5d0) - provide mu_erf final_grid_points_transp j1b_pen List_comb_thr_b3_coef + provide mu_erf final_grid_points_transp List_comb_thr_b3_coef call wall_time(wall0) - int2_grad1u2_grad2u2_j1b2_test(:,:,:) = 0.d0 + int2_grad1u2_grad2u2_env2_test(:,:,:) = 0.d0 - !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit_v, tmp,int_gauss,int_j1b,factor_ij_1s,beta_ij,center_ij_1s) & - !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points,List_comb_thr_b3_size, & - !$OMP final_grid_points_transp, ng_fit_jast, & - !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & - !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, & - !$OMP List_comb_thr_b3_cent, int2_grad1u2_grad2u2_j1b2_test, ao_abs_comb_b3_j1b, & - !$OMP ao_overlap_abs,sq_pi_3_2,thrsh_cycle_tc) + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit_v, tmp,int_gauss,int_env,factor_ij_1s,beta_ij,center_ij_1s) & + !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points,List_comb_thr_b3_size, & + !$OMP final_grid_points_transp, ng_fit_jast, & + !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & + !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, & + !$OMP List_comb_thr_b3_cent, int2_grad1u2_grad2u2_env2_test, ao_abs_comb_b3_env, & + !$OMP ao_overlap_abs,sq_pi_3_2,thrsh_cycle_tc) !$OMP DO SCHEDULE(dynamic) do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) @@ -54,13 +54,13 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test, (ao_num, ao_n ! i_1s = 1 ! --- --- --- - int_j1b = ao_abs_comb_b3_j1b(1,j,i) + int_env = ao_abs_comb_b3_env(1,j,i) do i_fit = 1, ng_fit_jast expo_fit = expo_gauss_1_erf_x_2(i_fit) coef_fit = -0.25d0 * coef_gauss_1_erf_x_2(i_fit) -! if(dabs(coef_fit*int_j1b*sq_pi_3_2*(expo_fit)**(-1.5d0)).lt.thrsh_cycle_tc)cycle +! if(dabs(coef_fit*int_env*sq_pi_3_2*(expo_fit)**(-1.5d0)).lt.thrsh_cycle_tc)cycle int_gauss = overlap_gauss_r12_ao(r, expo_fit, i, j) - int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) += coef_fit * int_gauss + int2_grad1u2_grad2u2_env2_test(j,i,ipoint) += coef_fit * int_gauss enddo ! --- --- --- @@ -71,7 +71,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test, (ao_num, ao_n coef = List_comb_thr_b3_coef (i_1s,j,i) beta = List_comb_thr_b3_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b3_j1b(i_1s,j,i) + int_env = ao_abs_comb_b3_env(i_1s,j,i) B_center(1) = List_comb_thr_b3_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b3_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b3_cent(3,i_1s,j,i) @@ -81,11 +81,11 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test, (ao_num, ao_n !DIR$ FORCEINLINE call gaussian_product(expo_fit,r,beta,B_center,factor_ij_1s,beta_ij,center_ij_1s) coef_fit = -0.25d0 * coef_gauss_1_erf_x_2(i_fit) * coef -! if(dabs(coef_fit*factor_ij_1s*int_j1b*sq_pi_3_2*(beta_ij)**(-1.5d0)).lt.thrsh_cycle_tc)cycle +! if(dabs(coef_fit*factor_ij_1s*int_env*sq_pi_3_2*(beta_ij)**(-1.5d0)).lt.thrsh_cycle_tc)cycle ! call overlap_gauss_r12_ao_with1s_v(B_center, beta, final_grid_points_transp, & ! 
expo_fit, i, j, int_fit_v, n_points_final_grid) int_gauss = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) - int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) += coef_fit * int_gauss + int2_grad1u2_grad2u2_env2_test(j,i,ipoint) += coef_fit * int_gauss enddo enddo @@ -98,26 +98,26 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test, (ao_num, ao_n do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = 1, i-1 - int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) = int2_grad1u2_grad2u2_j1b2_test(i,j,ipoint) + int2_grad1u2_grad2u2_env2_test(j,i,ipoint) = int2_grad1u2_grad2u2_env2_test(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_grad1u2_grad2u2_j1b2_test', wall1 - wall0 + print*, ' wall time for int2_grad1u2_grad2u2_env2_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test_v, (ao_num, ao_num, n_points_final_grid)] -! -! BEGIN_DOC -! ! -! ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [1 - erf(mu r12)]^2 -! ! -! END_DOC -! +BEGIN_PROVIDER [double precision, int2_grad1u2_grad2u2_env2_test_v, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 [1 - erf(mu r12)]^2 + ! + END_DOC + implicit none integer :: i, j, ipoint, i_1s, i_fit double precision :: r(3), expo_fit, coef_fit @@ -128,24 +128,24 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test_v, (ao_num, ao double precision, allocatable :: int_fit_v(:),big_array(:,:,:) double precision, external :: overlap_gauss_r12_ao_with1s - print*, ' providing int2_grad1u2_grad2u2_j1b2_test_v ...' + print*, ' providing int2_grad1u2_grad2u2_env2_test_v ...' - provide mu_erf final_grid_points_transp j1b_pen + provide mu_erf final_grid_points_transp call wall_time(wall0) - double precision :: int_j1b + double precision :: int_env big_array(:,:,:) = 0.d0 allocate(big_array(n_points_final_grid,ao_num, ao_num)) !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center,& - !$OMP coef_fit, expo_fit, int_fit_v, tmp,int_j1b) & - !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b3_size,& - !$OMP final_grid_points_transp, ng_fit_jast, & - !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & - !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, & - !$OMP List_comb_thr_b3_cent, big_array,& - !$OMP ao_abs_comb_b3_j1b,ao_overlap_abs,thrsh_cycle_tc) -! + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center,& + !$OMP coef_fit, expo_fit, int_fit_v, tmp,int_env) & + !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b3_size,& + !$OMP final_grid_points_transp, ng_fit_jast, & + !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & + !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, & + !$OMP List_comb_thr_b3_cent, big_array,& + !$OMP ao_abs_comb_b3_env,ao_overlap_abs,thrsh_cycle_tc) + ! 
allocate(int_fit_v(n_points_final_grid)) !$OMP DO SCHEDULE(dynamic) do i = 1, ao_num @@ -159,7 +159,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test_v, (ao_num, ao coef = List_comb_thr_b3_coef (i_1s,j,i) beta = List_comb_thr_b3_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b3_j1b(i_1s,j,i) + int_env = ao_abs_comb_b3_env(i_1s,j,i) B_center(1) = List_comb_thr_b3_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b3_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b3_cent(3,i_1s,j,i) @@ -187,7 +187,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test_v, (ao_num, ao do i = 1, ao_num do j = i, ao_num do ipoint = 1, n_points_final_grid - int2_grad1u2_grad2u2_j1b2_test_v(j,i,ipoint) = big_array(ipoint,j,i) + int2_grad1u2_grad2u2_env2_test_v(j,i,ipoint) = big_array(ipoint,j,i) enddo enddo enddo @@ -195,23 +195,23 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2_test_v, (ao_num, ao do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_grad1u2_grad2u2_j1b2_test_v(j,i,ipoint) = big_array(ipoint,i,j) + int2_grad1u2_grad2u2_env2_test_v(j,i,ipoint) = big_array(ipoint,i,j) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_grad1u2_grad2u2_j1b2_test_v', wall1 - wall0 + print*, ' wall time for int2_grad1u2_grad2u2_env2_test_v (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_u2_j1b2_test, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, int2_u2_env2_test, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [u_12^mu]^2 + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 [u_12^mu]^2 ! END_DOC @@ -219,29 +219,29 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2_test, (ao_num, ao_num, n_points_ integer :: i, j, ipoint, i_1s, i_fit double precision :: r(3), int_fit, expo_fit, coef_fit double precision :: coef, beta, B_center(3), tmp - double precision :: wall0, wall1,int_j1b + double precision :: wall0, wall1,int_env double precision, external :: overlap_gauss_r12_ao double precision, external :: overlap_gauss_r12_ao_with1s double precision :: factor_ij_1s,beta_ij,center_ij_1s(3),sq_pi_3_2 - print*, ' providing int2_u2_j1b2_test ...' + print*, ' providing int2_u2_env2_test ...' sq_pi_3_2 = (dacos(-1.d0))**(1.5d0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf final_grid_points call wall_time(wall0) - int2_u2_j1b2_test = 0.d0 + int2_u2_env2_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, tmp, int_j1b,factor_ij_1s,beta_ij,center_ij_1s) & + !$OMP coef_fit, expo_fit, int_fit, tmp, int_env,factor_ij_1s,beta_ij,center_ij_1s) & !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b3_size, & !$OMP final_grid_points, ng_fit_jast, & !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo,sq_pi_3_2, & - !$OMP List_comb_thr_b3_cent, int2_u2_j1b2_test,ao_abs_comb_b3_j1b,thrsh_cycle_tc) + !$OMP List_comb_thr_b3_cent, int2_u2_env2_test,ao_abs_comb_b3_env,thrsh_cycle_tc) !$OMP DO do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) @@ -257,12 +257,12 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2_test, (ao_num, ao_num, n_points_ ! i_1s = 1 ! 
--- --- --- - int_j1b = ao_abs_comb_b3_j1b(1,j,i) - if(dabs(int_j1b).lt.thrsh_cycle_tc) cycle + int_env = ao_abs_comb_b3_env(1,j,i) + if(dabs(int_env).lt.thrsh_cycle_tc) cycle do i_fit = 1, ng_fit_jast expo_fit = expo_gauss_j_mu_x_2(i_fit) coef_fit = coef_gauss_j_mu_x_2(i_fit) -! if(dabs(coef_fit*int_j1b*sq_pi_3_2*(expo_fit)**(-1.5d0)).lt.thrsh_cycle_tc)cycle +! if(dabs(coef_fit*int_env*sq_pi_3_2*(expo_fit)**(-1.5d0)).lt.thrsh_cycle_tc)cycle int_fit = overlap_gauss_r12_ao(r, expo_fit, i, j) tmp += coef_fit * int_fit enddo @@ -275,8 +275,8 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2_test, (ao_num, ao_num, n_points_ coef = List_comb_thr_b3_coef (i_1s,j,i) beta = List_comb_thr_b3_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b3_j1b(i_1s,j,i) -! if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b3_env(i_1s,j,i) +! if(dabs(coef)*dabs(int_env).lt.thrsh_cycle_tc)cycle B_center(1) = List_comb_thr_b3_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b3_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b3_cent(3,i_1s,j,i) @@ -286,13 +286,13 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2_test, (ao_num, ao_num, n_points_ coef_fit = coef_gauss_j_mu_x_2(i_fit) !DIR$ FORCEINLINE call gaussian_product(expo_fit,r,beta,B_center,factor_ij_1s,beta_ij,center_ij_1s) -! if(dabs(coef_fit*coef*factor_ij_1s*int_j1b*sq_pi_3_2*(beta_ij)**(-1.5d0)).lt.thrsh_cycle_tc)cycle +! if(dabs(coef_fit*coef*factor_ij_1s*int_env*sq_pi_3_2*(beta_ij)**(-1.5d0)).lt.thrsh_cycle_tc)cycle int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) tmp += coef * coef_fit * int_fit enddo enddo - int2_u2_j1b2_test(j,i,ipoint) = tmp + int2_u2_env2_test(j,i,ipoint) = tmp enddo enddo enddo @@ -302,23 +302,23 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2_test, (ao_num, ao_num, n_points_ do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_u2_j1b2_test(j,i,ipoint) = int2_u2_j1b2_test(i,j,ipoint) + int2_u2_env2_test(j,i,ipoint) = int2_u2_env2_test(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u2_j1b2_test', wall1 - wall0 + print*, ' wall time for int2_u2_env2_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2_test, (ao_num, ao_num, n_points_final_grid, 3)] +BEGIN_PROVIDER [double precision, int2_u_grad1u_x_env2_test, (ao_num,ao_num,n_points_final_grid,3)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] r2 + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 u_12^mu [\grad_1 u_12^mu] r2 ! END_DOC @@ -327,27 +327,27 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2_test, (ao_num, ao_num, n double precision :: r(3), int_fit(3), expo_fit, coef_fit double precision :: coef, beta, B_center(3), dist double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, coef_tmp - double precision :: tmp_x, tmp_y, tmp_z, int_j1b + double precision :: tmp_x, tmp_y, tmp_z, int_env double precision :: wall0, wall1, sq_pi_3_2,sq_alpha - print*, ' providing int2_u_grad1u_x_j1b2_test ...' + print*, ' providing int2_u_grad1u_x_env2_test ...' 
sq_pi_3_2 = dacos(-1.D0)**(1.d0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf final_grid_points call wall_time(wall0) - int2_u_grad1u_x_j1b2_test = 0.d0 + int2_u_grad1u_x_env2_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & !$OMP coef_fit, expo_fit, int_fit, alpha_1s, dist, & !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp, & - !$OMP tmp_x, tmp_y, tmp_z,int_j1b,sq_alpha) & + !$OMP tmp_x, tmp_y, tmp_z,int_env,sq_alpha) & !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b3_size, & !$OMP final_grid_points, ng_fit_jast, & !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, & - !$OMP List_comb_thr_b3_cent, int2_u_grad1u_x_j1b2_test,ao_abs_comb_b3_j1b,sq_pi_3_2,thrsh_cycle_tc) + !$OMP List_comb_thr_b3_cent, int2_u_grad1u_x_env2_test,ao_abs_comb_b3_env,sq_pi_3_2,thrsh_cycle_tc) !$OMP DO do ipoint = 1, n_points_final_grid @@ -365,8 +365,8 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2_test, (ao_num, ao_num, n coef = List_comb_thr_b3_coef (i_1s,j,i) beta = List_comb_thr_b3_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b3_j1b(i_1s,j,i) - if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b3_env(i_1s,j,i) + if(dabs(coef)*dabs(int_env).lt.thrsh_cycle_tc)cycle B_center(1) = List_comb_thr_b3_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b3_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b3_cent(3,i_1s,j,i) @@ -389,7 +389,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2_test, (ao_num, ao_num, n expo_coef_1s = beta * expo_fit * alpha_1s_inv * dist coef_tmp = coef * coef_fit * dexp(-expo_coef_1s) sq_alpha = alpha_1s_inv * dsqrt(alpha_1s_inv) -! if(dabs(coef_tmp*int_j1b*sq_pi_3_2*sq_alpha) .lt. thrsh_cycle_tc) cycle +! if(dabs(coef_tmp*int_env*sq_pi_3_2*sq_alpha) .lt. thrsh_cycle_tc) cycle call NAI_pol_x_mult_erf_ao_with1s(i, j, alpha_1s, centr_1s, 1.d+9, r, int_fit) @@ -402,9 +402,9 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2_test, (ao_num, ao_num, n enddo - int2_u_grad1u_x_j1b2_test(j,i,ipoint,1) = tmp_x - int2_u_grad1u_x_j1b2_test(j,i,ipoint,2) = tmp_y - int2_u_grad1u_x_j1b2_test(j,i,ipoint,3) = tmp_z + int2_u_grad1u_x_env2_test(j,i,ipoint,1) = tmp_x + int2_u_grad1u_x_env2_test(j,i,ipoint,2) = tmp_y + int2_u_grad1u_x_env2_test(j,i,ipoint,3) = tmp_z enddo enddo enddo @@ -414,24 +414,25 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2_test, (ao_num, ao_num, n do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_u_grad1u_x_j1b2_test(j,i,ipoint,1) = int2_u_grad1u_x_j1b2_test(i,j,ipoint,1) - int2_u_grad1u_x_j1b2_test(j,i,ipoint,2) = int2_u_grad1u_x_j1b2_test(i,j,ipoint,2) - int2_u_grad1u_x_j1b2_test(j,i,ipoint,3) = int2_u_grad1u_x_j1b2_test(i,j,ipoint,3) + int2_u_grad1u_x_env2_test(j,i,ipoint,1) = int2_u_grad1u_x_env2_test(i,j,ipoint,1) + int2_u_grad1u_x_env2_test(j,i,ipoint,2) = int2_u_grad1u_x_env2_test(i,j,ipoint,2) + int2_u_grad1u_x_env2_test(j,i,ipoint,3) = int2_u_grad1u_x_env2_test(i,j,ipoint,3) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u_grad1u_x_j1b2_test', wall1 - wall0 + print*, ' wall time for int2_u_grad1u_x_env2_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER +! --- -BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, int2_u_grad1u_env2_test, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! 
int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 u_12^mu [\grad_1 u_12^mu] ! END_DOC @@ -442,31 +443,31 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_p double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, tmp double precision :: wall0, wall1 double precision, external :: NAI_pol_mult_erf_ao_with1s - double precision :: j12_mu_r12,int_j1b + double precision :: j12_mu_r12,int_env double precision :: sigma_ij,dist_ij_ipoint,dsqpi_3_2 double precision :: beta_ij,center_ij_1s(3),factor_ij_1s - print*, ' providing int2_u_grad1u_j1b2_test ...' + print*, ' providing int2_u_grad1u_env2_test ...' dsqpi_3_2 = (dacos(-1.d0))**(1.5d0) - provide mu_erf final_grid_points j1b_pen ao_overlap_abs List_comb_thr_b3_cent + provide mu_erf final_grid_points ao_overlap_abs List_comb_thr_b3_cent call wall_time(wall0) - int2_u_grad1u_j1b2_test = 0.d0 + int2_u_grad1u_env2_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, dist, & !$OMP beta_ij,center_ij_1s,factor_ij_1s, & - !$OMP int_j1b,alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp) & + !$OMP int_env,alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp) & !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b3_size, & !$OMP final_grid_points, ng_fit_jast, & !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & !$OMP ao_prod_dist_grid, ao_prod_sigma, ao_overlap_abs_grid,ao_prod_center,dsqpi_3_2, & - !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, ao_abs_comb_b3_j1b, & - !$OMP List_comb_thr_b3_cent, int2_u_grad1u_j1b2_test,thrsh_cycle_tc) + !$OMP List_comb_thr_b3_coef, List_comb_thr_b3_expo, ao_abs_comb_b3_env, & + !$OMP List_comb_thr_b3_cent, int2_u_grad1u_env2_test,thrsh_cycle_tc) !$OMP DO do ipoint = 1, n_points_final_grid do i = 1, ao_num @@ -484,11 +485,9 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_p ! i_1s = 1 ! --- --- --- - int_j1b = ao_abs_comb_b3_j1b(1,j,i) -! if(dabs(int_j1b).lt.thrsh_cycle_tc) cycle + int_env = ao_abs_comb_b3_env(1,j,i) do i_fit = 1, ng_fit_jast expo_fit = expo_gauss_j_mu_1_erf(i_fit) -! if(dabs(int_j1b)*dsqpi_3_2*expo_fit**(-1.5d0).lt.thrsh_cycle_tc) cycle coef_fit = coef_gauss_j_mu_1_erf(i_fit) int_fit = NAI_pol_mult_erf_ao_with1s(i, j, expo_fit, r, 1.d+9, r) tmp += coef_fit * int_fit @@ -502,8 +501,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_p coef = List_comb_thr_b3_coef (i_1s,j,i) beta = List_comb_thr_b3_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b3_j1b(i_1s,j,i) -! if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b3_env(i_1s,j,i) B_center(1) = List_comb_thr_b3_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b3_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b3_cent(3,i_1s,j,i) @@ -513,7 +511,6 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_p do i_fit = 1, ng_fit_jast expo_fit = expo_gauss_j_mu_1_erf(i_fit) call gaussian_product(expo_fit,r,beta,B_center,factor_ij_1s,beta_ij,center_ij_1s) -! 
if(factor_ij_1s*dabs(coef*int_j1b)*dsqpi_3_2*beta_ij**(-1.5d0).lt.thrsh_cycle_tc)cycle coef_fit = coef_gauss_j_mu_1_erf(i_fit) alpha_1s = beta + expo_fit @@ -533,7 +530,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_p enddo enddo - int2_u_grad1u_j1b2_test(j,i,ipoint) = tmp + int2_u_grad1u_env2_test(j,i,ipoint) = tmp enddo enddo enddo @@ -543,14 +540,15 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2_test, (ao_num, ao_num, n_p do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_u_grad1u_j1b2_test(j,i,ipoint) = int2_u_grad1u_j1b2_test(i,j,ipoint) + int2_u_grad1u_env2_test(j,i,ipoint) = int2_u_grad1u_env2_test(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u_grad1u_j1b2_test', wall1 - wall0 + print*, ' wall time for int2_u_grad1u_env2_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- + diff --git a/plugins/local/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/plugins/local/ao_many_one_e_ints/grad2_jmu_modif.irp.f index fda2db82..bdcaac9d 100644 --- a/plugins/local/ao_many_one_e_ints/grad2_jmu_modif.irp.f +++ b/plugins/local/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -6,7 +6,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2, (ao_num, ao_num, n_poin BEGIN_DOC ! - ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) [1 - erf(mu r12)]^2 + ! \frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) [1 - erf(mu r12)]^2 ! END_DOC @@ -21,7 +21,8 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2, (ao_num, ao_num, n_poin print*, ' providing int2_grad1u2_grad2u2 ...' call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf + provide final_grid_points int2_grad1u2_grad2u2 = 0.d0 @@ -44,7 +45,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2, (ao_num, ao_num, n_poin expo_fit = expo_gauss_1_erf_x_2(i_fit) coef_fit = coef_gauss_1_erf_x_2(i_fit) - tmp += -0.25d0 * coef_fit * overlap_gauss_r12_ao(r, expo_fit, i, j) + tmp += 0.25d0 * coef_fit * overlap_gauss_r12_ao(r, expo_fit, i, j) enddo int2_grad1u2_grad2u2(j,i,ipoint) = tmp @@ -63,17 +64,17 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2, (ao_num, ao_num, n_poin enddo call wall_time(wall1) - print*, ' wall time for int2_grad1u2_grad2u2 =', wall1 - wall0 + print*, ' wall time for int2_grad1u2_grad2u2 (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, int2_grad1u2_grad2u2_env2, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [1 - erf(mu r12)]^2 + ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 [1 - erf(mu r12)]^2 ! END_DOC @@ -87,21 +88,22 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n double precision, external :: overlap_gauss_r12_ao double precision, external :: overlap_gauss_r12_ao_with1s - print*, ' providing int2_grad1u2_grad2u2_j1b2 ...' + print*, ' providing int2_grad1u2_grad2u2_env2 ...' 
call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf + provide final_grid_points - int2_grad1u2_grad2u2_j1b2 = 0.d0 + int2_grad1u2_grad2u2_env2 = 0.d0 - !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & - !$OMP final_grid_points, ng_fit_jast, & - !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & - !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_grad1u2_grad2u2_j1b2) + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_square_size, & + !$OMP final_grid_points, ng_fit_jast, & + !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & + !$OMP List_env1s_square_coef, List_env1s_square_expo, & + !$OMP List_env1s_square_cent, int2_grad1u2_grad2u2_env2) !$OMP DO do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) @@ -125,14 +127,14 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n ! --- - do i_1s = 2, List_all_comb_b3_size + do i_1s = 2, List_env1s_square_size - coef = List_all_comb_b3_coef (i_1s) + coef = List_env1s_square_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b3_expo (i_1s) - B_center(1) = List_all_comb_b3_cent(1,i_1s) - B_center(2) = List_all_comb_b3_cent(2,i_1s) - B_center(3) = List_all_comb_b3_cent(3,i_1s) + beta = List_env1s_square_expo (i_1s) + B_center(1) = List_env1s_square_cent(1,i_1s) + B_center(2) = List_env1s_square_cent(2,i_1s) + B_center(3) = List_env1s_square_cent(3,i_1s) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) @@ -143,7 +145,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n enddo - int2_grad1u2_grad2u2_j1b2(j,i,ipoint) = tmp + int2_grad1u2_grad2u2_env2(j,i,ipoint) = tmp enddo enddo enddo @@ -153,23 +155,23 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_grad1u2_grad2u2_j1b2(j,i,ipoint) = int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + int2_grad1u2_grad2u2_env2(j,i,ipoint) = int2_grad1u2_grad2u2_env2(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_grad1u2_grad2u2_j1b2 =', wall1 - wall0 + print*, ' wall time for int2_grad1u2_grad2u2_env2 (min) =', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, int2_u2_env2, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [u_12^mu]^2 + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 [u_12^mu]^2 ! END_DOC @@ -182,21 +184,22 @@ BEGIN_PROVIDER [double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_ double precision, external :: overlap_gauss_r12_ao double precision, external :: overlap_gauss_r12_ao_with1s - print*, ' providing int2_u2_j1b2 ...' + print*, ' providing int2_u2_env2 ...' 
call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf + provide final_grid_points - int2_u2_j1b2 = 0.d0 + int2_u2_env2 = 0.d0 - !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & - !$OMP final_grid_points, ng_fit_jast, & - !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & - !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_u2_j1b2) + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_square_size, & + !$OMP final_grid_points, ng_fit_jast, & + !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & + !$OMP List_env1s_square_coef, List_env1s_square_expo, & + !$OMP List_env1s_square_cent, int2_u2_env2) !$OMP DO do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) @@ -220,14 +223,14 @@ BEGIN_PROVIDER [double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_ ! --- - do i_1s = 2, List_all_comb_b3_size + do i_1s = 2, List_env1s_square_size - coef = List_all_comb_b3_coef (i_1s) + coef = List_env1s_square_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b3_expo (i_1s) - B_center(1) = List_all_comb_b3_cent(1,i_1s) - B_center(2) = List_all_comb_b3_cent(2,i_1s) - B_center(3) = List_all_comb_b3_cent(3,i_1s) + beta = List_env1s_square_expo (i_1s) + B_center(1) = List_env1s_square_cent(1,i_1s) + B_center(2) = List_env1s_square_cent(2,i_1s) + B_center(3) = List_env1s_square_cent(3,i_1s) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) @@ -238,7 +241,7 @@ BEGIN_PROVIDER [double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_ enddo - int2_u2_j1b2(j,i,ipoint) = tmp + int2_u2_env2(j,i,ipoint) = tmp enddo enddo enddo @@ -248,23 +251,23 @@ BEGIN_PROVIDER [double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_ do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_u2_j1b2(j,i,ipoint) = int2_u2_j1b2(i,j,ipoint) + int2_u2_env2(j,i,ipoint) = int2_u2_env2(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u2_j1b2', wall1 - wall0 + print*, ' wall time for int2_u2_env2 (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (ao_num, ao_num, n_points_final_grid, 3)] +BEGIN_PROVIDER [double precision, int2_u_grad1u_x_env2, (ao_num, ao_num, n_points_final_grid, 3)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] r2 + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 u_12^mu [\grad_1 u_12^mu] r2 ! END_DOC @@ -276,23 +279,24 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (ao_num, ao_num, n_poin double precision :: tmp_x, tmp_y, tmp_z double precision :: wall0, wall1 - print*, ' providing int2_u_grad1u_x_j1b2 ...' + print*, ' providing int2_u_grad1u_x_env2 ...' 
call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf + provide final_grid_points - int2_u_grad1u_x_j1b2 = 0.d0 + int2_u_grad1u_x_env2 = 0.d0 - !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, alpha_1s, dist, & - !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp, & - !$OMP tmp_x, tmp_y, tmp_z) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & - !$OMP final_grid_points, ng_fit_jast, & - !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & - !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_u_grad1u_x_j1b2) + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, alpha_1s, dist, & + !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp, & + !$OMP tmp_x, tmp_y, tmp_z) & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_square_size, & + !$OMP final_grid_points, ng_fit_jast, & + !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & + !$OMP List_env1s_square_coef, List_env1s_square_expo, & + !$OMP List_env1s_square_cent, int2_u_grad1u_x_env2) !$OMP DO do ipoint = 1, n_points_final_grid @@ -321,14 +325,14 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (ao_num, ao_num, n_poin ! --- - do i_1s = 2, List_all_comb_b3_size + do i_1s = 2, List_env1s_square_size - coef = List_all_comb_b3_coef (i_1s) + coef = List_env1s_square_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b3_expo (i_1s) - B_center(1) = List_all_comb_b3_cent(1,i_1s) - B_center(2) = List_all_comb_b3_cent(2,i_1s) - B_center(3) = List_all_comb_b3_cent(3,i_1s) + beta = List_env1s_square_expo (i_1s) + B_center(1) = List_env1s_square_cent(1,i_1s) + B_center(2) = List_env1s_square_cent(2,i_1s) + B_center(3) = List_env1s_square_cent(3,i_1s) dist = (B_center(1) - r(1)) * (B_center(1) - r(1)) & + (B_center(2) - r(2)) * (B_center(2) - r(2)) & + (B_center(3) - r(3)) * (B_center(3) - r(3)) @@ -355,9 +359,9 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (ao_num, ao_num, n_poin enddo - int2_u_grad1u_x_j1b2(j,i,ipoint,1) = tmp_x - int2_u_grad1u_x_j1b2(j,i,ipoint,2) = tmp_y - int2_u_grad1u_x_j1b2(j,i,ipoint,3) = tmp_z + int2_u_grad1u_x_env2(j,i,ipoint,1) = tmp_x + int2_u_grad1u_x_env2(j,i,ipoint,2) = tmp_y + int2_u_grad1u_x_env2(j,i,ipoint,3) = tmp_z enddo enddo enddo @@ -367,25 +371,25 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (ao_num, ao_num, n_poin do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_u_grad1u_x_j1b2(j,i,ipoint,1) = int2_u_grad1u_x_j1b2(i,j,ipoint,1) - int2_u_grad1u_x_j1b2(j,i,ipoint,2) = int2_u_grad1u_x_j1b2(i,j,ipoint,2) - int2_u_grad1u_x_j1b2(j,i,ipoint,3) = int2_u_grad1u_x_j1b2(i,j,ipoint,3) + int2_u_grad1u_x_env2(j,i,ipoint,1) = int2_u_grad1u_x_env2(i,j,ipoint,1) + int2_u_grad1u_x_env2(j,i,ipoint,2) = int2_u_grad1u_x_env2(i,j,ipoint,2) + int2_u_grad1u_x_env2(j,i,ipoint,3) = int2_u_grad1u_x_env2(i,j,ipoint,3) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u_grad1u_x_j1b2 = ', wall1 - wall0 + print*, ' wall time for int2_u_grad1u_x_env2 (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [ double precision, int2_u_grad1u_env2, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! 
int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2)^2 u_12^mu [\grad_1 u_12^mu] ! END_DOC @@ -397,22 +401,23 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points double precision :: wall0, wall1 double precision, external :: NAI_pol_mult_erf_ao_with1s - print*, ' providing int2_u_grad1u_j1b2 ...' + print*, ' providing int2_u_grad1u_env2 ...' call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf + provide final_grid_points - int2_u_grad1u_j1b2 = 0.d0 + int2_u_grad1u_env2 = 0.d0 - !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, dist, & - !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & - !$OMP final_grid_points, ng_fit_jast, & - !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & - !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_u_grad1u_j1b2) + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, dist, & + !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_square_size, & + !$OMP final_grid_points, ng_fit_jast, & + !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & + !$OMP List_env1s_square_coef, List_env1s_square_expo, & + !$OMP List_env1s_square_cent, int2_u_grad1u_env2) !$OMP DO do ipoint = 1, n_points_final_grid do i = 1, ao_num @@ -436,14 +441,14 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points ! --- - do i_1s = 2, List_all_comb_b3_size + do i_1s = 2, List_env1s_square_size - coef = List_all_comb_b3_coef (i_1s) + coef = List_env1s_square_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b3_expo (i_1s) - B_center(1) = List_all_comb_b3_cent(1,i_1s) - B_center(2) = List_all_comb_b3_cent(2,i_1s) - B_center(3) = List_all_comb_b3_cent(3,i_1s) + beta = List_env1s_square_expo (i_1s) + B_center(1) = List_env1s_square_cent(1,i_1s) + B_center(2) = List_env1s_square_cent(2,i_1s) + B_center(3) = List_env1s_square_cent(3,i_1s) dist = (B_center(1) - r(1)) * (B_center(1) - r(1)) & + (B_center(2) - r(2)) * (B_center(2) - r(2)) & + (B_center(3) - r(3)) * (B_center(3) - r(3)) @@ -468,7 +473,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points enddo - int2_u_grad1u_j1b2(j,i,ipoint) = tmp + int2_u_grad1u_env2(j,i,ipoint) = tmp enddo enddo enddo @@ -478,13 +483,13 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - int2_u_grad1u_j1b2(j,i,ipoint) = int2_u_grad1u_j1b2(i,j,ipoint) + int2_u_grad1u_env2(j,i,ipoint) = int2_u_grad1u_env2(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u_grad1u_j1b2', wall1 - wall0 + print*, ' wall time for int2_u_grad1u_env2 (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER diff --git a/plugins/local/ao_many_one_e_ints/grad2_jmu_modif_vect.irp.f b/plugins/local/ao_many_one_e_ints/grad2_jmu_modif_vect.irp.f deleted file mode 100644 index 21927371..00000000 --- a/plugins/local/ao_many_one_e_ints/grad2_jmu_modif_vect.irp.f +++ /dev/null @@ -1,453 +0,0 @@ -! -!! --- -! 
-!BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n_points_final_grid)] -! -! BEGIN_DOC -! ! -! ! -\frac{1}{4} int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [1 - erf(mu r12)]^2 -! ! -! END_DOC -! -! implicit none -! integer :: i, j, ipoint, i_1s, i_fit -! integer :: i_mask_grid -! double precision :: r(3), expo_fit, coef_fit -! double precision :: coef, beta, B_center(3) -! double precision :: wall0, wall1 -! -! integer, allocatable :: n_mask_grid(:) -! double precision, allocatable :: r_mask_grid(:,:) -! double precision, allocatable :: int_fit_v(:) -! -! print*, ' providing int2_grad1u2_grad2u2_j1b2' -! -! provide mu_erf final_grid_points_transp j1b_pen -! call wall_time(wall0) -! -! int2_grad1u2_grad2u2_j1b2(:,:,:) = 0.d0 -! -! !$OMP PARALLEL DEFAULT (NONE) & -! !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center,& -! !$OMP coef_fit, expo_fit, int_fit_v, n_mask_grid, & -! !$OMP i_mask_grid, r_mask_grid) & -! !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size,& -! !$OMP final_grid_points_transp, n_max_fit_slat, & -! !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & -! !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & -! !$OMP List_all_comb_b3_cent, int2_grad1u2_grad2u2_j1b2, & -! !$OMP ao_overlap_abs) -! -! allocate(int_fit_v(n_points_final_grid)) -! allocate(n_mask_grid(n_points_final_grid)) -! allocate(r_mask_grid(n_points_final_grid,3)) -! -! !$OMP DO SCHEDULE(dynamic) -! do i = 1, ao_num -! do j = i, ao_num -! -! if(ao_overlap_abs(j,i) .lt. 1.d-12) then -! cycle -! endif -! -! do i_fit = 1, n_max_fit_slat -! -! expo_fit = expo_gauss_1_erf_x_2(i_fit) -! coef_fit = coef_gauss_1_erf_x_2(i_fit) * (-0.25d0) -! -! ! --- -! -! call overlap_gauss_r12_ao_v(final_grid_points_transp, n_points_final_grid, expo_fit, i, j, int_fit_v, n_points_final_grid, n_points_final_grid) -! -! i_mask_grid = 0 ! dim -! n_mask_grid = 0 ! ind -! r_mask_grid = 0.d0 ! val -! do ipoint = 1, n_points_final_grid -! -! int2_grad1u2_grad2u2_j1b2(j,i,ipoint) += coef_fit * int_fit_v(ipoint) -! -! if(dabs(int_fit_v(ipoint)) .gt. 1d-10) then -! i_mask_grid += 1 -! n_mask_grid(i_mask_grid ) = ipoint -! r_mask_grid(i_mask_grid,1) = final_grid_points_transp(ipoint,1) -! r_mask_grid(i_mask_grid,2) = final_grid_points_transp(ipoint,2) -! r_mask_grid(i_mask_grid,3) = final_grid_points_transp(ipoint,3) -! endif -! -! enddo -! -! if(i_mask_grid .eq. 0) cycle -! -! ! --- -! -! do i_1s = 2, List_all_comb_b3_size -! -! coef = List_all_comb_b3_coef (i_1s) * coef_fit -! beta = List_all_comb_b3_expo (i_1s) -! B_center(1) = List_all_comb_b3_cent(1,i_1s) -! B_center(2) = List_all_comb_b3_cent(2,i_1s) -! B_center(3) = List_all_comb_b3_cent(3,i_1s) -! -! call overlap_gauss_r12_ao_with1s_v(B_center, beta, r_mask_grid, n_points_final_grid, expo_fit, i, j, int_fit_v, n_points_final_grid, i_mask_grid) -! -! do ipoint = 1, i_mask_grid -! int2_grad1u2_grad2u2_j1b2(j,i,n_mask_grid(ipoint)) += coef * int_fit_v(ipoint) -! enddo -! -! enddo -! -! ! --- -! -! enddo -! enddo -! enddo -! !$OMP END DO -! -! deallocate(n_mask_grid) -! deallocate(r_mask_grid) -! deallocate(int_fit_v) -! -! !$OMP END PARALLEL -! -! do ipoint = 1, n_points_final_grid -! do i = 2, ao_num -! do j = 1, i-1 -! int2_grad1u2_grad2u2_j1b2(j,i,ipoint) = int2_grad1u2_grad2u2_j1b2(i,j,ipoint) -! enddo -! enddo -! enddo -! -! call wall_time(wall1) -! print*, ' wall time for int2_grad1u2_grad2u2_j1b2', wall1 - wall0 -! -!END_PROVIDER -! -!! --- -! 
-!BEGIN_PROVIDER [ double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_grid)] -! -! BEGIN_DOC -! ! -! ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [u_12^mu]^2 -! ! -! END_DOC -! -! implicit none -! integer :: i, j, ipoint, i_1s, i_fit -! integer :: i_mask_grid -! double precision :: r(3), expo_fit, coef_fit -! double precision :: coef, beta, B_center(3), tmp -! double precision :: wall0, wall1 -! -! integer, allocatable :: n_mask_grid(:) -! double precision, allocatable :: r_mask_grid(:,:) -! double precision, allocatable :: int_fit_v(:) -! -! print*, ' providing int2_u2_j1b2' -! -! provide mu_erf final_grid_points_transp j1b_pen -! call wall_time(wall0) -! -! int2_u2_j1b2(:,:,:) = 0.d0 -! -! !$OMP PARALLEL DEFAULT (NONE) & -! !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & -! !$OMP coef_fit, expo_fit, int_fit_v, & -! !$OMP i_mask_grid, n_mask_grid, r_mask_grid ) & -! !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & -! !$OMP final_grid_points_transp, n_max_fit_slat, & -! !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & -! !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & -! !$OMP List_all_comb_b3_cent, int2_u2_j1b2) -! -! allocate(n_mask_grid(n_points_final_grid)) -! allocate(r_mask_grid(n_points_final_grid,3)) -! allocate(int_fit_v(n_points_final_grid)) -! -! !$OMP DO SCHEDULE(dynamic) -! do i = 1, ao_num -! do j = i, ao_num -! -! do i_fit = 1, n_max_fit_slat -! -! expo_fit = expo_gauss_j_mu_x_2(i_fit) -! coef_fit = coef_gauss_j_mu_x_2(i_fit) -! -! ! --- -! -! call overlap_gauss_r12_ao_v(final_grid_points_transp, n_points_final_grid, expo_fit, i, j, int_fit_v, n_points_final_grid, n_points_final_grid) -! -! i_mask_grid = 0 ! dim -! n_mask_grid = 0 ! ind -! r_mask_grid = 0.d0 ! val -! -! do ipoint = 1, n_points_final_grid -! int2_u2_j1b2(j,i,ipoint) += coef_fit * int_fit_v(ipoint) -! -! if(dabs(int_fit_v(ipoint)) .gt. 1d-10) then -! i_mask_grid += 1 -! n_mask_grid(i_mask_grid ) = ipoint -! r_mask_grid(i_mask_grid,1) = final_grid_points_transp(ipoint,1) -! r_mask_grid(i_mask_grid,2) = final_grid_points_transp(ipoint,2) -! r_mask_grid(i_mask_grid,3) = final_grid_points_transp(ipoint,3) -! endif -! enddo -! -! if(i_mask_grid .eq. 0) cycle -! -! ! --- -! -! do i_1s = 2, List_all_comb_b3_size -! -! coef = List_all_comb_b3_coef (i_1s) * coef_fit -! beta = List_all_comb_b3_expo (i_1s) -! B_center(1) = List_all_comb_b3_cent(1,i_1s) -! B_center(2) = List_all_comb_b3_cent(2,i_1s) -! B_center(3) = List_all_comb_b3_cent(3,i_1s) -! -! call overlap_gauss_r12_ao_with1s_v(B_center, beta, r_mask_grid, n_points_final_grid, expo_fit, i, j, int_fit_v, n_points_final_grid, i_mask_grid) -! -! do ipoint = 1, i_mask_grid -! int2_u2_j1b2(j,i,n_mask_grid(ipoint)) += coef * int_fit_v(ipoint) -! enddo -! -! enddo -! -! ! --- -! -! enddo -! enddo -! enddo -! !$OMP END DO -! -! deallocate(n_mask_grid) -! deallocate(r_mask_grid) -! deallocate(int_fit_v) -! -! !$OMP END PARALLEL -! -! do ipoint = 1, n_points_final_grid -! do i = 2, ao_num -! do j = 1, i-1 -! int2_u2_j1b2(j,i,ipoint) = int2_u2_j1b2(i,j,ipoint) -! enddo -! enddo -! enddo -! -! call wall_time(wall1) -! print*, ' wall time for int2_u2_j1b2', wall1 - wall0 -! -!END_PROVIDER -! -!! --- -! -!BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (ao_num, ao_num, n_points_final_grid, 3)] -! -! BEGIN_DOC -! ! -! ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] r2 -! ! -! END_DOC -! -! implicit none -! -! integer :: i, j, ipoint, i_1s, i_fit -! 
integer :: i_mask_grid1, i_mask_grid2, i_mask_grid3, i_mask_grid(3) -! double precision :: x, y, z, expo_fit, coef_fit -! double precision :: coef, beta, B_center(3) -! double precision :: alpha_1s, alpha_1s_inv, expo_coef_1s -! double precision :: wall0, wall1 -! -! integer, allocatable :: n_mask_grid(:,:) -! double precision, allocatable :: r_mask_grid(:,:,:) -! double precision, allocatable :: int_fit_v(:,:), dist(:,:), centr_1s(:,:,:) -! -! print*, ' providing int2_u_grad1u_x_j1b2' -! -! provide mu_erf final_grid_points_transp j1b_pen -! call wall_time(wall0) -! -! int2_u_grad1u_x_j1b2(:,:,:,:) = 0.d0 -! -! !$OMP PARALLEL DEFAULT (NONE) & -! !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, x, y, z, coef, beta, & -! !$OMP coef_fit, expo_fit, int_fit_v, alpha_1s, dist, B_center,& -! !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, & -! !$OMP i_mask_grid1, i_mask_grid2, i_mask_grid3, i_mask_grid, & -! !$OMP n_mask_grid, r_mask_grid) & -! !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & -! !$OMP final_grid_points_transp, n_max_fit_slat, & -! !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & -! !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & -! !$OMP List_all_comb_b3_cent, int2_u_grad1u_x_j1b2) -! -! allocate(dist(n_points_final_grid,3)) -! allocate(centr_1s(n_points_final_grid,3,3)) -! allocate(n_mask_grid(n_points_final_grid,3)) -! allocate(r_mask_grid(n_points_final_grid,3,3)) -! allocate(int_fit_v(n_points_final_grid,3)) -! -! !$OMP DO SCHEDULE(dynamic) -! do i = 1, ao_num -! do j = i, ao_num -! do i_fit = 1, n_max_fit_slat -! -! expo_fit = expo_gauss_j_mu_1_erf(i_fit) -! coef_fit = coef_gauss_j_mu_1_erf(i_fit) -! -! ! --- -! -! call NAI_pol_x_mult_erf_ao_with1s_v0(i, j, expo_fit, final_grid_points_transp, n_points_final_grid, 1.d+9, final_grid_points_transp, n_points_final_grid, int_fit_v, n_points_final_grid, n_points_final_grid) -! -! i_mask_grid1 = 0 ! dim -! i_mask_grid2 = 0 ! dim -! i_mask_grid3 = 0 ! dim -! n_mask_grid = 0 ! ind -! r_mask_grid = 0.d0 ! val -! do ipoint = 1, n_points_final_grid -! -! ! --- -! -! int2_u_grad1u_x_j1b2(j,i,ipoint,1) += coef_fit * int_fit_v(ipoint,1) -! -! if(dabs(int_fit_v(ipoint,1)) .gt. 1d-10) then -! i_mask_grid1 += 1 -! n_mask_grid(i_mask_grid1, 1) = ipoint -! r_mask_grid(i_mask_grid1,1,1) = final_grid_points_transp(ipoint,1) -! r_mask_grid(i_mask_grid1,2,1) = final_grid_points_transp(ipoint,2) -! r_mask_grid(i_mask_grid1,3,1) = final_grid_points_transp(ipoint,3) -! endif -! -! ! --- -! -! int2_u_grad1u_x_j1b2(j,i,ipoint,2) += coef_fit * int_fit_v(ipoint,2) -! -! if(dabs(int_fit_v(ipoint,2)) .gt. 1d-10) then -! i_mask_grid2 += 1 -! n_mask_grid(i_mask_grid2, 2) = ipoint -! r_mask_grid(i_mask_grid2,1,2) = final_grid_points_transp(ipoint,1) -! r_mask_grid(i_mask_grid2,2,2) = final_grid_points_transp(ipoint,2) -! r_mask_grid(i_mask_grid2,3,2) = final_grid_points_transp(ipoint,3) -! endif -! -! ! --- -! -! int2_u_grad1u_x_j1b2(j,i,ipoint,3) += coef_fit * int_fit_v(ipoint,3) -! -! if(dabs(int_fit_v(ipoint,3)) .gt. 1d-10) then -! i_mask_grid3 += 1 -! n_mask_grid(i_mask_grid3, 3) = ipoint -! r_mask_grid(i_mask_grid3,1,3) = final_grid_points_transp(ipoint,1) -! r_mask_grid(i_mask_grid3,2,3) = final_grid_points_transp(ipoint,2) -! r_mask_grid(i_mask_grid3,3,3) = final_grid_points_transp(ipoint,3) -! endif -! -! ! --- -! -! enddo -! -! if((i_mask_grid1+i_mask_grid2+i_mask_grid3) .eq. 0) cycle -! -! i_mask_grid(1) = i_mask_grid1 -! i_mask_grid(2) = i_mask_grid2 -! i_mask_grid(3) = i_mask_grid3 -! -! ! --- -! -! 
do i_1s = 2, List_all_comb_b3_size -! -! coef = List_all_comb_b3_coef (i_1s) * coef_fit -! beta = List_all_comb_b3_expo (i_1s) -! B_center(1) = List_all_comb_b3_cent(1,i_1s) -! B_center(2) = List_all_comb_b3_cent(2,i_1s) -! B_center(3) = List_all_comb_b3_cent(3,i_1s) -! -! alpha_1s = beta + expo_fit -! alpha_1s_inv = 1.d0 / alpha_1s -! expo_coef_1s = beta * expo_fit * alpha_1s_inv -! -! do ipoint = 1, i_mask_grid1 -! -! x = r_mask_grid(ipoint,1,1) -! y = r_mask_grid(ipoint,2,1) -! z = r_mask_grid(ipoint,3,1) -! -! centr_1s(ipoint,1,1) = alpha_1s_inv * (beta * B_center(1) + expo_fit * x) -! centr_1s(ipoint,2,1) = alpha_1s_inv * (beta * B_center(2) + expo_fit * y) -! centr_1s(ipoint,3,1) = alpha_1s_inv * (beta * B_center(3) + expo_fit * z) -! -! dist(ipoint,1) = (B_center(1) - x) * (B_center(1) - x) + (B_center(2) - y) * (B_center(2) - y) + (B_center(3) - z) * (B_center(3) - z) -! enddo -! -! do ipoint = 1, i_mask_grid2 -! -! x = r_mask_grid(ipoint,1,2) -! y = r_mask_grid(ipoint,2,2) -! z = r_mask_grid(ipoint,3,2) -! -! centr_1s(ipoint,1,2) = alpha_1s_inv * (beta * B_center(1) + expo_fit * x) -! centr_1s(ipoint,2,2) = alpha_1s_inv * (beta * B_center(2) + expo_fit * y) -! centr_1s(ipoint,3,2) = alpha_1s_inv * (beta * B_center(3) + expo_fit * z) -! -! dist(ipoint,2) = (B_center(1) - x) * (B_center(1) - x) + (B_center(2) - y) * (B_center(2) - y) + (B_center(3) - z) * (B_center(3) - z) -! enddo -! -! do ipoint = 1, i_mask_grid3 -! -! x = r_mask_grid(ipoint,1,3) -! y = r_mask_grid(ipoint,2,3) -! z = r_mask_grid(ipoint,3,3) -! -! centr_1s(ipoint,1,3) = alpha_1s_inv * (beta * B_center(1) + expo_fit * x) -! centr_1s(ipoint,2,3) = alpha_1s_inv * (beta * B_center(2) + expo_fit * y) -! centr_1s(ipoint,3,3) = alpha_1s_inv * (beta * B_center(3) + expo_fit * z) -! -! dist(ipoint,3) = (B_center(1) - x) * (B_center(1) - x) + (B_center(2) - y) * (B_center(2) - y) + (B_center(3) - z) * (B_center(3) - z) -! enddo -! -! call NAI_pol_x_mult_erf_ao_with1s_v(i, j, alpha_1s, centr_1s, n_points_final_grid, 1.d+9, r_mask_grid, n_points_final_grid, int_fit_v, n_points_final_grid, i_mask_grid) -! -! do ipoint = 1, i_mask_grid1 -! int2_u_grad1u_x_j1b2(j,i,n_mask_grid(ipoint,1),1) += coef * dexp(-expo_coef_1s * dist(ipoint,1)) * int_fit_v(ipoint,1) -! enddo -! -! do ipoint = 1, i_mask_grid2 -! int2_u_grad1u_x_j1b2(j,i,n_mask_grid(ipoint,2),2) += coef * dexp(-expo_coef_1s * dist(ipoint,2)) * int_fit_v(ipoint,2) -! enddo -! -! do ipoint = 1, i_mask_grid3 -! int2_u_grad1u_x_j1b2(j,i,n_mask_grid(ipoint,3),3) += coef * dexp(-expo_coef_1s * dist(ipoint,3)) * int_fit_v(ipoint,3) -! enddo -! -! enddo -! -! ! --- -! -! enddo -! enddo -! enddo -! !$OMP END DO -! -! deallocate(dist) -! deallocate(centr_1s) -! deallocate(n_mask_grid) -! deallocate(r_mask_grid) -! deallocate(int_fit_v) -! -! !$OMP END PARALLEL -! -! do ipoint = 1, n_points_final_grid -! do i = 2, ao_num -! do j = 1, i-1 -! int2_u_grad1u_x_j1b2(j,i,ipoint,1) = int2_u_grad1u_x_j1b2(i,j,ipoint,1) -! int2_u_grad1u_x_j1b2(j,i,ipoint,2) = int2_u_grad1u_x_j1b2(i,j,ipoint,2) -! int2_u_grad1u_x_j1b2(j,i,ipoint,3) = int2_u_grad1u_x_j1b2(i,j,ipoint,3) -! enddo -! enddo -! enddo -! -! call wall_time(wall1) -! print*, ' wall time for int2_u_grad1u_x_j1b2 =', wall1 - wall0 -! -!END_PROVIDER -! 
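In the provider removed just above, alpha_1s = beta + expo_fit, centr_1s = (beta*B_center + expo_fit*r)/alpha_1s and the damping factor dexp(-expo_coef_1s*dist) are the Gaussian product theorem: the product of two spherical Gaussians is a single Gaussian centred between them, scaled by a distance-dependent prefactor. A short self-contained check of that identity, with arbitrary test exponents and centres unrelated to the actual basis data:

    program gaussian_product_check
      implicit none
      double precision, parameter :: ea = 0.7d0, eb = 1.3d0    ! two test exponents
      double precision :: ca(3), cb(3), p(3), r(3)
      double precision :: p_expo, k_pref, lhs, rhs

      ca = (/ 0.1d0, -0.4d0,  0.3d0 /)   ! centre of the first Gaussian
      cb = (/ 1.2d0,  0.5d0, -0.2d0 /)   ! centre of the second Gaussian
      r  = (/ 0.6d0,  0.0d0,  0.9d0 /)   ! arbitrary evaluation point

      p_expo = ea + eb                                    ! plays the role of alpha_1s
      p      = (ea*ca + eb*cb) / p_expo                   ! plays the role of centr_1s
      k_pref = dexp(-(ea*eb/p_expo) * sum((ca-cb)**2))    ! plays the role of dexp(-expo_coef_1s*dist)

      lhs = dexp(-ea*sum((r-ca)**2)) * dexp(-eb*sum((r-cb)**2))
      rhs = k_pref * dexp(-p_expo*sum((r-p)**2))

      print *, 'product of the two Gaussians:', lhs
      print *, 'single combined Gaussian    :', rhs       ! equal to machine precision
    end program gaussian_product_check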
diff --git a/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_manu.irp.f b/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_manu.irp.f index 66a2b961..6c163df6 100644 --- a/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_manu.irp.f +++ b/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_manu.irp.f @@ -1,11 +1,11 @@ ! --- -BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_erf_rk_cst_mu_env_test, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R| - 1) / |r - R| + ! int dr phi_i(r) phi_j(r) 1s_env(r) (erf(mu(R) |r - R| - 1) / |r - R| ! END_DOC @@ -13,24 +13,23 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_num, integer :: i, j, ipoint, i_1s double precision :: r(3), int_mu, int_coulomb double precision :: coef, beta, B_center(3) - double precision :: tmp,int_j1b + double precision :: tmp,int_env double precision :: wall0, wall1 double precision, external :: NAI_pol_mult_erf_ao_with1s double precision :: sigma_ij,dist_ij_ipoint,dsqpi_3_2 - print*, ' providing v_ij_erf_rk_cst_mu_j1b_test ...' + print*, ' providing v_ij_erf_rk_cst_mu_env_test ...' dsqpi_3_2 = (dacos(-1.d0))**(1.5d0) - provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - v_ij_erf_rk_cst_mu_j1b_test = 0.d0 + v_ij_erf_rk_cst_mu_env_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, int_mu, int_coulomb, tmp, int_j1b)& + !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, int_mu, int_coulomb, tmp, int_env)& !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b2_size, final_grid_points, & - !$OMP List_comb_thr_b2_coef, List_comb_thr_b2_expo, List_comb_thr_b2_cent,ao_abs_comb_b2_j1b, & - !$OMP v_ij_erf_rk_cst_mu_j1b_test, mu_erf, & + !$OMP List_comb_thr_b2_coef, List_comb_thr_b2_expo, List_comb_thr_b2_cent,ao_abs_comb_b2_env, & + !$OMP v_ij_erf_rk_cst_mu_env_test, mu_erf, & !$OMP ao_overlap_abs_grid,ao_prod_center,ao_prod_sigma,dsqpi_3_2,thrsh_cycle_tc) !$OMP DO !do ipoint = 1, 10 @@ -48,8 +47,8 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_num, coef = List_comb_thr_b2_coef (i_1s,j,i) beta = List_comb_thr_b2_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b2_j1b(i_1s,j,i) -! if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b2_env(i_1s,j,i) +! if(dabs(coef)*dabs(int_env).lt.thrsh_cycle_tc)cycle B_center(1) = List_comb_thr_b2_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b2_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b2_cent(3,i_1s,j,i) @@ -60,7 +59,7 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_num, tmp += coef * (int_mu - int_coulomb) enddo - v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint) = tmp + v_ij_erf_rk_cst_mu_env_test(j,i,ipoint) = tmp enddo enddo enddo @@ -70,22 +69,22 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_num, do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint) = v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint) + v_ij_erf_rk_cst_mu_env_test(j,i,ipoint) = v_ij_erf_rk_cst_mu_env_test(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_erf_rk_cst_mu_j1b_test', wall1 - wall0 + print*, ' wall time for v_ij_erf_rk_cst_mu_env_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! 
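Every provider touched by this patch follows the skeleton visible in the hunk above: an OpenMP region with DEFAULT(NONE) and explicit PRIVATE/SHARED lists, a parallel loop over grid points that fills only the j >= i triangle of the AO pair index, and a serial pass that mirrors the result onto the j < i triangle. A stripped-down, self-contained version of that skeleton, with a dummy integrand in place of the NAI_* calls, is sketched below (compile with -fopenmp or equivalent):

    program triangle_fill_skeleton
      implicit none
      integer, parameter :: npts = 16, nao = 4
      double precision   :: v(nao,nao,npts)
      double precision   :: r
      integer            :: ipoint, i, j

      v = 0.d0

      !$OMP PARALLEL DEFAULT (NONE)   &
      !$OMP PRIVATE (ipoint, i, j, r) &
      !$OMP SHARED (v)
      !$OMP DO
      do ipoint = 1, npts
        r = 0.1d0 * dble(ipoint)           ! stands in for final_grid_points(:,ipoint)
        do i = 1, nao
          do j = i, nao                    ! only the j >= i triangle is computed
            v(j,i,ipoint) = dble(i*j) * r  ! dummy integrand instead of the NAI_* calls
          enddo
        enddo
      enddo
      !$OMP END DO
      !$OMP END PARALLEL

      ! serial symmetrisation, exactly as in the providers of this patch
      do ipoint = 1, npts
        do i = 2, nao
          do j = 1, i-1
            v(j,i,ipoint) = v(i,j,ipoint)
          enddo
        enddo
      enddo

      print *, 'v(1,3,5) =', v(1,3,5), '  v(3,1,5) =', v(3,1,5)   ! now identical
    end program triangle_fill_skeleton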
--- -BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_num, n_points_final_grid, 3)] +BEGIN_PROVIDER [double precision, x_v_ij_erf_rk_cst_mu_env_test, (ao_num, ao_num, n_points_final_grid, 3)] BEGIN_DOC - ! int dr x phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| + ! int dr x phi_i(r) phi_j(r) 1s_env(r) (erf(mu(R) |r - R|) - 1)/|r - R| END_DOC implicit none @@ -93,23 +92,23 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_nu double precision :: coef, beta, B_center(3), r(3), ints(3), ints_coulomb(3) double precision :: tmp_x, tmp_y, tmp_z double precision :: wall0, wall1 - double precision :: sigma_ij,dist_ij_ipoint,dsqpi_3_2,int_j1b,factor_ij_1s,beta_ij,center_ij_1s + double precision :: sigma_ij,dist_ij_ipoint,dsqpi_3_2,int_env,factor_ij_1s,beta_ij,center_ij_1s - print*, ' providing x_v_ij_erf_rk_cst_mu_j1b_test ...' + print*, ' providing x_v_ij_erf_rk_cst_mu_env_test ...' dsqpi_3_2 = (dacos(-1.d0))**(1.5d0) provide expo_erfc_mu_gauss ao_prod_sigma ao_prod_center call wall_time(wall0) - x_v_ij_erf_rk_cst_mu_j1b_test = 0.d0 + x_v_ij_erf_rk_cst_mu_env_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, & - !$OMP int_j1b, tmp_x, tmp_y, tmp_z,factor_ij_1s,beta_ij,center_ij_1s) & + !$OMP int_env, tmp_x, tmp_y, tmp_z,factor_ij_1s,beta_ij,center_ij_1s) & !$OMP SHARED (n_points_final_grid, ao_num, List_comb_thr_b2_size, final_grid_points,& !$OMP List_comb_thr_b2_coef, List_comb_thr_b2_expo, List_comb_thr_b2_cent, & - !$OMP x_v_ij_erf_rk_cst_mu_j1b_test, mu_erf,ao_abs_comb_b2_j1b, & + !$OMP x_v_ij_erf_rk_cst_mu_env_test, mu_erf,ao_abs_comb_b2_env, & !$OMP ao_overlap_abs_grid,ao_prod_center,ao_prod_sigma,thrsh_cycle_tc) ! !$OMP ao_overlap_abs_grid,ao_prod_center,ao_prod_sigma,dsqpi_3_2,expo_erfc_mu_gauss) !$OMP DO @@ -129,8 +128,8 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_nu coef = List_comb_thr_b2_coef (i_1s,j,i) beta = List_comb_thr_b2_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b2_j1b(i_1s,j,i) - ! if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b2_env(i_1s,j,i) + ! 
if(dabs(coef)*dabs(int_env).lt.thrsh_cycle_tc)cycle B_center(1) = List_comb_thr_b2_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b2_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b2_cent(3,i_1s,j,i) @@ -143,9 +142,9 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_nu tmp_z += coef * (ints(3) - ints_coulomb(3)) enddo - x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,1) = tmp_x - x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,2) = tmp_y - x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,3) = tmp_z + x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,1) = tmp_x + x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,2) = tmp_y + x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,3) = tmp_z enddo enddo enddo @@ -155,26 +154,26 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b_test, (ao_num, ao_nu do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,1) = x_v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint,1) - x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,2) = x_v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint,2) - x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,3) = x_v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint,3) + x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,1) = x_v_ij_erf_rk_cst_mu_env_test(i,j,ipoint,1) + x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,2) = x_v_ij_erf_rk_cst_mu_env_test(i,j,ipoint,2) + x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,3) = x_v_ij_erf_rk_cst_mu_env_test(i,j,ipoint,3) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for x_v_ij_erf_rk_cst_mu_j1b_test', wall1 - wall0 + print*, ' wall time for x_v_ij_erf_rk_cst_mu_env_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- ! TODO analytically -BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_test, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_env_test, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2) u(mu, r12) + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2) u(mu, r12) ! END_DOC @@ -185,29 +184,28 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_test, (ao_num, ao_num, n_po double precision :: tmp double precision :: wall0, wall1 double precision :: beta_ij_u, factor_ij_1s_u, center_ij_1s_u(3), coeftot - double precision :: sigma_ij, dist_ij_ipoint, dsqpi_3_2, int_j1b + double precision :: sigma_ij, dist_ij_ipoint, dsqpi_3_2, int_env double precision, external :: overlap_gauss_r12_ao double precision, external :: overlap_gauss_r12_ao_with1s - print*, ' providing v_ij_u_cst_mu_j1b_test ...' + print*, ' providing v_ij_u_cst_mu_env_test ...' 
dsqpi_3_2 = (dacos(-1.d0))**(1.5d0) - provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - v_ij_u_cst_mu_j1b_test = 0.d0 + v_ij_u_cst_mu_env_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & !$OMP beta_ij_u, factor_ij_1s_u, center_ij_1s_u, & - !$OMP coef_fit, expo_fit, int_fit, tmp,coeftot,int_j1b) & + !$OMP coef_fit, expo_fit, int_fit, tmp,coeftot,int_env) & !$OMP SHARED (n_points_final_grid, ao_num, & !$OMP final_grid_points, ng_fit_jast, & !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & !$OMP List_comb_thr_b2_coef, List_comb_thr_b2_expo,List_comb_thr_b2_size, & - !$OMP List_comb_thr_b2_cent, v_ij_u_cst_mu_j1b_test,ao_abs_comb_b2_j1b, & + !$OMP List_comb_thr_b2_cent, v_ij_u_cst_mu_env_test,ao_abs_comb_b2_env, & !$OMP ao_overlap_abs_grid,ao_prod_center,ao_prod_sigma,dsqpi_3_2,thrsh_cycle_tc) !$OMP DO do ipoint = 1, n_points_final_grid @@ -225,8 +223,8 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_test, (ao_num, ao_num, n_po ! i_1s = 1 ! --- --- --- - int_j1b = ao_abs_comb_b2_j1b(1,j,i) - ! if(dabs(int_j1b).lt.thrsh_cycle_tc) cycle + int_env = ao_abs_comb_b2_env(1,j,i) + ! if(dabs(int_env).lt.thrsh_cycle_tc) cycle do i_fit = 1, ng_fit_jast expo_fit = expo_gauss_j_mu_x(i_fit) coef_fit = coef_gauss_j_mu_x(i_fit) @@ -242,8 +240,8 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_test, (ao_num, ao_num, n_po do i_1s = 2, List_comb_thr_b2_size(j,i) coef = List_comb_thr_b2_coef (i_1s,j,i) beta = List_comb_thr_b2_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b2_j1b(i_1s,j,i) -! if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b2_env(i_1s,j,i) +! if(dabs(coef)*dabs(int_env).lt.thrsh_cycle_tc)cycle B_center(1) = List_comb_thr_b2_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b2_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b2_cent(3,i_1s,j,i) @@ -259,7 +257,7 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_test, (ao_num, ao_num, n_po enddo enddo - v_ij_u_cst_mu_j1b_test(j,i,ipoint) = tmp + v_ij_u_cst_mu_env_test(j,i,ipoint) = tmp enddo enddo enddo @@ -269,23 +267,23 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_test, (ao_num, ao_num, n_po do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_u_cst_mu_j1b_test(j,i,ipoint) = v_ij_u_cst_mu_j1b_test(i,j,ipoint) + v_ij_u_cst_mu_env_test(j,i,ipoint) = v_ij_u_cst_mu_env_test(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_u_cst_mu_j1b_test', wall1 - wall0 + print*, ' wall time for v_ij_u_cst_mu_env_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_ng_1_test, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_env_ng_1_test, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2) u(mu, r12) with u(mu,r12) \approx 1/2 mu e^{-2.5 * mu (r12)^2} + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2) u(mu, r12) with u(mu,r12) \approx 1/2 mu e^{-2.5 * mu (r12)^2} ! 
END_DOC @@ -296,27 +294,26 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_ng_1_test, (ao_num, ao_num, double precision :: tmp double precision :: wall0, wall1 double precision :: beta_ij_u, factor_ij_1s_u, center_ij_1s_u(3), coeftot - double precision :: sigma_ij, dist_ij_ipoint, dsqpi_3_2, int_j1b + double precision :: sigma_ij, dist_ij_ipoint, dsqpi_3_2, int_env double precision, external :: overlap_gauss_r12_ao double precision, external :: overlap_gauss_r12_ao_with1s dsqpi_3_2 = (dacos(-1.d0))**(1.5d0) - provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - v_ij_u_cst_mu_j1b_ng_1_test = 0.d0 + v_ij_u_cst_mu_env_ng_1_test = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, & !$OMP beta_ij_u, factor_ij_1s_u, center_ij_1s_u, & - !$OMP coef_fit, expo_fit, int_fit, tmp,coeftot,int_j1b) & + !$OMP coef_fit, expo_fit, int_fit, tmp,coeftot,int_env) & !$OMP SHARED (n_points_final_grid, ao_num, & !$OMP final_grid_points, expo_good_j_mu_1gauss,coef_good_j_mu_1gauss, & !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & !$OMP List_comb_thr_b2_coef, List_comb_thr_b2_expo,List_comb_thr_b2_size, & - !$OMP List_comb_thr_b2_cent, v_ij_u_cst_mu_j1b_ng_1_test,ao_abs_comb_b2_j1b, & + !$OMP List_comb_thr_b2_cent, v_ij_u_cst_mu_env_ng_1_test,ao_abs_comb_b2_env, & !$OMP ao_overlap_abs_grid,ao_prod_center,ao_prod_sigma,dsqpi_3_2,thrsh_cycle_tc) !$OMP DO do ipoint = 1, n_points_final_grid @@ -334,8 +331,8 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_ng_1_test, (ao_num, ao_num, ! i_1s = 1 ! --- --- --- - int_j1b = ao_abs_comb_b2_j1b(1,j,i) -! if(dabs(int_j1b).lt.thrsh_cycle_tc) cycle + int_env = ao_abs_comb_b2_env(1,j,i) +! if(dabs(int_env).lt.thrsh_cycle_tc) cycle expo_fit = expo_good_j_mu_1gauss int_fit = overlap_gauss_r12_ao(r, expo_fit, i, j) tmp += int_fit @@ -347,8 +344,8 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_ng_1_test, (ao_num, ao_num, do i_1s = 2, List_comb_thr_b2_size(j,i) coef = List_comb_thr_b2_coef (i_1s,j,i) beta = List_comb_thr_b2_expo (i_1s,j,i) - int_j1b = ao_abs_comb_b2_j1b(i_1s,j,i) -! if(dabs(coef)*dabs(int_j1b).lt.thrsh_cycle_tc)cycle + int_env = ao_abs_comb_b2_env(i_1s,j,i) +! if(dabs(coef)*dabs(int_env).lt.thrsh_cycle_tc)cycle B_center(1) = List_comb_thr_b2_cent(1,i_1s,j,i) B_center(2) = List_comb_thr_b2_cent(2,i_1s,j,i) B_center(3) = List_comb_thr_b2_cent(3,i_1s,j,i) @@ -364,7 +361,7 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_ng_1_test, (ao_num, ao_num, ! enddo enddo - v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint) = tmp + v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint) = tmp enddo enddo enddo @@ -374,13 +371,13 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_ng_1_test, (ao_num, ao_num, do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint) = v_ij_u_cst_mu_j1b_ng_1_test(i,j,ipoint) + v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint) = v_ij_u_cst_mu_env_ng_1_test(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_u_cst_mu_j1b_ng_1_test', wall1 - wall0 + print*, ' wall time for v_ij_u_cst_mu_env_ng_1_test (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER diff --git a/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f b/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index 24b33eb5..00e2d5fc 100644 --- a/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f +++ b/plugins/local/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -1,11 +1,11 @@ ! 
--- -BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_erf_rk_cst_mu_env, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R| - 1) / |r - R| + ! int dr phi_i(r) phi_j(r) 1s_env(r) (erf(mu(R) |r - R| - 1) / |r - R| ! END_DOC @@ -17,18 +17,20 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po double precision :: wall0, wall1 double precision, external :: NAI_pol_mult_erf_ao_with1s - print *, ' providing v_ij_erf_rk_cst_mu_j1b ...' + PROVIDE mu_erf + PROVIDE final_grid_points + PROVIDE env_expo + + print *, ' providing v_ij_erf_rk_cst_mu_env ...' call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen - - v_ij_erf_rk_cst_mu_j1b = 0.d0 + v_ij_erf_rk_cst_mu_env = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, int_mu, int_coulomb, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points, & - !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & - !$OMP v_ij_erf_rk_cst_mu_j1b, mu_erf) + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_size, final_grid_points, & + !$OMP List_env1s_coef, List_env1s_expo, List_env1s_cent, & + !$OMP v_ij_erf_rk_cst_mu_env, mu_erf) !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -43,28 +45,27 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po ! --- - coef = List_all_comb_b2_coef (1) - beta = List_all_comb_b2_expo (1) - B_center(1) = List_all_comb_b2_cent(1,1) - B_center(2) = List_all_comb_b2_cent(2,1) - B_center(3) = List_all_comb_b2_cent(3,1) + coef = List_env1s_coef (1) + beta = List_env1s_expo (1) + B_center(1) = List_env1s_cent(1,1) + B_center(2) = List_env1s_cent(2,1) + B_center(3) = List_env1s_cent(3,1) int_mu = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) int_coulomb = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) -! if(dabs(coef)*dabs(int_mu - int_coulomb) .lt. 1d-12) cycle tmp += coef * (int_mu - int_coulomb) ! --- - do i_1s = 2, List_all_comb_b2_size + do i_1s = 2, List_env1s_size - coef = List_all_comb_b2_coef (i_1s) + coef = List_env1s_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b2_expo (i_1s) - B_center(1) = List_all_comb_b2_cent(1,i_1s) - B_center(2) = List_all_comb_b2_cent(2,i_1s) - B_center(3) = List_all_comb_b2_cent(3,i_1s) + beta = List_env1s_expo (i_1s) + B_center(1) = List_env1s_cent(1,i_1s) + B_center(2) = List_env1s_cent(2,i_1s) + B_center(3) = List_env1s_cent(3,i_1s) int_mu = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) int_coulomb = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) @@ -74,7 +75,7 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po ! --- - v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) = tmp + v_ij_erf_rk_cst_mu_env(j,i,ipoint) = tmp enddo enddo enddo @@ -84,22 +85,22 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) = v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + v_ij_erf_rk_cst_mu_env(j,i,ipoint) = v_ij_erf_rk_cst_mu_env(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_erf_rk_cst_mu_j1b', wall1 - wall0 + print*, ' wall time for v_ij_erf_rk_cst_mu_env (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! 
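A recurring idiom in the hunk above is worth spelling out: the bare Coulomb integral int_coulomb is obtained from the very same erf-attenuated routine as int_mu, simply by passing the huge value 1.d+9 for mu, since erf(mu*x) is numerically 1 for every relevant distance and erf(mu*x)/x therefore collapses onto 1/x. A short numerical check of that limit (the sample distances below are arbitrary):

    program large_mu_limit
      implicit none
      double precision :: x
      integer          :: k
      do k = 1, 4
        x = 0.25d0 * dble(k)
        ! erf(1.d+9*x)/x versus the bare Coulomb kernel 1/x
        print *, x, erf(1.d+9*x)/x, 1.d0/x
      enddo
    end program large_mu_limit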
--- -BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid, 3)] +BEGIN_PROVIDER [double precision, x_v_ij_erf_rk_cst_mu_env, (ao_num, ao_num, n_points_final_grid, 3)] BEGIN_DOC - ! int dr x phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| + ! int dr x phi_i(r) phi_j(r) 1s_env(r) (erf(mu(R) |r - R|) - 1)/|r - R| END_DOC implicit none @@ -108,17 +109,17 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_ double precision :: tmp_x, tmp_y, tmp_z double precision :: wall0, wall1 - print*, ' providing x_v_ij_erf_rk_cst_mu_j1b ...' + print*, ' providing x_v_ij_erf_rk_cst_mu_env ...' call wall_time(wall0) - x_v_ij_erf_rk_cst_mu_j1b = 0.d0 + x_v_ij_erf_rk_cst_mu_env = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, & !$OMP tmp_x, tmp_y, tmp_z) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points,& - !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & - !$OMP x_v_ij_erf_rk_cst_mu_j1b, mu_erf) + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_size, final_grid_points,& + !$OMP List_env1s_coef, List_env1s_expo, List_env1s_cent, & + !$OMP x_v_ij_erf_rk_cst_mu_env, mu_erf) !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -135,11 +136,11 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_ ! --- - coef = List_all_comb_b2_coef (1) - beta = List_all_comb_b2_expo (1) - B_center(1) = List_all_comb_b2_cent(1,1) - B_center(2) = List_all_comb_b2_cent(2,1) - B_center(3) = List_all_comb_b2_cent(3,1) + coef = List_env1s_coef (1) + beta = List_env1s_expo (1) + B_center(1) = List_env1s_cent(1,1) + B_center(2) = List_env1s_cent(2,1) + B_center(3) = List_env1s_cent(3,1) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, ints ) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, ints_coulomb) @@ -152,14 +153,14 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_ ! --- - do i_1s = 2, List_all_comb_b2_size + do i_1s = 2, List_env1s_size - coef = List_all_comb_b2_coef (i_1s) + coef = List_env1s_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b2_expo (i_1s) - B_center(1) = List_all_comb_b2_cent(1,i_1s) - B_center(2) = List_all_comb_b2_cent(2,i_1s) - B_center(3) = List_all_comb_b2_cent(3,i_1s) + beta = List_env1s_expo (i_1s) + B_center(1) = List_env1s_cent(1,i_1s) + B_center(2) = List_env1s_cent(2,i_1s) + B_center(3) = List_env1s_cent(3,i_1s) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, ints ) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, ints_coulomb) @@ -171,9 +172,9 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_ ! 
--- - x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,1) = tmp_x - x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,2) = tmp_y - x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,3) = tmp_z + x_v_ij_erf_rk_cst_mu_env(j,i,ipoint,1) = tmp_x + x_v_ij_erf_rk_cst_mu_env(j,i,ipoint,2) = tmp_y + x_v_ij_erf_rk_cst_mu_env(j,i,ipoint,3) = tmp_z enddo enddo enddo @@ -183,25 +184,25 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_ do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,1) = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) - x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,2) = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) - x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,3) = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + x_v_ij_erf_rk_cst_mu_env(j,i,ipoint,1) = x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,1) + x_v_ij_erf_rk_cst_mu_env(j,i,ipoint,2) = x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,2) + x_v_ij_erf_rk_cst_mu_env(j,i,ipoint,3) = x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,3) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for x_v_ij_erf_rk_cst_mu_j1b =', wall1 - wall0 + print*, ' wall time for x_v_ij_erf_rk_cst_mu_env (min) =', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_fit, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_env_fit, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2) u(mu, r12) + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2) u(mu, r12) ! END_DOC @@ -214,23 +215,23 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_fit, (ao_num, ao_num, n_poi double precision, external :: overlap_gauss_r12_ao_with1s - print*, ' providing v_ij_u_cst_mu_j1b_fit ...' + print*, ' providing v_ij_u_cst_mu_env_fit ...' call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen + provide mu_erf final_grid_points env_expo PROVIDE ng_fit_jast expo_gauss_j_mu_x coef_gauss_j_mu_x - PROVIDE List_all_comb_b2_size List_all_comb_b2_coef List_all_comb_b2_expo List_all_comb_b2_cent + PROVIDE List_env1s_size List_env1s_coef List_env1s_expo List_env1s_cent - v_ij_u_cst_mu_j1b_fit = 0.d0 + v_ij_u_cst_mu_env_fit = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & !$OMP coef_fit, expo_fit, int_fit, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_size, & !$OMP final_grid_points, ng_fit_jast, & !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & - !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, & - !$OMP List_all_comb_b2_cent, v_ij_u_cst_mu_j1b_fit) + !$OMP List_env1s_coef, List_env1s_expo, & + !$OMP List_env1s_cent, v_ij_u_cst_mu_env_fit) !$OMP DO do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) @@ -247,11 +248,11 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_fit, (ao_num, ao_num, n_poi ! --- - coef = List_all_comb_b2_coef (1) - beta = List_all_comb_b2_expo (1) - B_center(1) = List_all_comb_b2_cent(1,1) - B_center(2) = List_all_comb_b2_cent(2,1) - B_center(3) = List_all_comb_b2_cent(3,1) + coef = List_env1s_coef (1) + beta = List_env1s_expo (1) + B_center(1) = List_env1s_cent(1,1) + B_center(2) = List_env1s_cent(2,1) + B_center(3) = List_env1s_cent(3,1) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) @@ -259,14 +260,14 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_fit, (ao_num, ao_num, n_poi ! 
--- - do i_1s = 2, List_all_comb_b2_size + do i_1s = 2, List_env1s_size - coef = List_all_comb_b2_coef (i_1s) + coef = List_env1s_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b2_expo (i_1s) - B_center(1) = List_all_comb_b2_cent(1,i_1s) - B_center(2) = List_all_comb_b2_cent(2,i_1s) - B_center(3) = List_all_comb_b2_cent(3,i_1s) + beta = List_env1s_expo (i_1s) + B_center(1) = List_env1s_cent(1,i_1s) + B_center(2) = List_env1s_cent(2,i_1s) + B_center(3) = List_env1s_cent(3,i_1s) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) @@ -277,7 +278,7 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_fit, (ao_num, ao_num, n_poi enddo - v_ij_u_cst_mu_j1b_fit(j,i,ipoint) = tmp + v_ij_u_cst_mu_env_fit(j,i,ipoint) = tmp enddo enddo enddo @@ -287,23 +288,23 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b_fit, (ao_num, ao_num, n_poi do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_u_cst_mu_j1b_fit(j,i,ipoint) = v_ij_u_cst_mu_j1b_fit(i,j,ipoint) + v_ij_u_cst_mu_env_fit(j,i,ipoint) = v_ij_u_cst_mu_env_fit(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_u_cst_mu_j1b_fit', wall1 - wall0 + print*, ' wall time for v_ij_u_cst_mu_env_fit (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an_old, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_env_an_old, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2) u(mu, r12) + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2) u(mu, r12) ! END_DOC @@ -322,24 +323,24 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an_old, (ao_num, ao_num, n_p double precision, external :: overlap_gauss_r12_ao_with1s double precision, external :: NAI_pol_mult_erf_ao_with1s - print*, ' providing v_ij_u_cst_mu_j1b_an_old ...' + print*, ' providing v_ij_u_cst_mu_env_an_old ...' call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen - PROVIDE List_all_comb_b2_size List_all_comb_b2_coef List_all_comb_b2_expo List_all_comb_b2_cent + provide mu_erf final_grid_points env_expo + PROVIDE List_env1s_size List_env1s_coef List_env1s_expo List_env1s_cent ct = inv_sq_pi_2 / mu_erf - v_ij_u_cst_mu_j1b_an_old = 0.d0 + v_ij_u_cst_mu_env_an_old = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, & !$OMP r1_2, tmp, int_c1, int_e1, int_o, int_c2, & !$OMP int_e2, int_c3, int_e3) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_size, & !$OMP final_grid_points, mu_erf, ct, & - !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, & - !$OMP List_all_comb_b2_cent, v_ij_u_cst_mu_j1b_an_old) + !$OMP List_env1s_coef, List_env1s_expo, & + !$OMP List_env1s_cent, v_ij_u_cst_mu_env_an_old) !$OMP DO do ipoint = 1, n_points_final_grid @@ -353,11 +354,11 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an_old, (ao_num, ao_num, n_p ! 
--- - coef = List_all_comb_b2_coef (1) - beta = List_all_comb_b2_expo (1) - B_center(1) = List_all_comb_b2_cent(1,1) - B_center(2) = List_all_comb_b2_cent(2,1) - B_center(3) = List_all_comb_b2_cent(3,1) + coef = List_env1s_coef (1) + beta = List_env1s_expo (1) + B_center(1) = List_env1s_cent(1,1) + B_center(2) = List_env1s_cent(2,1) + B_center(3) = List_env1s_cent(3,1) int_c1 = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) int_e1 = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) @@ -379,14 +380,14 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an_old, (ao_num, ao_num, n_p ! --- - do i_1s = 2, List_all_comb_b2_size + do i_1s = 2, List_env1s_size - coef = List_all_comb_b2_coef (i_1s) + coef = List_env1s_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b2_expo (i_1s) - B_center(1) = List_all_comb_b2_cent(1,i_1s) - B_center(2) = List_all_comb_b2_cent(2,i_1s) - B_center(3) = List_all_comb_b2_cent(3,i_1s) + beta = List_env1s_expo (i_1s) + B_center(1) = List_env1s_cent(1,i_1s) + B_center(2) = List_env1s_cent(2,i_1s) + B_center(3) = List_env1s_cent(3,i_1s) int_c1 = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) int_e1 = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) @@ -410,7 +411,7 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an_old, (ao_num, ao_num, n_p ! --- - v_ij_u_cst_mu_j1b_an_old(j,i,ipoint) = tmp + v_ij_u_cst_mu_env_an_old(j,i,ipoint) = tmp enddo enddo enddo @@ -420,23 +421,23 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an_old, (ao_num, ao_num, n_p do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_u_cst_mu_j1b_an_old(j,i,ipoint) = v_ij_u_cst_mu_j1b_an_old(i,j,ipoint) + v_ij_u_cst_mu_env_an_old(j,i,ipoint) = v_ij_u_cst_mu_env_an_old(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_u_cst_mu_j1b_an_old', wall1 - wall0 + print*, ' wall time for v_ij_u_cst_mu_env_an_old (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_env_an, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2) u(mu, r12) + ! int dr2 phi_i(r2) phi_j(r2) 1s_env(r2) u(mu, r12) ! END_DOC @@ -454,23 +455,23 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an, (ao_num, ao_num, n_point double precision, external :: overlap_gauss_r12_ao_with1s double precision, external :: NAI_pol_mult_erf_ao_with1s - print*, ' providing v_ij_u_cst_mu_j1b_an ...' + print*, ' providing v_ij_u_cst_mu_env_an ...' 
call wall_time(wall0) - provide mu_erf final_grid_points j1b_pen - PROVIDE List_all_comb_b2_size List_all_comb_b2_coef List_all_comb_b2_expo List_all_comb_b2_cent + provide mu_erf final_grid_points env_expo + PROVIDE List_env1s_size List_env1s_coef List_env1s_expo List_env1s_cent ct = inv_sq_pi_2 / mu_erf - v_ij_u_cst_mu_j1b_an = 0.d0 + v_ij_u_cst_mu_env_an = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, & !$OMP r1_2, tmp, int_c, int_e, int_o) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, & + !$OMP SHARED (n_points_final_grid, ao_num, List_env1s_size, & !$OMP final_grid_points, mu_erf, ct, & - !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, & - !$OMP List_all_comb_b2_cent, v_ij_u_cst_mu_j1b_an) + !$OMP List_env1s_coef, List_env1s_expo, & + !$OMP List_env1s_cent, v_ij_u_cst_mu_env_an) !$OMP DO do ipoint = 1, n_points_final_grid @@ -484,11 +485,11 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an, (ao_num, ao_num, n_point ! --- - coef = List_all_comb_b2_coef (1) - beta = List_all_comb_b2_expo (1) - B_center(1) = List_all_comb_b2_cent(1,1) - B_center(2) = List_all_comb_b2_cent(2,1) - B_center(3) = List_all_comb_b2_cent(3,1) + coef = List_env1s_coef (1) + beta = List_env1s_expo (1) + B_center(1) = List_env1s_cent(1,1) + B_center(2) = List_env1s_cent(2,1) + B_center(3) = List_env1s_cent(3,1) call NAI_pol_012_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, int_c) call NAI_pol_012_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, int_e) @@ -504,14 +505,14 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an, (ao_num, ao_num, n_point ! --- - do i_1s = 2, List_all_comb_b2_size + do i_1s = 2, List_env1s_size - coef = List_all_comb_b2_coef (i_1s) + coef = List_env1s_coef (i_1s) if(dabs(coef) .lt. 1d-15) cycle ! beta = 0.0 - beta = List_all_comb_b2_expo (i_1s) - B_center(1) = List_all_comb_b2_cent(1,i_1s) - B_center(2) = List_all_comb_b2_cent(2,i_1s) - B_center(3) = List_all_comb_b2_cent(3,i_1s) + beta = List_env1s_expo (i_1s) + B_center(1) = List_env1s_cent(1,i_1s) + B_center(2) = List_env1s_cent(2,i_1s) + B_center(3) = List_env1s_cent(3,i_1s) call NAI_pol_012_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, int_c) call NAI_pol_012_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, int_e) @@ -529,7 +530,7 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an, (ao_num, ao_num, n_point ! --- - v_ij_u_cst_mu_j1b_an(j,i,ipoint) = tmp + v_ij_u_cst_mu_env_an(j,i,ipoint) = tmp enddo enddo enddo @@ -539,13 +540,13 @@ BEGIN_PROVIDER [double precision, v_ij_u_cst_mu_j1b_an, (ao_num, ao_num, n_point do ipoint = 1, n_points_final_grid do i = 2, ao_num do j = 1, i-1 - v_ij_u_cst_mu_j1b_an(j,i,ipoint) = v_ij_u_cst_mu_j1b_an(i,j,ipoint) + v_ij_u_cst_mu_env_an(j,i,ipoint) = v_ij_u_cst_mu_env_an(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for v_ij_u_cst_mu_j1b_an', wall1 - wall0 + print*, ' wall time for v_ij_u_cst_mu_env_an (min) = ', (wall1 - wall0) / 60.d0 END_PROVIDER diff --git a/plugins/local/ao_many_one_e_ints/lin_fc_rsdft.irp.f b/plugins/local/ao_many_one_e_ints/lin_fc_rsdft.irp.f new file mode 100644 index 00000000..3483872b --- /dev/null +++ b/plugins/local/ao_many_one_e_ints/lin_fc_rsdft.irp.f @@ -0,0 +1,574 @@ + +! 
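The new file lin_fc_rsdft.irp.f whose diff starts here accumulates, for every AO pair and grid point, integrals of the short-range kernel (1 - erf(mu*r12))/r12 and of its Gaussian companion e^{-(mu*r12)^2} against the one-body envelope fc_env and its square. What these kernels have in common is a fast decay once r12 exceeds roughly 1/mu. The small program below (mu set arbitrarily to 1.0) simply prints that decay next to the bare 1/r kernel:

    program short_range_decay
      implicit none
      double precision, parameter :: mu = 1.0d0   ! arbitrary range-separation parameter
      double precision :: r
      integer          :: k
      do k = 1, 6
        r = 0.5d0 * dble(k)
        ! short-range kernel, Gaussian companion, and bare Coulomb kernel
        print *, r, (1.d0 - erf(mu*r))/r, dexp(-(mu*r)**2), 1.d0/r
      enddo
    end program short_range_decay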
--- + + BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du_0, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du_x, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du_y, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du_z, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du_2, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! Ir2_Mu_long_Du_0 = int dr2 phi_i(r2) phi_j(r2) fc_env(r2) [(1 - erf(mu r_12) / r_12] + ! + ! Ir2_Mu_long_Du_x = int dr2 phi_i(r2) phi_j(r2) fc_env(r2) [(1 - erf(mu r_12) / r_12] * x2 + ! Ir2_Mu_long_Du_y = int dr2 phi_i(r2) phi_j(r2) fc_env(r2) [(1 - erf(mu r_12) / r_12] * y2 + ! Ir2_Mu_long_Du_z = int dr2 phi_i(r2) phi_j(r2) fc_env(r2) [(1 - erf(mu r_12) / r_12] * z2 + ! + ! Ir2_Mu_long_Du_2 = int dr2 phi_i(r2) phi_j(r2) fc_env(r2) [(1 - erf(mu r_12) / r_12] * r2^2 + ! + END_DOC + + implicit none + + integer :: i, j, ipoint, i_1s + double precision :: r(3), int_clb(7), int_erf(7) + double precision :: c_1s, e_1s, R_1s(3) + double precision :: tmp_Du_0, tmp_Du_x, tmp_Du_y, tmp_Du_z, tmp_Du_2 + double precision :: wall0, wall1 + + PROVIDE mu_erf + PROVIDE final_grid_points + PROVIDE List_env1s_size List_env1s_expo List_env1s_coef List_env1s_cent + + + print *, ' providing Ir2_Mu_long_Du ...' + call wall_time(wall0) + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, r, c_1s, e_1s, R_1s, int_erf, int_clb, & + !$OMP tmp_Du_0, tmp_Du_x, tmp_Du_y, tmp_Du_z, tmp_Du_2) & + !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points, mu_erf, & + !$OMP List_env1s_size, List_env1s_expo, & + !$OMP List_env1s_coef, List_env1s_cent, & + !$OMP Ir2_Mu_long_Du_0, Ir2_Mu_long_Du_x, & + !$OMP Ir2_Mu_long_Du_y, Ir2_Mu_long_Du_z, & + !$OMP Ir2_Mu_long_Du_2) + !$OMP DO + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i = 1, ao_num + do j = i, ao_num + + call NAI_pol_012_mult_erf_ao(i, j, 1.d+9, r, int_clb) + call NAI_pol_012_mult_erf_ao(i, j, mu_erf, r, int_erf) + + tmp_Du_0 = int_clb(1) - int_erf(1) + tmp_Du_x = int_clb(2) - int_erf(2) + tmp_Du_y = int_clb(3) - int_erf(3) + tmp_Du_z = int_clb(4) - int_erf(4) + tmp_Du_2 = int_clb(5) + int_clb(6) + int_clb(7) - int_erf(5) - int_erf(6) - int_erf(7) + + do i_1s = 2, List_env1s_size + + e_1s = List_env1s_expo(i_1s) + c_1s = List_env1s_coef(i_1s) + R_1s(1) = List_env1s_cent(1,i_1s) + R_1s(2) = List_env1s_cent(2,i_1s) + R_1s(3) = List_env1s_cent(3,i_1s) + + call NAI_pol_012_mult_erf_ao_with1s(i, j, e_1s, R_1s, 1.d+9, r, int_clb) + call NAI_pol_012_mult_erf_ao_with1s(i, j, e_1s, R_1s, mu_erf, r, int_erf) + + tmp_Du_0 = tmp_Du_0 + c_1s * (int_clb(1) - int_erf(1)) + tmp_Du_x = tmp_Du_x + c_1s * (int_clb(2) - int_erf(2)) + tmp_Du_y = tmp_Du_y + c_1s * (int_clb(3) - int_erf(3)) + tmp_Du_z = tmp_Du_z + c_1s * (int_clb(4) - int_erf(4)) + tmp_Du_2 = tmp_Du_2 + c_1s * (int_clb(5) + int_clb(6) + int_clb(7) - int_erf(5) - int_erf(6) - int_erf(7)) + enddo + + Ir2_Mu_long_Du_0(j,i,ipoint) = tmp_Du_0 + Ir2_Mu_long_Du_x(j,i,ipoint) = tmp_Du_x + Ir2_Mu_long_Du_y(j,i,ipoint) = tmp_Du_y + Ir2_Mu_long_Du_z(j,i,ipoint) = tmp_Du_z + Ir2_Mu_long_Du_2(j,i,ipoint) = tmp_Du_2 + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 2, ao_num + do j = 1, i-1 + Ir2_Mu_long_Du_0(j,i,ipoint) = Ir2_Mu_long_Du_0(i,j,ipoint) + 
Ir2_Mu_long_Du_x(j,i,ipoint) = Ir2_Mu_long_Du_x(i,j,ipoint) + Ir2_Mu_long_Du_y(j,i,ipoint) = Ir2_Mu_long_Du_y(i,j,ipoint) + Ir2_Mu_long_Du_z(j,i,ipoint) = Ir2_Mu_long_Du_z(i,j,ipoint) + Ir2_Mu_long_Du_2(j,i,ipoint) = Ir2_Mu_long_Du_2(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for Ir2_Mu_long_Du (min) = ', (wall1 - wall0) / 60.d0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, Ir2_Mu_gauss_Du, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! Ir2_Mu_gauss_Du = int dr2 phi_i(r2) phi_j(r2) fc_env(r2) e^{-(mu r_12)^2} + ! + END_DOC + + implicit none + + integer :: i, j, ipoint, i_1s + double precision :: r(3) + double precision :: coef, beta, B_center(3) + double precision :: tmp_Du + double precision :: mu_sq, dx, dy, dz, tmp_arg, rmu_sq(3) + double precision :: e_1s, c_1s, R_1s(3) + double precision :: wall0, wall1 + + double precision, external :: overlap_gauss_r12_ao + + PROVIDE mu_erf + PROVIDE final_grid_points + PROVIDE List_env1s_size List_env1s_expo List_env1s_coef List_env1s_cent + + + print *, ' providing Ir2_Mu_gauss_Du ...' + call wall_time(wall0) + + mu_sq = mu_erf * mu_erf + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, dx, dy, dz, r, tmp_arg, coef, & + !$OMP rmu_sq, e_1s, c_1s, R_1s, beta, B_center, tmp_Du) & + !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points, mu_sq, & + !$OMP List_env1s_size, List_env1s_expo, & + !$OMP List_env1s_coef, List_env1s_cent, & + !$OMP Ir2_Mu_gauss_Du) + !$OMP DO + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + rmu_sq(1) = mu_sq * r(1) + rmu_sq(2) = mu_sq * r(2) + rmu_sq(3) = mu_sq * r(3) + + do i = 1, ao_num + do j = i, ao_num + + tmp_Du = overlap_gauss_r12_ao(r, mu_sq, j, i) + + do i_1s = 2, List_env1s_size + + e_1s = List_env1s_expo(i_1s) + c_1s = List_env1s_coef(i_1s) + R_1s(1) = List_env1s_cent(1,i_1s) + R_1s(2) = List_env1s_cent(2,i_1s) + R_1s(3) = List_env1s_cent(3,i_1s) + + dx = r(1) - R_1s(1) + dy = r(2) - R_1s(2) + dz = r(3) - R_1s(3) + + beta = mu_sq + e_1s + tmp_arg = mu_sq * e_1s * (dx*dx + dy*dy + dz*dz) / beta + coef = c_1s * dexp(-tmp_arg) + B_center(1) = (rmu_sq(1) + e_1s * R_1s(1)) / beta + B_center(2) = (rmu_sq(2) + e_1s * R_1s(2)) / beta + B_center(3) = (rmu_sq(3) + e_1s * R_1s(3)) / beta + + tmp_Du += coef * overlap_gauss_r12_ao(B_center, beta, j, i) + enddo + + Ir2_Mu_gauss_Du(j,i,ipoint) = tmp_Du + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 2, ao_num + do j = 1, i-1 + + Ir2_Mu_gauss_Du(j,i,ipoint) = Ir2_Mu_gauss_Du(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for Ir2_Mu_gauss_Du (min) = ', (wall1 - wall0) / 60.d0 + +END_PROVIDER + +! --- + + BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du2_0, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du2_x, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du2_y, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du2_z, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_long_Du2_2, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! Ir2_Mu_long_Du2_0 = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12) / r_12] + ! + ! Ir2_Mu_long_Du2_x = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12) / r_12] * x2 + ! 
Ir2_Mu_long_Du2_y = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12) / r_12] * y2 + ! Ir2_Mu_long_Du2_z = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12) / r_12] * z2 + ! + ! Ir2_Mu_long_Du2_2 = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12) / r_12] * r2^2 + ! + END_DOC + + implicit none + + integer :: i, j, ipoint, i_1s + double precision :: r(3), int_clb(7), int_erf(7) + double precision :: coef, beta, B_center(3) + double precision :: tmp_Du2_0, tmp_Du2_x, tmp_Du2_y, tmp_Du2_z, tmp_Du2_2 + double precision :: mu_sq, tmp_arg, dx, dy, dz, rmu_sq(3) + double precision :: e_1s, c_1s, R_1s(3) + double precision :: wall0, wall1 + + + PROVIDE mu_erf + PROVIDE final_grid_points + PROVIDE List_env1s_square_size List_env1s_square_expo List_env1s_square_coef List_env1s_square_cent + + print *, ' providing Ir2_Mu_long_Du2 ...' + call wall_time(wall0) + + mu_sq = mu_erf * mu_erf + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, r, rmu_sq, dx, dy, dz, & + !$OMP e_1s, c_1s, R_1s, tmp_arg, coef, beta, B_center, & + !$OMP int_erf, int_clb, & + !$OMP tmp_Du2_0, tmp_Du2_x, tmp_Du2_y, tmp_Du2_z, tmp_Du2_2) & + !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points, mu_sq, & + !$OMP mu_erf, List_env1s_square_size, List_env1s_square_expo, & + !$OMP List_env1s_square_coef, List_env1s_square_cent, & + !$OMP Ir2_Mu_long_Du2_0, Ir2_Mu_long_Du2_x, & + !$OMP Ir2_Mu_long_Du2_y, Ir2_Mu_long_Du2_z, & + !$OMP Ir2_Mu_long_Du2_2) + !$OMP DO + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + rmu_sq(1) = mu_sq * r(1) + rmu_sq(2) = mu_sq * r(2) + rmu_sq(3) = mu_sq * r(3) + + do i = 1, ao_num + do j = i, ao_num + + call NAI_pol_012_mult_erf_ao_with1s(i, j, mu_sq, r, 1.d+9, r, int_clb) + call NAI_pol_012_mult_erf_ao_with1s(i, j, mu_sq, r, mu_erf, r, int_erf) + + tmp_Du2_0 = int_clb(1) - int_erf(1) + tmp_Du2_x = int_clb(2) - int_erf(2) + tmp_Du2_y = int_clb(3) - int_erf(3) + tmp_Du2_z = int_clb(4) - int_erf(4) + tmp_Du2_2 = int_clb(5) + int_clb(6) + int_clb(7) - int_erf(5) - int_erf(6) - int_erf(7) + + do i_1s = 2, List_env1s_square_size + + e_1s = List_env1s_square_expo(i_1s) + c_1s = List_env1s_square_coef(i_1s) + R_1s(1) = List_env1s_square_cent(1,i_1s) + R_1s(2) = List_env1s_square_cent(2,i_1s) + R_1s(3) = List_env1s_square_cent(3,i_1s) + + dx = r(1) - R_1s(1) + dy = r(2) - R_1s(2) + dz = r(3) - R_1s(3) + + beta = mu_sq + e_1s + tmp_arg = mu_sq * e_1s * (dx*dx + dy*dy + dz*dz) / beta + coef = c_1s * dexp(-tmp_arg) + B_center(1) = (rmu_sq(1) + e_1s * R_1s(1)) / beta + B_center(2) = (rmu_sq(2) + e_1s * R_1s(2)) / beta + B_center(3) = (rmu_sq(3) + e_1s * R_1s(3)) / beta + + call NAI_pol_012_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, int_clb) + call NAI_pol_012_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, int_erf) + + tmp_Du2_0 = tmp_Du2_0 + coef * (int_clb(1) - int_erf(1)) + tmp_Du2_x = tmp_Du2_x + coef * (int_clb(2) - int_erf(2)) + tmp_Du2_y = tmp_Du2_y + coef * (int_clb(3) - int_erf(3)) + tmp_Du2_z = tmp_Du2_z + coef * (int_clb(4) - int_erf(4)) + tmp_Du2_2 = tmp_Du2_2 + coef * (int_clb(5) + int_clb(6) + int_clb(7) - int_erf(5) - int_erf(6) - int_erf(7)) + enddo + + Ir2_Mu_long_Du2_0(j,i,ipoint) = tmp_Du2_0 + Ir2_Mu_long_Du2_x(j,i,ipoint) = tmp_Du2_x + Ir2_Mu_long_Du2_y(j,i,ipoint) = tmp_Du2_y + Ir2_Mu_long_Du2_z(j,i,ipoint) = tmp_Du2_z + Ir2_Mu_long_Du2_2(j,i,ipoint) = tmp_Du2_2 + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL 
+ + do ipoint = 1, n_points_final_grid + do i = 2, ao_num + do j = 1, i-1 + Ir2_Mu_long_Du2_0(j,i,ipoint) = Ir2_Mu_long_Du2_0(i,j,ipoint) + Ir2_Mu_long_Du2_x(j,i,ipoint) = Ir2_Mu_long_Du2_x(i,j,ipoint) + Ir2_Mu_long_Du2_y(j,i,ipoint) = Ir2_Mu_long_Du2_y(i,j,ipoint) + Ir2_Mu_long_Du2_z(j,i,ipoint) = Ir2_Mu_long_Du2_z(i,j,ipoint) + Ir2_Mu_long_Du2_2(j,i,ipoint) = Ir2_Mu_long_Du2_2(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for Ir2_Mu_long_Du2 (min) = ', (wall1 - wall0) / 60.d0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, Ir2_Mu_gauss_Du2, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! Ir2_Mu_gauss_Du2 = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 e^{-(mu r_12)^2} + ! + END_DOC + + implicit none + + integer :: i, j, ipoint, i_1s + double precision :: r(3) + double precision :: coef, beta, B_center(3) + double precision :: tmp_Du2 + double precision :: mu_sq, dx, dy, dz, tmp_arg, rmu_sq(3) + double precision :: e_1s, c_1s, R_1s(3) + double precision :: wall0, wall1 + + double precision, external :: overlap_gauss_r12_ao + + PROVIDE mu_erf + PROVIDE final_grid_points + PROVIDE List_env1s_square_size List_env1s_square_expo List_env1s_square_coef List_env1s_square_cent + + + print *, ' providing Ir2_Mu_gauss_Du2 ...' + call wall_time(wall0) + + mu_sq = 2.d0 * mu_erf * mu_erf + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, dx, dy, dz, r, tmp_arg, coef, & + !$OMP rmu_sq, e_1s, c_1s, R_1s, beta, B_center, tmp_Du2) & + !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points, mu_sq, & + !$OMP List_env1s_square_size, List_env1s_square_expo, & + !$OMP List_env1s_square_coef, List_env1s_square_cent, & + !$OMP Ir2_Mu_gauss_Du2) + !$OMP DO + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + rmu_sq(1) = mu_sq * r(1) + rmu_sq(2) = mu_sq * r(2) + rmu_sq(3) = mu_sq * r(3) + + do i = 1, ao_num + do j = i, ao_num + + tmp_Du2 = overlap_gauss_r12_ao(r, mu_sq, j, i) + + do i_1s = 2, List_env1s_square_size + + e_1s = List_env1s_square_expo(i_1s) + c_1s = List_env1s_square_coef(i_1s) + R_1s(1) = List_env1s_square_cent(1,i_1s) + R_1s(2) = List_env1s_square_cent(2,i_1s) + R_1s(3) = List_env1s_square_cent(3,i_1s) + + dx = r(1) - R_1s(1) + dy = r(2) - R_1s(2) + dz = r(3) - R_1s(3) + + beta = mu_sq + e_1s + tmp_arg = mu_sq * e_1s * (dx*dx + dy*dy + dz*dz) / beta + coef = c_1s * dexp(-tmp_arg) + B_center(1) = (rmu_sq(1) + e_1s * R_1s(1)) / beta + B_center(2) = (rmu_sq(2) + e_1s * R_1s(2)) / beta + B_center(3) = (rmu_sq(3) + e_1s * R_1s(3)) / beta + + tmp_Du2 += coef * overlap_gauss_r12_ao(B_center, beta, j, i) + enddo + + Ir2_Mu_gauss_Du2(j,i,ipoint) = tmp_Du2 + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 2, ao_num + do j = 1, i-1 + + Ir2_Mu_gauss_Du2(j,i,ipoint) = Ir2_Mu_gauss_Du2(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for Ir2_Mu_gauss_Du2 (min) = ', (wall1 - wall0) / 60.d0 + +END_PROVIDER + +! 
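The Ir2_Mu_long_Du*, Ir2_Mu_long_Du2* and Ir2_Mu_short_Du2* families above each deliver five quantities per AO pair and grid point: the plain integral and the integrals multiplied by x2, y2, z2 and r2^2, all extracted from a single seven-component work array. Judging from the way ints(5)+ints(6)+ints(7) (and likewise int_clb/int_erf) is summed, the last three slots are assumed here to hold the x^2, y^2 and z^2 second moments. A toy accumulation that mirrors that packing, with random weights and points in place of the actual integrand:

    program moment_packing_demo
      implicit none
      integer, parameter :: npts = 5
      double precision   :: w(npts), x(npts), y(npts), z(npts)
      double precision   :: ints(7)   ! assumed layout: 1 -> 0th, 2-4 -> x,y,z, 5-7 -> x^2,y^2,z^2
      integer            :: k

      call random_number(w)
      call random_number(x)
      call random_number(y)
      call random_number(z)

      ints = 0.d0
      do k = 1, npts
        ints(1) = ints(1) + w(k)
        ints(2) = ints(2) + w(k) * x(k)
        ints(3) = ints(3) + w(k) * y(k)
        ints(4) = ints(4) + w(k) * z(k)
        ints(5) = ints(5) + w(k) * x(k)*x(k)
        ints(6) = ints(6) + w(k) * y(k)*y(k)
        ints(7) = ints(7) + w(k) * z(k)*z(k)
      enddo

      print *, '0th moment :', ints(1)
      print *, 'x, y, z    :', ints(2:4)
      print *, 'r^2 moment :', ints(5) + ints(6) + ints(7)   ! recombined as in the providers
    end program moment_packing_demo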
--- + + BEGIN_PROVIDER [double precision, Ir2_Mu_short_Du2_0, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_short_Du2_x, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_short_Du2_y, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_short_Du2_z, (ao_num, ao_num, n_points_final_grid)] +&BEGIN_PROVIDER [double precision, Ir2_Mu_short_Du2_2, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! Ir2_Mu_short_Du2_0 = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12)]^2 + ! + ! Ir2_Mu_short_Du2_x = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12)]^2 * x2 + ! Ir2_Mu_short_Du2_y = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12)]^2 * y2 + ! Ir2_Mu_short_Du2_z = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12)]^2 * z2 + ! + ! Ir2_Mu_short_Du2_2 = int dr2 phi_i(r2) phi_j(r2) [fc_env(r2)]^2 [(1 - erf(mu r_12)]^2 * r2^2 + ! + END_DOC + + implicit none + + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), ints(7) + double precision :: coef, beta, B_center(3) + double precision :: tmp_Du2_0, tmp_Du2_x, tmp_Du2_y, tmp_Du2_z, tmp_Du2_2 + double precision :: tmp_arg, dx, dy, dz + double precision :: expo_fit, coef_fit, e_1s, c_1s, R_1s(3) + double precision :: wall0, wall1 + + PROVIDE final_grid_points + PROVIDE List_env1s_square_size List_env1s_square_expo List_env1s_square_coef List_env1s_square_cent + PROVIDE ng_fit_jast expo_gauss_1_erf_x_2 coef_gauss_1_erf_x_2 + + print *, ' providing Ir2_Mu_short_Du2 ...' + call wall_time(wall0) + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, dx, dy, dz, & + !$OMP expo_fit, coef_fit, e_1s, c_1s, R_1s, & + !$OMP tmp_arg, coef, beta, B_center, ints, & + !$OMP tmp_Du2_0, tmp_Du2_x, tmp_Du2_y, tmp_Du2_z, tmp_Du2_2) & + !$OMP SHARED (n_points_final_grid, ao_num, final_grid_points, & + !$OMP ng_fit_jast, expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & + !$OMP List_env1s_square_size, List_env1s_square_expo, & + !$OMP List_env1s_square_coef, List_env1s_square_cent, & + !$OMP Ir2_Mu_short_Du2_0, Ir2_Mu_short_Du2_x, & + !$OMP Ir2_Mu_short_Du2_y, Ir2_Mu_short_Du2_z, & + !$OMP Ir2_Mu_short_Du2_2) + !$OMP DO + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i = 1, ao_num + do j = i, ao_num + + tmp_Du2_0 = 0.d0 + tmp_Du2_x = 0.d0 + tmp_Du2_y = 0.d0 + tmp_Du2_z = 0.d0 + tmp_Du2_2 = 0.d0 + do i_fit = 1, ng_fit_jast + + expo_fit = expo_gauss_1_erf_x_2(i_fit) + coef_fit = coef_gauss_1_erf_x_2(i_fit) + + call overlap_gauss_r12_ao_012(r, expo_fit, i, j, ints) + + tmp_Du2_0 += coef_fit * ints(1) + tmp_Du2_x += coef_fit * ints(2) + tmp_Du2_y += coef_fit * ints(3) + tmp_Du2_z += coef_fit * ints(4) + tmp_Du2_2 += coef_fit * (ints(5) + ints(6) + ints(7)) + + do i_1s = 2, List_env1s_square_size + + e_1s = List_env1s_square_expo(i_1s) + c_1s = List_env1s_square_coef(i_1s) + R_1s(1) = List_env1s_square_cent(1,i_1s) + R_1s(2) = List_env1s_square_cent(2,i_1s) + R_1s(3) = List_env1s_square_cent(3,i_1s) + + dx = r(1) - R_1s(1) + dy = r(2) - R_1s(2) + dz = r(3) - R_1s(3) + + beta = expo_fit + e_1s + tmp_arg = expo_fit * e_1s * (dx*dx + dy*dy + dz*dz) / beta + coef = coef_fit * c_1s * dexp(-tmp_arg) + B_center(1) = (expo_fit * r(1) + e_1s * R_1s(1)) / beta + B_center(2) = (expo_fit * r(2) + e_1s * R_1s(2)) / beta + B_center(3) = (expo_fit * r(3) + e_1s * R_1s(3)) / beta + + call 
overlap_gauss_r12_ao_012(B_center, beta, i, j, ints) + + tmp_Du2_0 += coef * ints(1) + tmp_Du2_x += coef * ints(2) + tmp_Du2_y += coef * ints(3) + tmp_Du2_z += coef * ints(4) + tmp_Du2_2 += coef * (ints(5) + ints(6) + ints(7)) + enddo ! i_1s + enddo ! i_fit + + Ir2_Mu_short_Du2_0(j,i,ipoint) = tmp_Du2_0 + Ir2_Mu_short_Du2_x(j,i,ipoint) = tmp_Du2_x + Ir2_Mu_short_Du2_y(j,i,ipoint) = tmp_Du2_y + Ir2_Mu_short_Du2_z(j,i,ipoint) = tmp_Du2_z + Ir2_Mu_short_Du2_2(j,i,ipoint) = tmp_Du2_2 + enddo ! j + enddo ! i + enddo ! ipoint + !$OMP END DO + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 2, ao_num + do j = 1, i-1 + Ir2_Mu_short_Du2_0(j,i,ipoint) = Ir2_Mu_short_Du2_0(i,j,ipoint) + Ir2_Mu_short_Du2_x(j,i,ipoint) = Ir2_Mu_short_Du2_x(i,j,ipoint) + Ir2_Mu_short_Du2_y(j,i,ipoint) = Ir2_Mu_short_Du2_y(i,j,ipoint) + Ir2_Mu_short_Du2_z(j,i,ipoint) = Ir2_Mu_short_Du2_z(i,j,ipoint) + Ir2_Mu_short_Du2_2(j,i,ipoint) = Ir2_Mu_short_Du2_2(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for Ir2_Mu_short_Du2 (min) = ', (wall1 - wall0) / 60.d0 + +END_PROVIDER + +! --- + diff --git a/plugins/local/ao_many_one_e_ints/listj1b.irp.f b/plugins/local/ao_many_one_e_ints/listj1b.irp.f deleted file mode 100644 index 33ca8085..00000000 --- a/plugins/local/ao_many_one_e_ints/listj1b.irp.f +++ /dev/null @@ -1,366 +0,0 @@ - -! --- - -BEGIN_PROVIDER [integer, List_all_comb_b2_size] - - implicit none - - PROVIDE j1b_type - - if((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then - - List_all_comb_b2_size = 2**nucl_num - - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then - - List_all_comb_b2_size = nucl_num + 1 - - else - - print *, 'j1b_type = ', j1b_type, 'is not implemented' - stop - - endif - - print *, ' nb of linear terms in the envelope is ', List_all_comb_b2_size - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [integer, List_all_comb_b2, (nucl_num, List_all_comb_b2_size)] - - implicit none - integer :: i, j - - if(nucl_num .gt. 32) then - print *, ' nucl_num = ', nucl_num, '> 32' - stop - endif - - List_all_comb_b2 = 0 - - do i = 0, List_all_comb_b2_size-1 - do j = 0, nucl_num-1 - if (btest(i,j)) then - List_all_comb_b2(j+1,i+1) = 1 - endif - enddo - enddo - -END_PROVIDER - -! --- - - BEGIN_PROVIDER [ double precision, List_all_comb_b2_coef, ( List_all_comb_b2_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b2_expo, ( List_all_comb_b2_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b2_cent, (3, List_all_comb_b2_size)] - - implicit none - integer :: i, j, k, phase - double precision :: tmp_alphaj, tmp_alphak - double precision :: tmp_cent_x, tmp_cent_y, tmp_cent_z - - provide j1b_pen - provide j1b_pen_coef - - List_all_comb_b2_coef = 0.d0 - List_all_comb_b2_expo = 0.d0 - List_all_comb_b2_cent = 0.d0 - - if((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then - - do i = 1, List_all_comb_b2_size - - tmp_cent_x = 0.d0 - tmp_cent_y = 0.d0 - tmp_cent_z = 0.d0 - do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - List_all_comb_b2_expo(i) += tmp_alphaj - tmp_cent_x += tmp_alphaj * nucl_coord(j,1) - tmp_cent_y += tmp_alphaj * nucl_coord(j,2) - tmp_cent_z += tmp_alphaj * nucl_coord(j,3) - enddo - - if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - - List_all_comb_b2_cent(1,i) = tmp_cent_x / List_all_comb_b2_expo(i) - List_all_comb_b2_cent(2,i) = tmp_cent_y / List_all_comb_b2_expo(i) - List_all_comb_b2_cent(3,i) = tmp_cent_z / List_all_comb_b2_expo(i) - enddo - - ! 
--- - - do i = 1, List_all_comb_b2_size - - do j = 2, nucl_num, 1 - tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - do k = 1, j-1, 1 - tmp_alphak = dble(List_all_comb_b2(k,i)) * j1b_pen(k) - - List_all_comb_b2_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & - + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & - + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) - enddo - enddo - - if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - - List_all_comb_b2_coef(i) = List_all_comb_b2_coef(i) / List_all_comb_b2_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b2_size - - phase = 0 - do j = 1, nucl_num - phase += List_all_comb_b2(j,i) - enddo - - List_all_comb_b2_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_comb_b2_coef(i)) - enddo - - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then - - List_all_comb_b2_coef( 1) = 1.d0 - List_all_comb_b2_expo( 1) = 0.d0 - List_all_comb_b2_cent(1:3,1) = 0.d0 - do i = 1, nucl_num - List_all_comb_b2_coef( i+1) = -1.d0 * j1b_pen_coef(i) - List_all_comb_b2_expo( i+1) = j1b_pen(i) - List_all_comb_b2_cent(1,i+1) = nucl_coord(i,1) - List_all_comb_b2_cent(2,i+1) = nucl_coord(i,2) - List_all_comb_b2_cent(3,i+1) = nucl_coord(i,3) - enddo - - else - - print *, 'j1b_type = ', j1b_type, 'is not implemented' - stop - - endif - - !print *, ' coeff, expo & cent of list b2' - !do i = 1, List_all_comb_b2_size - ! print*, i, List_all_comb_b2_coef(i), List_all_comb_b2_expo(i) - ! print*, List_all_comb_b2_cent(1,i), List_all_comb_b2_cent(2,i), List_all_comb_b2_cent(3,i) - !enddo - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ integer, List_all_comb_b3_size] - - implicit none - double precision :: tmp - - if((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then - - List_all_comb_b3_size = 3**nucl_num - - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then - - tmp = 0.5d0 * dble(nucl_num) * (dble(nucl_num) + 3.d0) - List_all_comb_b3_size = int(tmp) + 1 - - else - - print *, 'j1b_type = ', j1b_type, 'is not implemented' - stop - - endif - - print *, ' nb of linear terms in the square of the envelope is ', List_all_comb_b3_size - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [integer, List_all_comb_b3, (nucl_num, List_all_comb_b3_size)] - - implicit none - integer :: i, j, ii, jj - integer, allocatable :: M(:,:), p(:) - - if(nucl_num .gt. 32) then - print *, ' nucl_num = ', nucl_num, '> 32' - stop - endif - - List_all_comb_b3(:,:) = 0 - List_all_comb_b3(:,List_all_comb_b3_size) = 2 - - allocate(p(nucl_num)) - p = 0 - - do i = 2, List_all_comb_b3_size-1 - do j = 1, nucl_num - - ii = 0 - do jj = 1, j-1, 1 - ii = ii + p(jj) * 3**(jj-1) - enddo - p(j) = modulo(i-1-ii, 3**j) / 3**(j-1) - - List_all_comb_b3(j,i) = p(j) - enddo - enddo - -END_PROVIDER - -! --- - - BEGIN_PROVIDER [ double precision, List_all_comb_b3_coef, ( List_all_comb_b3_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b3_expo, ( List_all_comb_b3_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b3_cent, (3, List_all_comb_b3_size)] - - implicit none - integer :: i, j, k, phase - integer :: ii - double precision :: tmp_alphaj, tmp_alphak, facto - double precision :: tmp1, tmp2, tmp3, tmp4 - double precision :: xi, yi, zi, xj, yj, zj - double precision :: dx, dy, dz, r2 - - provide j1b_pen - provide j1b_pen_coef - - List_all_comb_b3_coef = 0.d0 - List_all_comb_b3_expo = 0.d0 - List_all_comb_b3_cent = 0.d0 - - if((j1b_type .eq. 3) .or. (j1b_type .eq. 
103)) then - - do i = 1, List_all_comb_b3_size - - do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) - List_all_comb_b3_expo(i) += tmp_alphaj - List_all_comb_b3_cent(1,i) += tmp_alphaj * nucl_coord(j,1) - List_all_comb_b3_cent(2,i) += tmp_alphaj * nucl_coord(j,2) - List_all_comb_b3_cent(3,i) += tmp_alphaj * nucl_coord(j,3) - - enddo - - if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle - ASSERT(List_all_comb_b3_expo(i) .gt. 0d0) - - List_all_comb_b3_cent(1,i) = List_all_comb_b3_cent(1,i) / List_all_comb_b3_expo(i) - List_all_comb_b3_cent(2,i) = List_all_comb_b3_cent(2,i) / List_all_comb_b3_expo(i) - List_all_comb_b3_cent(3,i) = List_all_comb_b3_cent(3,i) / List_all_comb_b3_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b3_size - - do j = 2, nucl_num, 1 - tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) - do k = 1, j-1, 1 - tmp_alphak = dble(List_all_comb_b3(k,i)) * j1b_pen(k) - - List_all_comb_b3_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & - + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & - + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) - enddo - enddo - - if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle - - List_all_comb_b3_coef(i) = List_all_comb_b3_coef(i) / List_all_comb_b3_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b3_size - - facto = 1.d0 - phase = 0 - do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb_b3(j,i)) - - facto *= 2.d0 / (gamma(tmp_alphaj+1.d0) * gamma(3.d0-tmp_alphaj)) - phase += List_all_comb_b3(j,i) - enddo - - List_all_comb_b3_coef(i) = (-1.d0)**dble(phase) * facto * dexp(-List_all_comb_b3_coef(i)) - enddo - - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then - - ii = 1 - List_all_comb_b3_coef( ii) = 1.d0 - List_all_comb_b3_expo( ii) = 0.d0 - List_all_comb_b3_cent(1:3,ii) = 0.d0 - - do i = 1, nucl_num - ii = ii + 1 - List_all_comb_b3_coef( ii) = -2.d0 * j1b_pen_coef(i) - List_all_comb_b3_expo( ii) = j1b_pen(i) - List_all_comb_b3_cent(1,ii) = nucl_coord(i,1) - List_all_comb_b3_cent(2,ii) = nucl_coord(i,2) - List_all_comb_b3_cent(3,ii) = nucl_coord(i,3) - enddo - - do i = 1, nucl_num - ii = ii + 1 - List_all_comb_b3_coef( ii) = 1.d0 * j1b_pen_coef(i) * j1b_pen_coef(i) - List_all_comb_b3_expo( ii) = 2.d0 * j1b_pen(i) - List_all_comb_b3_cent(1,ii) = nucl_coord(i,1) - List_all_comb_b3_cent(2,ii) = nucl_coord(i,2) - List_all_comb_b3_cent(3,ii) = nucl_coord(i,3) - enddo - - do i = 1, nucl_num-1 - - tmp1 = j1b_pen(i) - - xi = nucl_coord(i,1) - yi = nucl_coord(i,2) - zi = nucl_coord(i,3) - - do j = i+1, nucl_num - - tmp2 = j1b_pen(j) - tmp3 = tmp1 + tmp2 - tmp4 = 1.d0 / tmp3 - - xj = nucl_coord(j,1) - yj = nucl_coord(j,2) - zj = nucl_coord(j,3) - - dx = xi - xj - dy = yi - yj - dz = zi - zj - r2 = dx*dx + dy*dy + dz*dz - - ii = ii + 1 - ! x 2 to avoid doing integrals twice - List_all_comb_b3_coef( ii) = 2.d0 * dexp(-tmp1*tmp2*tmp4*r2) * j1b_pen_coef(i) * j1b_pen_coef(j) - List_all_comb_b3_expo( ii) = tmp3 - List_all_comb_b3_cent(1,ii) = tmp4 * (tmp1 * xi + tmp2 * xj) - List_all_comb_b3_cent(2,ii) = tmp4 * (tmp1 * yi + tmp2 * yj) - List_all_comb_b3_cent(3,ii) = tmp4 * (tmp1 * zi + tmp2 * zj) - enddo - enddo - - else - - print *, 'j1b_type = ', j1b_type, 'is not implemented' - stop - - endif - - !print *, ' coeff, expo & cent of list b3' - !do i = 1, List_all_comb_b3_size - ! print*, i, List_all_comb_b3_coef(i), List_all_comb_b3_expo(i) - ! 
print*, List_all_comb_b3_cent(1,i), List_all_comb_b3_cent(2,i), List_all_comb_b3_cent(3,i) - !enddo - -END_PROVIDER - -! --- - diff --git a/plugins/local/ao_many_one_e_ints/listj1b_sorted.irp.f b/plugins/local/ao_many_one_e_ints/listj1b_sorted.irp.f index 9bcce449..ad57739b 100644 --- a/plugins/local/ao_many_one_e_ints/listj1b_sorted.irp.f +++ b/plugins/local/ao_many_one_e_ints/listj1b_sorted.irp.f @@ -1,181 +1,197 @@ - BEGIN_PROVIDER [ integer, List_comb_thr_b2_size, (ao_num, ao_num)] -&BEGIN_PROVIDER [ integer, max_List_comb_thr_b2_size] - implicit none - integer :: i_1s,i,j,ipoint - double precision :: coef,beta,center(3),int_j1b - double precision :: r(3),weight,dist - List_comb_thr_b2_size = 0 - print*,'List_all_comb_b2_size = ',List_all_comb_b2_size -! pause - do i = 1, ao_num - do j = i, ao_num - do i_1s = 1, List_all_comb_b2_size - coef = List_all_comb_b2_coef (i_1s) - if(dabs(coef).lt.thrsh_cycle_tc)cycle - beta = List_all_comb_b2_expo (i_1s) - beta = max(beta,1.d-12) - center(1:3) = List_all_comb_b2_cent(1:3,i_1s) - int_j1b = 0.d0 - do ipoint = 1, n_points_extra_final_grid - r(1:3) = final_grid_points_extra(1:3,ipoint) - weight = final_weight_at_r_vector_extra(ipoint) - dist = ( center(1) - r(1) )*( center(1) - r(1) ) - dist += ( center(2) - r(2) )*( center(2) - r(2) ) - dist += ( center(3) - r(3) )*( center(3) - r(3) ) - int_j1b += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight - enddo - if(dabs(coef)*dabs(int_j1b).gt.thrsh_cycle_tc)then - List_comb_thr_b2_size(j,i) += 1 - endif - enddo - enddo - enddo - do i = 1, ao_num - do j = 1, i-1 - List_comb_thr_b2_size(j,i) = List_comb_thr_b2_size(i,j) +! --- + + BEGIN_PROVIDER [integer, List_comb_thr_b2_size, (ao_num, ao_num)] +&BEGIN_PROVIDER [integer, max_List_comb_thr_b2_size] + + implicit none + integer :: i_1s, i, j, ipoint + integer :: list(ao_num) + double precision :: coef,beta,center(3),int_env + double precision :: r(3),weight,dist + + List_comb_thr_b2_size = 0 + print*,'List_env1s_size = ',List_env1s_size + + do i = 1, ao_num + do j = i, ao_num + do i_1s = 1, List_env1s_size + coef = List_env1s_coef(i_1s) + if(dabs(coef).lt.thrsh_cycle_tc) cycle + beta = List_env1s_expo(i_1s) + beta = max(beta,1.d-12) + center(1:3) = List_env1s_cent(1:3,i_1s) + int_env = 0.d0 + do ipoint = 1, n_points_extra_final_grid + r(1:3) = final_grid_points_extra(1:3,ipoint) + weight = final_weight_at_r_vector_extra(ipoint) + dist = ( center(1) - r(1) )*( center(1) - r(1) ) + dist += ( center(2) - r(2) )*( center(2) - r(2) ) + dist += ( center(3) - r(3) )*( center(3) - r(3) ) + int_env += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight + enddo + if(dabs(coef)*dabs(int_env).gt.thrsh_cycle_tc)then + List_comb_thr_b2_size(j,i) += 1 + endif + enddo + enddo enddo - enddo - integer :: list(ao_num) - do i = 1, ao_num - list(i) = maxval(List_comb_thr_b2_size(:,i)) - enddo - max_List_comb_thr_b2_size = maxval(list) - print*,'max_List_comb_thr_b2_size = ',max_List_comb_thr_b2_size - -END_PROVIDER - BEGIN_PROVIDER [ double precision, List_comb_thr_b2_coef, ( max_List_comb_thr_b2_size,ao_num, ao_num )] -&BEGIN_PROVIDER [ double precision, List_comb_thr_b2_expo, ( max_List_comb_thr_b2_size,ao_num, ao_num )] -&BEGIN_PROVIDER [ double precision, List_comb_thr_b2_cent, (3, max_List_comb_thr_b2_size,ao_num, ao_num )] -&BEGIN_PROVIDER [ double precision, ao_abs_comb_b2_j1b, ( max_List_comb_thr_b2_size ,ao_num, ao_num)] - implicit none - integer :: 
i_1s,i,j,ipoint,icount - double precision :: coef,beta,center(3),int_j1b - double precision :: r(3),weight,dist - ao_abs_comb_b2_j1b = 10000000.d0 - do i = 1, ao_num - do j = i, ao_num - icount = 0 - do i_1s = 1, List_all_comb_b2_size - coef = List_all_comb_b2_coef (i_1s) - if(dabs(coef).lt.thrsh_cycle_tc)cycle - beta = List_all_comb_b2_expo (i_1s) - center(1:3) = List_all_comb_b2_cent(1:3,i_1s) - int_j1b = 0.d0 - do ipoint = 1, n_points_extra_final_grid - r(1:3) = final_grid_points_extra(1:3,ipoint) - weight = final_weight_at_r_vector_extra(ipoint) - dist = ( center(1) - r(1) )*( center(1) - r(1) ) - dist += ( center(2) - r(2) )*( center(2) - r(2) ) - dist += ( center(3) - r(3) )*( center(3) - r(3) ) - int_j1b += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight - enddo - if(dabs(coef)*dabs(int_j1b).gt.thrsh_cycle_tc)then - icount += 1 - List_comb_thr_b2_coef(icount,j,i) = coef - List_comb_thr_b2_expo(icount,j,i) = beta - List_comb_thr_b2_cent(1:3,icount,j,i) = center(1:3) - ao_abs_comb_b2_j1b(icount,j,i) = int_j1b - endif - enddo - enddo - enddo - - do i = 1, ao_num - do j = 1, i-1 - do icount = 1, List_comb_thr_b2_size(j,i) - List_comb_thr_b2_coef(icount,j,i) = List_comb_thr_b2_coef(icount,i,j) - List_comb_thr_b2_expo(icount,j,i) = List_comb_thr_b2_expo(icount,i,j) - List_comb_thr_b2_cent(1:3,icount,j,i) = List_comb_thr_b2_cent(1:3,icount,i,j) + do i = 1, ao_num + do j = 1, i-1 + List_comb_thr_b2_size(j,i) = List_comb_thr_b2_size(i,j) enddo enddo - enddo + do i = 1, ao_num + list(i) = maxval(List_comb_thr_b2_size(:,i)) + enddo + + max_List_comb_thr_b2_size = maxval(list) + print*, ' max_List_comb_thr_b2_size = ',max_List_comb_thr_b2_size END_PROVIDER +! --- - BEGIN_PROVIDER [ integer, List_comb_thr_b3_size, (ao_num, ao_num)] -&BEGIN_PROVIDER [ integer, max_List_comb_thr_b3_size] - implicit none - integer :: i_1s,i,j,ipoint - double precision :: coef,beta,center(3),int_j1b - double precision :: r(3),weight,dist - List_comb_thr_b3_size = 0 - print*,'List_all_comb_b3_size = ',List_all_comb_b3_size - do i = 1, ao_num - do j = 1, ao_num - do i_1s = 1, List_all_comb_b3_size - coef = List_all_comb_b3_coef (i_1s) - beta = List_all_comb_b3_expo (i_1s) - center(1:3) = List_all_comb_b3_cent(1:3,i_1s) - if(dabs(coef).lt.thrsh_cycle_tc)cycle - int_j1b = 0.d0 - do ipoint = 1, n_points_extra_final_grid - r(1:3) = final_grid_points_extra(1:3,ipoint) - weight = final_weight_at_r_vector_extra(ipoint) - dist = ( center(1) - r(1) )*( center(1) - r(1) ) - dist += ( center(2) - r(2) )*( center(2) - r(2) ) - dist += ( center(3) - r(3) )*( center(3) - r(3) ) - int_j1b += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight - enddo - if(dabs(coef)*dabs(int_j1b).gt.thrsh_cycle_tc)then - List_comb_thr_b3_size(j,i) += 1 - endif - enddo - enddo - enddo -! do i = 1, ao_num -! do j = 1, i-1 -! List_comb_thr_b3_size(j,i) = List_comb_thr_b3_size(i,j) -! enddo -! 
enddo + BEGIN_PROVIDER [ double precision, List_comb_thr_b2_coef, ( max_List_comb_thr_b2_size,ao_num,ao_num)] +&BEGIN_PROVIDER [ double precision, List_comb_thr_b2_expo, ( max_List_comb_thr_b2_size,ao_num,ao_num)] +&BEGIN_PROVIDER [ double precision, List_comb_thr_b2_cent, (3,max_List_comb_thr_b2_size,ao_num,ao_num)] +&BEGIN_PROVIDER [ double precision, ao_abs_comb_b2_env , ( max_List_comb_thr_b2_size,ao_num,ao_num)] + + implicit none + integer :: i_1s,i,j,ipoint,icount + double precision :: coef,beta,center(3),int_env + double precision :: r(3),weight,dist + + ao_abs_comb_b2_env = 10000000.d0 + do i = 1, ao_num + do j = i, ao_num + icount = 0 + do i_1s = 1, List_env1s_size + coef = List_env1s_coef (i_1s) + if(dabs(coef).lt.thrsh_cycle_tc)cycle + beta = List_env1s_expo (i_1s) + center(1:3) = List_env1s_cent(1:3,i_1s) + int_env = 0.d0 + do ipoint = 1, n_points_extra_final_grid + r(1:3) = final_grid_points_extra(1:3,ipoint) + weight = final_weight_at_r_vector_extra(ipoint) + dist = ( center(1) - r(1) )*( center(1) - r(1) ) + dist += ( center(2) - r(2) )*( center(2) - r(2) ) + dist += ( center(3) - r(3) )*( center(3) - r(3) ) + int_env += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight + enddo + if(dabs(coef)*dabs(int_env).gt.thrsh_cycle_tc)then + icount += 1 + List_comb_thr_b2_coef(icount,j,i) = coef + List_comb_thr_b2_expo(icount,j,i) = beta + List_comb_thr_b2_cent(1:3,icount,j,i) = center(1:3) + ao_abs_comb_b2_env(icount,j,i) = int_env + endif + enddo + enddo + enddo + + do i = 1, ao_num + do j = 1, i-1 + do icount = 1, List_comb_thr_b2_size(j,i) + List_comb_thr_b2_coef(icount,j,i) = List_comb_thr_b2_coef(icount,i,j) + List_comb_thr_b2_expo(icount,j,i) = List_comb_thr_b2_expo(icount,i,j) + List_comb_thr_b2_cent(1:3,icount,j,i) = List_comb_thr_b2_cent(1:3,icount,i,j) + enddo + enddo + enddo + +END_PROVIDER + +! 
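The thresholded lists built above keep, for each AO pair (i,j), only the envelope Gaussians that survive the `thrsh_cycle_tc` test, the contribution of a term being estimated as a quadrature sum of |chi_i(r) chi_j(r)| exp(-beta |r-R|^2) over the extra grid. The sketch below is not part of the patch; the grid points, weights and AO values are made up, and it only illustrates the keep/drop criterion.

```fortran
! Minimal sketch (not from the patch) of the screening test used when
! compacting the per-AO-pair envelope lists: a term is kept only if
! |coef| * sum_k w_k |chi_i chi_j|(r_k) exp(-beta |r_k - R|^2) > threshold.
program screening_demo
  implicit none
  integer, parameter :: npts = 1000   ! hypothetical number of grid points
  integer :: k
  double precision :: grid(3,npts), weight(npts), prod_ij(npts)
  double precision :: center(3), beta, coef, thresh, dist2, estimate

  call random_number(grid)            ! made-up grid points in a 4x4x4 box
  grid    = 4.d0*(grid - 0.5d0)
  weight  = 1.d-2                     ! made-up quadrature weights
  prod_ij = 1.d0                      ! made-up |chi_i(r) chi_j(r)| values
  center  = (/ 0.d0, 0.d0, 0.d0 /)    ! centre of the envelope Gaussian
  beta    = 1.5d0
  coef    = 0.3d0
  thresh  = 1.d-10                    ! plays the role of thrsh_cycle_tc

  estimate = 0.d0
  do k = 1, npts
    dist2    = sum((grid(:,k) - center)**2)
    estimate = estimate + prod_ij(k) * dexp(-beta*dist2) * weight(k)
  enddo

  if (dabs(coef)*estimate > thresh) then
    print *, 'term kept,  |coef|*estimate = ', dabs(coef)*estimate
  else
    print *, 'term dropped'
  endif
end program screening_demo
```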
--- + + BEGIN_PROVIDER [integer, List_comb_thr_b3_size, (ao_num,ao_num)] +&BEGIN_PROVIDER [integer, max_List_comb_thr_b3_size] + + implicit none + integer :: i_1s,i,j,ipoint integer :: list(ao_num) - do i = 1, ao_num - list(i) = maxval(List_comb_thr_b3_size(:,i)) - enddo - max_List_comb_thr_b3_size = maxval(list) - print*,'max_List_comb_thr_b3_size = ',max_List_comb_thr_b3_size + double precision :: coef,beta,center(3),int_env + double precision :: r(3),weight,dist + + List_comb_thr_b3_size = 0 + print*,'List_env1s_square_size = ',List_env1s_square_size + do i = 1, ao_num + do j = 1, ao_num + do i_1s = 1, List_env1s_square_size + coef = List_env1s_square_coef (i_1s) + beta = List_env1s_square_expo (i_1s) + center(1:3) = List_env1s_square_cent(1:3,i_1s) + if(dabs(coef).lt.thrsh_cycle_tc)cycle + int_env = 0.d0 + do ipoint = 1, n_points_extra_final_grid + r(1:3) = final_grid_points_extra(1:3,ipoint) + weight = final_weight_at_r_vector_extra(ipoint) + dist = ( center(1) - r(1) )*( center(1) - r(1) ) + dist += ( center(2) - r(2) )*( center(2) - r(2) ) + dist += ( center(3) - r(3) )*( center(3) - r(3) ) + int_env += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight + enddo + if(dabs(coef)*dabs(int_env).gt.thrsh_cycle_tc) then + List_comb_thr_b3_size(j,i) += 1 + endif + enddo + enddo + enddo + + do i = 1, ao_num + list(i) = maxval(List_comb_thr_b3_size(:,i)) + enddo + + max_List_comb_thr_b3_size = maxval(list) + print*, ' max_List_comb_thr_b3_size = ',max_List_comb_thr_b3_size END_PROVIDER - BEGIN_PROVIDER [ double precision, List_comb_thr_b3_coef, ( max_List_comb_thr_b3_size,ao_num, ao_num )] -&BEGIN_PROVIDER [ double precision, List_comb_thr_b3_expo, ( max_List_comb_thr_b3_size,ao_num, ao_num )] -&BEGIN_PROVIDER [ double precision, List_comb_thr_b3_cent, (3, max_List_comb_thr_b3_size,ao_num, ao_num )] -&BEGIN_PROVIDER [ double precision, ao_abs_comb_b3_j1b, ( max_List_comb_thr_b3_size ,ao_num, ao_num)] - implicit none - integer :: i_1s,i,j,ipoint,icount - double precision :: coef,beta,center(3),int_j1b - double precision :: r(3),weight,dist - ao_abs_comb_b3_j1b = 10000000.d0 - do i = 1, ao_num - do j = 1, ao_num - icount = 0 - do i_1s = 1, List_all_comb_b3_size - coef = List_all_comb_b3_coef (i_1s) - beta = List_all_comb_b3_expo (i_1s) - beta = max(beta,1.d-12) - center(1:3) = List_all_comb_b3_cent(1:3,i_1s) - if(dabs(coef).lt.thrsh_cycle_tc)cycle - int_j1b = 0.d0 - do ipoint = 1, n_points_extra_final_grid - r(1:3) = final_grid_points_extra(1:3,ipoint) - weight = final_weight_at_r_vector_extra(ipoint) - dist = ( center(1) - r(1) )*( center(1) - r(1) ) - dist += ( center(2) - r(2) )*( center(2) - r(2) ) - dist += ( center(3) - r(3) )*( center(3) - r(3) ) - int_j1b += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight - enddo - if(dabs(coef)*dabs(int_j1b).gt.thrsh_cycle_tc)then - icount += 1 - List_comb_thr_b3_coef(icount,j,i) = coef - List_comb_thr_b3_expo(icount,j,i) = beta - List_comb_thr_b3_cent(1:3,icount,j,i) = center(1:3) - ao_abs_comb_b3_j1b(icount,j,i) = int_j1b - endif - enddo - enddo - enddo +! 
--- + + BEGIN_PROVIDER [double precision, List_comb_thr_b3_coef, ( max_List_comb_thr_b3_size,ao_num,ao_num)] +&BEGIN_PROVIDER [double precision, List_comb_thr_b3_expo, ( max_List_comb_thr_b3_size,ao_num,ao_num)] +&BEGIN_PROVIDER [double precision, List_comb_thr_b3_cent, (3, max_List_comb_thr_b3_size,ao_num,ao_num)] +&BEGIN_PROVIDER [double precision, ao_abs_comb_b3_env , ( max_List_comb_thr_b3_size,ao_num,ao_num)] + + implicit none + integer :: i_1s,i,j,ipoint,icount + double precision :: coef,beta,center(3),int_env + double precision :: r(3),weight,dist + + ao_abs_comb_b3_env = 10000000.d0 + do i = 1, ao_num + do j = 1, ao_num + icount = 0 + do i_1s = 1, List_env1s_square_size + coef = List_env1s_square_coef (i_1s) + beta = List_env1s_square_expo (i_1s) + beta = max(beta,1.d-12) + center(1:3) = List_env1s_square_cent(1:3,i_1s) + if(dabs(coef).lt.thrsh_cycle_tc)cycle + int_env = 0.d0 + do ipoint = 1, n_points_extra_final_grid + r(1:3) = final_grid_points_extra(1:3,ipoint) + weight = final_weight_at_r_vector_extra(ipoint) + dist = ( center(1) - r(1) )*( center(1) - r(1) ) + dist += ( center(2) - r(2) )*( center(2) - r(2) ) + dist += ( center(3) - r(3) )*( center(3) - r(3) ) + int_env += dabs(aos_in_r_array_extra_transp(ipoint,i) * aos_in_r_array_extra_transp(ipoint,j))*dexp(-beta*dist) * weight + enddo + if(dabs(coef)*dabs(int_env).gt.thrsh_cycle_tc)then + icount += 1 + List_comb_thr_b3_coef(icount,j,i) = coef + List_comb_thr_b3_expo(icount,j,i) = beta + List_comb_thr_b3_cent(1:3,icount,j,i) = center(1:3) + ao_abs_comb_b3_env(icount,j,i) = int_env + endif + enddo + enddo + enddo END_PROVIDER +! --- + diff --git a/plugins/local/ao_many_one_e_ints/prim_int_gauss_gauss.irp.f b/plugins/local/ao_many_one_e_ints/prim_int_gauss_gauss.irp.f index 54c2d95b..0eaad715 100644 --- a/plugins/local/ao_many_one_e_ints/prim_int_gauss_gauss.irp.f +++ b/plugins/local/ao_many_one_e_ints/prim_int_gauss_gauss.irp.f @@ -200,7 +200,7 @@ subroutine overlap_gauss_r12_v(D_center, LD_D, delta, A_center, B_center, power_ deallocate(A_new, A_center_new, fact_a_new, iorder_a_new, overlap) -end subroutine overlap_gauss_r12_v +end !--- diff --git a/plugins/local/ao_tc_eff_map/NEED b/plugins/local/ao_tc_eff_map/NEED index f768b75f..b12b0999 100644 --- a/plugins/local/ao_tc_eff_map/NEED +++ b/plugins/local/ao_tc_eff_map/NEED @@ -3,3 +3,5 @@ mo_one_e_ints ao_many_one_e_ints dft_utils_in_r tc_keywords +hamiltonian +jastrow diff --git a/plugins/local/ao_tc_eff_map/compute_ints_eff_pot.irp.f b/plugins/local/ao_tc_eff_map/compute_ints_eff_pot.irp.f index 963a49a6..8097cbc2 100644 --- a/plugins/local/ao_tc_eff_map/compute_ints_eff_pot.irp.f +++ b/plugins/local/ao_tc_eff_map/compute_ints_eff_pot.irp.f @@ -23,10 +23,9 @@ subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_va logical, external :: ao_two_e_integral_zero double precision :: ao_tc_sym_two_e_pot, ao_two_e_integral_erf - double precision :: j1b_gauss_2e_j1, j1b_gauss_2e_j2 + double precision :: env_gauss_2e_j1, env_gauss_2e_j2 - PROVIDE j1b_type thr = ao_integrals_threshold @@ -53,14 +52,6 @@ subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_va integral_erf = ao_two_e_integral_erf(i, k, j, l) integral = integral_erf + integral_pot - !if( j1b_type .eq. 1 ) then - ! !print *, ' j1b type 1 is added' - ! integral = integral + j1b_gauss_2e_j1(i, k, j, l) - !elseif( j1b_type .eq. 2 ) then - ! !print *, ' j1b type 2 is added' - ! 
integral = integral + j1b_gauss_2e_j2(i, k, j, l) - !endif - if(abs(integral) < thr) then cycle endif diff --git a/plugins/local/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f b/plugins/local/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f index 50c396de..bcd2a9a5 100644 --- a/plugins/local/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f +++ b/plugins/local/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f @@ -1,10 +1,10 @@ ! --- -BEGIN_PROVIDER [ double precision, j1b_gauss_hermII, (ao_num,ao_num)] +BEGIN_PROVIDER [double precision, env_gauss_hermII, (ao_num,ao_num)] BEGIN_DOC ! - ! :math:`\langle \chi_A | -0.5 \grad \tau_{1b} \cdot \grad \tau_{1b} | \chi_B \rangle` + ! :math:`\langle \chi_A | -0.5 \grad \tau_{env} \cdot \grad \tau_{env} | \chi_B \rangle` ! END_DOC @@ -22,8 +22,6 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_hermII, (ao_num,ao_num)] double precision :: int_gauss_4G - PROVIDE j1b_type j1b_pen j1b_coeff - ! -------------------------------------------------------------------------------- ! -- Dummy call to provide everything dim1 = 100 @@ -38,10 +36,7 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_hermII, (ao_num,ao_num)] ! -------------------------------------------------------------------------------- - j1b_gauss_hermII(1:ao_num,1:ao_num) = 0.d0 - - if(j1b_type .eq. 1) then - ! \tau_1b = \sum_iA -[1 - exp(-alpha_A r_iA^2)] + env_gauss_hermII(1:ao_num,1:ao_num) = 0.d0 !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -51,113 +46,51 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_hermII, (ao_num,ao_num)] !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & !$OMP ao_power, ao_nucl, nucl_coord, & !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_pen, j1b_gauss_hermII) + !$OMP nucl_num, env_expo, env_gauss_hermII) !$OMP DO SCHEDULE (dynamic) - do j = 1, ao_num - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k1 = 1, nucl_num - gama1 = j1b_pen(k1) - C_center1(1:3) = nucl_coord(k1,1:3) - - do k2 = 1, nucl_num - gama2 = j1b_pen(k2) - C_center2(1:3) = nucl_coord(k2,1:3) - - ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > - c1 = int_gauss_4G( A_center, B_center, C_center1, C_center2 & - , power_A, power_B, alpha, beta, gama1, gama2 ) - - c = c - 2.d0 * gama1 * gama2 * c1 - enddo + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k1 = 1, nucl_num + gama1 = env_expo(k1) + C_center1(1:3) = nucl_coord(k1,1:3) + + do k2 = 1, nucl_num + gama2 = env_expo(k2) + C_center2(1:3) = nucl_coord(k2,1:3) + + ! 
< XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > + c1 = int_gauss_4G( A_center, B_center, C_center1, C_center2 & + , power_A, power_B, alpha, beta, gama1, gama2 ) + + c = c - 2.d0 * gama1 * gama2 * c1 enddo - - j1b_gauss_hermII(i,j) = j1b_gauss_hermII(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c enddo + + env_gauss_hermII(i,j) = env_gauss_hermII(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c enddo enddo enddo + enddo !$OMP END DO !$OMP END PARALLEL - elseif(j1b_type .eq. 2) then - ! \tau_1b = \sum_iA [c_A exp(-alpha_A r_iA^2)] - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, k1, k2, l, m, alpha, beta, gama1, gama2, & - !$OMP A_center, B_center, C_center1, C_center2, & - !$OMP power_A, power_B, num_A, num_B, c1, c, & - !$OMP coef1, coef2) & - !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & - !$OMP ao_power, ao_nucl, nucl_coord, & - !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_pen, j1b_gauss_hermII, & - !$OMP j1b_coeff) - !$OMP DO SCHEDULE (dynamic) - do j = 1, ao_num - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k1 = 1, nucl_num - gama1 = j1b_pen (k1) - coef1 = j1b_coeff(k1) - C_center1(1:3) = nucl_coord(k1,1:3) - - do k2 = 1, nucl_num - gama2 = j1b_pen (k2) - coef2 = j1b_coeff(k2) - C_center2(1:3) = nucl_coord(k2,1:3) - - ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > - c1 = int_gauss_4G( A_center, B_center, C_center1, C_center2 & - , power_A, power_B, alpha, beta, gama1, gama2 ) - - c = c - 2.d0 * gama1 * gama2 * coef1 * coef2 * c1 - enddo - enddo - - j1b_gauss_hermII(i,j) = j1b_gauss_hermII(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c - enddo - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - endif - END_PROVIDER diff --git a/plugins/local/ao_tc_eff_map/one_e_1bgauss_lap.irp.f b/plugins/local/ao_tc_eff_map/one_e_1bgauss_lap.irp.f index 0a0b7610..6c9365c9 100644 --- a/plugins/local/ao_tc_eff_map/one_e_1bgauss_lap.irp.f +++ b/plugins/local/ao_tc_eff_map/one_e_1bgauss_lap.irp.f @@ -1,10 +1,10 @@ ! --- -BEGIN_PROVIDER [ double precision, j1b_gauss_hermI, (ao_num,ao_num)] +BEGIN_PROVIDER [double precision, env_gauss_hermI, (ao_num,ao_num)] BEGIN_DOC ! - ! :math:`\langle \chi_A | -0.5 \Delta \tau_{1b} | \chi_B \rangle` + ! :math:`\langle \chi_A | -0.5 \Delta \tau_{env} | \chi_B \rangle` ! END_DOC @@ -22,8 +22,6 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_hermI, (ao_num,ao_num)] double precision :: int_gauss_r0, int_gauss_r2 - PROVIDE j1b_type j1b_pen j1b_coeff - ! -------------------------------------------------------------------------------- ! -- Dummy call to provide everything dim1 = 100 @@ -37,10 +35,7 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_hermI, (ao_num,ao_num)] , overlap_y, d_a_2, overlap_z, overlap, dim1 ) ! -------------------------------------------------------------------------------- - j1b_gauss_hermI(1:ao_num,1:ao_num) = 0.d0 - - if(j1b_type .eq. 1) then - ! 
\tau_1b = \sum_iA -[1 - exp(-alpha_A r_iA^2)] + env_gauss_hermI(1:ao_num,1:ao_num) = 0.d0 !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -50,109 +45,50 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_hermI, (ao_num,ao_num)] !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & !$OMP ao_power, ao_nucl, nucl_coord, & !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_pen, j1b_gauss_hermI) + !$OMP nucl_num, env_expo, env_gauss_hermI) !$OMP DO SCHEDULE (dynamic) - do j = 1, ao_num - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k = 1, nucl_num - gama = j1b_pen(k) - C_center(1:3) = nucl_coord(k,1:3) - - ! < XA | exp[-gama r_C^2] | XB > - c1 = int_gauss_r0( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - ! < XA | r_A^2 exp[-gama r_C^2] | XB > - c2 = int_gauss_r2( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - c = c + 3.d0 * gama * c1 - 2.d0 * gama * gama * c2 - enddo - - j1b_gauss_hermI(i,j) = j1b_gauss_hermI(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k = 1, nucl_num + gama = env_expo(k) + C_center(1:3) = nucl_coord(k,1:3) + + ! < XA | exp[-gama r_C^2] | XB > + c1 = int_gauss_r0( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + ! < XA | r_A^2 exp[-gama r_C^2] | XB > + c2 = int_gauss_r2( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + c = c + 3.d0 * gama * c1 - 2.d0 * gama * gama * c2 enddo + + env_gauss_hermI(i,j) = env_gauss_hermI(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c enddo enddo enddo + enddo !$OMP END DO !$OMP END PARALLEL - elseif(j1b_type .eq. 2) then - ! \tau_1b = \sum_iA [c_A exp(-alpha_A r_iA^2)] - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, coef, & - !$OMP A_center, B_center, C_center, power_A, power_B, & - !$OMP num_A, num_B, c1, c2, c) & - !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & - !$OMP ao_power, ao_nucl, nucl_coord, & - !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_pen, j1b_gauss_hermI, & - !$OMP j1b_coeff) - !$OMP DO SCHEDULE (dynamic) - do j = 1, ao_num - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k = 1, nucl_num - gama = j1b_pen (k) - coef = j1b_coeff(k) - C_center(1:3) = nucl_coord(k,1:3) - - ! < XA | exp[-gama r_C^2] | XB > - c1 = int_gauss_r0( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - ! 
< XA | r_A^2 exp[-gama r_C^2] | XB > - c2 = int_gauss_r2( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - c = c + 3.d0 * gama * coef * c1 - 2.d0 * gama * gama * coef * c2 - enddo - - j1b_gauss_hermI(i,j) = j1b_gauss_hermI(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c - enddo - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - endif - END_PROVIDER diff --git a/plugins/local/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f b/plugins/local/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f index bd881d32..0ff23716 100644 --- a/plugins/local/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f +++ b/plugins/local/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f @@ -1,10 +1,11 @@ + ! --- -BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] +BEGIN_PROVIDER [double precision, env_gauss_nonherm, (ao_num,ao_num)] BEGIN_DOC ! - ! j1b_gauss_nonherm(i,j) = \langle \chi_j | - grad \tau_{1b} \cdot grad | \chi_i \rangle + ! env_gauss_nonherm(i,j) = \langle \chi_j | - grad \tau_{env} \cdot grad | \chi_i \rangle ! END_DOC @@ -22,8 +23,6 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] double precision :: int_gauss_deriv - PROVIDE j1b_type j1b_pen j1b_coeff - ! -------------------------------------------------------------------------------- ! -- Dummy call to provide everything dim1 = 100 @@ -38,10 +37,8 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] ! -------------------------------------------------------------------------------- - j1b_gauss_nonherm(1:ao_num,1:ao_num) = 0.d0 + env_gauss_nonherm(1:ao_num,1:ao_num) = 0.d0 - if(j1b_type .eq. 1) then - ! \tau_1b = \sum_iA -[1 - exp(-alpha_A r_iA^2)] !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -51,101 +48,46 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & !$OMP ao_power, ao_nucl, nucl_coord, & !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_pen, j1b_gauss_nonherm) + !$OMP nucl_num, env_expo, env_gauss_nonherm) !$OMP DO SCHEDULE (dynamic) - do j = 1, ao_num - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k = 1, nucl_num - gama = j1b_pen(k) - C_center(1:3) = nucl_coord(k,1:3) - - ! \langle \chi_A | exp[-gama r_C^2] r_C \cdot grad | \chi_B \rangle - c1 = int_gauss_deriv( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - c = c + 2.d0 * gama * c1 - enddo - - j1b_gauss_nonherm(i,j) = j1b_gauss_nonherm(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k = 1, nucl_num + gama = env_expo(k) + C_center(1:3) = nucl_coord(k,1:3) + + ! 
\langle \chi_A | exp[-gama r_C^2] r_C \cdot grad | \chi_B \rangle + c1 = int_gauss_deriv( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + c = c + 2.d0 * gama * c1 enddo + + env_gauss_nonherm(i,j) = env_gauss_nonherm(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c enddo enddo enddo + enddo !$OMP END DO !$OMP END PARALLEL - elseif(j1b_type .eq. 2) then - ! \tau_1b = \sum_iA [c_A exp(-alpha_A r_iA^2)] - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, coef, & - !$OMP A_center, B_center, C_center, power_A, power_B, & - !$OMP num_A, num_B, c1, c) & - !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & - !$OMP ao_power, ao_nucl, nucl_coord, & - !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_pen, j1b_gauss_nonherm, & - !$OMP j1b_coeff) - !$OMP DO SCHEDULE (dynamic) - do j = 1, ao_num - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k = 1, nucl_num - gama = j1b_pen (k) - coef = j1b_coeff(k) - C_center(1:3) = nucl_coord(k,1:3) - - ! \langle \chi_A | exp[-gama r_C^2] r_C \cdot grad | \chi_B \rangle - c1 = int_gauss_deriv( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - c = c + 2.d0 * gama * coef * c1 - enddo - - j1b_gauss_nonherm(i,j) = j1b_gauss_nonherm(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c - enddo - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - endif - END_PROVIDER diff --git a/plugins/local/ao_tc_eff_map/providers_ao_eff_pot.irp.f b/plugins/local/ao_tc_eff_map/providers_ao_eff_pot.irp.f index 055bf323..1c454e40 100644 --- a/plugins/local/ao_tc_eff_map/providers_ao_eff_pot.irp.f +++ b/plugins/local/ao_tc_eff_map/providers_ao_eff_pot.irp.f @@ -22,9 +22,6 @@ BEGIN_PROVIDER [ logical, ao_tc_sym_two_e_pot_in_map ] integer :: kk, m, j1, i1, lmax character*(64) :: fmt - !double precision :: j1b_gauss_coul_debug - !integral = j1b_gauss_coul_debug(1,1,1,1) - integral = ao_tc_sym_two_e_pot(1,1,1,1) double precision :: map_mb diff --git a/plugins/local/ao_tc_eff_map/two_e_1bgauss_j1.irp.f b/plugins/local/ao_tc_eff_map/two_e_1bgauss_j1.irp.f index c36ee9b4..572406e2 100644 --- a/plugins/local/ao_tc_eff_map/two_e_1bgauss_j1.irp.f +++ b/plugins/local/ao_tc_eff_map/two_e_1bgauss_j1.irp.f @@ -1,6 +1,6 @@ ! --- -double precision function j1b_gauss_2e_j1(i, j, k, l) +double precision function env_gauss_2e_j1(i, j, k, l) BEGIN_DOC ! 
@@ -36,10 +36,10 @@ double precision function j1b_gauss_2e_j1(i, j, k, l) double precision :: I_center(3), J_center(3), K_center(3), L_center(3) double precision :: ff, gg, cx, cy, cz - double precision :: j1b_gauss_2e_j1_schwartz + double precision :: env_gauss_2e_j1_schwartz if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then - j1b_gauss_2e_j1 = j1b_gauss_2e_j1_schwartz(i, j, k, l) + env_gauss_2e_j1 = env_gauss_2e_j1_schwartz(i, j, k, l) return endif @@ -59,7 +59,7 @@ double precision function j1b_gauss_2e_j1(i, j, k, l) L_center(p) = nucl_coord(num_l,p) enddo - j1b_gauss_2e_j1 = 0.d0 + env_gauss_2e_j1 = 0.d0 do p = 1, ao_prim_num(i) coef1 = ao_coef_normalized_ordered_transp(p, i) @@ -89,18 +89,18 @@ double precision function j1b_gauss_2e_j1(i, j, k, l) , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) - j1b_gauss_2e_j1 = j1b_gauss_2e_j1 + coef4 * ( cx + cy + cz ) + env_gauss_2e_j1 = env_gauss_2e_j1 + coef4 * ( cx + cy + cz ) enddo ! s enddo ! r enddo ! q enddo ! p return -end function j1b_gauss_2e_j1 +end ! --- -double precision function j1b_gauss_2e_j1_schwartz(i, j, k, l) +double precision function env_gauss_2e_j1_schwartz(i, j, k, l) BEGIN_DOC ! @@ -137,8 +137,6 @@ double precision function j1b_gauss_2e_j1_schwartz(i, j, k, l) double precision :: schwartz_ij, thr double precision, allocatable :: schwartz_kl(:,:) - PROVIDE j1b_pen - dim1 = n_pt_max_integrals thr = ao_integrals_threshold * ao_integrals_threshold @@ -186,8 +184,7 @@ double precision function j1b_gauss_2e_j1_schwartz(i, j, k, l) schwartz_kl(0,0) = max( schwartz_kl(0,r) , schwartz_kl(0,0) ) enddo - - j1b_gauss_2e_j1_schwartz = 0.d0 + env_gauss_2e_j1_schwartz = 0.d0 do p = 1, ao_prim_num(i) expo1 = ao_expo_ordered_transp(p, i) @@ -226,7 +223,7 @@ double precision function j1b_gauss_2e_j1_schwartz(i, j, k, l) , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) - j1b_gauss_2e_j1_schwartz = j1b_gauss_2e_j1_schwartz + coef4 * ( cx + cy + cz ) + env_gauss_2e_j1_schwartz = env_gauss_2e_j1_schwartz + coef4 * ( cx + cy + cz ) enddo ! s enddo ! r enddo ! q @@ -235,7 +232,7 @@ double precision function j1b_gauss_2e_j1_schwartz(i, j, k, l) deallocate( schwartz_kl ) return -end function j1b_gauss_2e_j1_schwartz +end ! --- @@ -263,14 +260,12 @@ subroutine get_cxcycz_j1( dim1, cx, cy, cz & double precision :: general_primitive_integral_erf_shifted double precision :: general_primitive_integral_coul_shifted - PROVIDE j1b_pen - cx = 0.d0 cy = 0.d0 cz = 0.d0 do ii = 1, nucl_num - expoii = j1b_pen(ii) + expoii = env_expo(ii) Centerii(1:3) = nucl_coord(ii, 1:3) call gaussian_product(pp1, P1_center, expoii, Centerii, factii, pp2, P2_center) diff --git a/plugins/local/ao_tc_eff_map/two_e_1bgauss_j2.irp.f b/plugins/local/ao_tc_eff_map/two_e_1bgauss_j2.irp.f index a61b5336..a04656c3 100644 --- a/plugins/local/ao_tc_eff_map/two_e_1bgauss_j2.irp.f +++ b/plugins/local/ao_tc_eff_map/two_e_1bgauss_j2.irp.f @@ -1,6 +1,6 @@ ! --- -double precision function j1b_gauss_2e_j2(i, j, k, l) +double precision function env_gauss_2e_j2(i, j, k, l) BEGIN_DOC ! 
@@ -36,12 +36,12 @@ double precision function j1b_gauss_2e_j2(i, j, k, l) double precision :: I_center(3), J_center(3), K_center(3), L_center(3) double precision :: ff, gg, cx, cy, cz - double precision :: j1b_gauss_2e_j2_schwartz + double precision :: env_gauss_2e_j2_schwartz dim1 = n_pt_max_integrals if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then - j1b_gauss_2e_j2 = j1b_gauss_2e_j2_schwartz(i, j, k, l) + env_gauss_2e_j2 = env_gauss_2e_j2_schwartz(i, j, k, l) return endif @@ -61,7 +61,7 @@ double precision function j1b_gauss_2e_j2(i, j, k, l) L_center(p) = nucl_coord(num_l,p) enddo - j1b_gauss_2e_j2 = 0.d0 + env_gauss_2e_j2 = 0.d0 do p = 1, ao_prim_num(i) coef1 = ao_coef_normalized_ordered_transp(p, i) @@ -91,18 +91,18 @@ double precision function j1b_gauss_2e_j2(i, j, k, l) , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) - j1b_gauss_2e_j2 = j1b_gauss_2e_j2 + coef4 * ( cx + cy + cz ) + env_gauss_2e_j2 = env_gauss_2e_j2 + coef4 * ( cx + cy + cz ) enddo ! s enddo ! r enddo ! q enddo ! p return -end function j1b_gauss_2e_j2 +end ! --- -double precision function j1b_gauss_2e_j2_schwartz(i, j, k, l) +double precision function env_gauss_2e_j2_schwartz(i, j, k, l) BEGIN_DOC ! @@ -187,7 +187,7 @@ double precision function j1b_gauss_2e_j2_schwartz(i, j, k, l) enddo - j1b_gauss_2e_j2_schwartz = 0.d0 + env_gauss_2e_j2_schwartz = 0.d0 do p = 1, ao_prim_num(i) expo1 = ao_expo_ordered_transp(p, i) @@ -226,7 +226,7 @@ double precision function j1b_gauss_2e_j2_schwartz(i, j, k, l) , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) - j1b_gauss_2e_j2_schwartz = j1b_gauss_2e_j2_schwartz + coef4 * ( cx + cy + cz ) + env_gauss_2e_j2_schwartz = env_gauss_2e_j2_schwartz + coef4 * ( cx + cy + cz ) enddo ! s enddo ! r enddo ! q @@ -235,7 +235,7 @@ double precision function j1b_gauss_2e_j2_schwartz(i, j, k, l) deallocate( schwartz_kl ) return -end function j1b_gauss_2e_j2_schwartz +end ! 
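The renamed `env_gauss_2e_j1`/`env_gauss_2e_j2` functions switch to their `*_schwartz` variants once the number of primitive products exceeds 1024; those variants rely on precomputed diagonal estimates and the Cauchy-Schwarz bound (ij|kl)^2 <= (ij|ij)(kl|kl) to skip negligible primitive quartets against the squared integral threshold. The sketch below is not from the repository; the diagonal estimates are made up, and it only illustrates the skip test.

```fortran
! Minimal sketch (not from the patch) of Cauchy-Schwarz screening:
! a quartet is skipped when diag(ij)*diag(kl), an upper bound on (ij|kl)^2,
! falls below the squared integral threshold.
program schwarz_screening_demo
  implicit none
  integer, parameter :: npair = 4
  integer :: ij, kl, nkept
  double precision :: diag(npair), bound, thr

  diag = (/ 3.2d0, 1.1d-2, 5.0d-7, 8.4d-12 /)   ! made-up (ij|ij) diagonal estimates
  thr  = 1.d-15                                 ! threshold on the squared integral

  nkept = 0
  do ij = 1, npair
    do kl = 1, npair
      bound = diag(ij) * diag(kl)               ! >= (ij|kl)^2 by Cauchy-Schwarz
      if (bound < thr) cycle                    ! quartet skipped
      nkept = nkept + 1
    enddo
  enddo

  print *, 'quartets kept: ', nkept, ' out of ', npair*npair
end program schwarz_screening_demo
```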
--- @@ -263,15 +263,13 @@ subroutine get_cxcycz_j2( dim1, cx, cy, cz & double precision :: general_primitive_integral_erf_shifted double precision :: general_primitive_integral_coul_shifted - PROVIDE j1b_pen j1b_coeff - cx = 0.d0 cy = 0.d0 cz = 0.d0 do ii = 1, nucl_num - expoii = j1b_pen (ii) - coefii = j1b_coeff(ii) + expoii = env_expo(ii) + coefii = env_coef(ii) Centerii(1:3) = nucl_coord(ii, 1:3) call gaussian_product(pp1, P1_center, expoii, Centerii, factii, pp2, P2_center) diff --git a/plugins/local/ao_tc_eff_map/useful_sub.irp.f b/plugins/local/ao_tc_eff_map/useful_sub.irp.f index 4cfdcad2..4c5efac1 100644 --- a/plugins/local/ao_tc_eff_map/useful_sub.irp.f +++ b/plugins/local/ao_tc_eff_map/useful_sub.irp.f @@ -174,7 +174,7 @@ double precision function general_primitive_integral_coul_shifted( dim general_primitive_integral_coul_shifted = fact_p * fact_q * accu * pi_5_2 * p_inv * q_inv / dsqrt(p_plus_q) return -end function general_primitive_integral_coul_shifted +end !______________________________________________________________________________________________________________________ !______________________________________________________________________________________________________________________ @@ -354,7 +354,7 @@ double precision function general_primitive_integral_erf_shifted( dim general_primitive_integral_erf_shifted = fact_p * fact_q * accu * pi_5_2 * p_inv * q_inv / dsqrt(p_plus_q) return -end function general_primitive_integral_erf_shifted +end !______________________________________________________________________________________________________________________ !______________________________________________________________________________________________________________________ @@ -362,3 +362,48 @@ end function general_primitive_integral_erf_shifted + +! --- + +subroutine inv_r_times_poly(r, dist_r, dist_vec, poly) + + BEGIN_DOC + ! + ! returns + ! + ! poly(1) = x / sqrt(x^2+y^2+z^2), poly(2) = y / sqrt(x^2+y^2+z^2), poly(3) = z / sqrt(x^2+y^2+z^2) + ! + ! with the arguments + ! + ! r(1) = x, r(2) = y, r(3) = z, dist_r = sqrt(x^2+y^2+z^2) + ! + ! dist_vec(1) = sqrt(y^2+z^2), dist_vec(2) = sqrt(x^2+z^2), dist_vec(3) = sqrt(x^2+y^2) + ! + END_DOC + + implicit none + double precision, intent(in) :: r(3), dist_r, dist_vec(3) + double precision, intent(out) :: poly(3) + integer :: i + double precision :: inv_dist + + if (dist_r .gt. 1.d-8)then + inv_dist = 1.d0/dist_r + do i = 1, 3 + poly(i) = r(i) * inv_dist + enddo + else + do i = 1, 3 + if(dabs(r(i)).lt.dist_vec(i)) then + inv_dist = 1.d0/dist_r + poly(i) = r(i) * inv_dist + else + poly(i) = 1.d0 + endif + enddo + endif + +end + +! --- + diff --git a/plugins/local/bi_ort_ints/biorthog_mo_for_h.irp.f b/plugins/local/bi_ort_ints/biorthog_mo_for_h.irp.f index 452c13f1..613a684f 100644 --- a/plugins/local/bi_ort_ints/biorthog_mo_for_h.irp.f +++ b/plugins/local/bi_ort_ints/biorthog_mo_for_h.irp.f @@ -1,4 +1,39 @@ + +! --- + +BEGIN_PROVIDER [double precision, ao_two_e_coul, (ao_num, ao_num, ao_num, ao_num) ] + + BEGIN_DOC + ! + ! ao_two_e_coul(k,i,l,j) = ( k i | 1/r12 | l j ) = < l k | 1/r12 | j i > + ! + END_DOC + + integer :: i, j, k, l + double precision, external :: get_ao_two_e_integral + + PROVIDE ao_integrals_map + + !$OMP PARALLEL DEFAULT(NONE) & + !$OMP SHARED(ao_num, ao_two_e_coul, ao_integrals_map) & + !$OMP PRIVATE(i, j, k, l) + !$OMP DO + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ! 
< 1:k, 2:l | 1:i, 2:j > + ao_two_e_coul(k,i,l,j) = get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + +END_PROVIDER + ! --- double precision function bi_ortho_mo_coul_ints(l, k, j, i) @@ -25,7 +60,7 @@ double precision function bi_ortho_mo_coul_ints(l, k, j, i) enddo enddo -end function bi_ortho_mo_coul_ints +end ! --- diff --git a/plugins/local/bi_ort_ints/one_e_bi_ort.irp.f b/plugins/local/bi_ort_ints/one_e_bi_ort.irp.f index 0ecc2a84..85cae273 100644 --- a/plugins/local/bi_ort_ints/one_e_bi_ort.irp.f +++ b/plugins/local/bi_ort_ints/one_e_bi_ort.irp.f @@ -8,23 +8,6 @@ BEGIN_PROVIDER [double precision, ao_one_e_integrals_tc_tot, (ao_num,ao_num)] ao_one_e_integrals_tc_tot = ao_one_e_integrals - !provide j1b_type - - !if( (j1b_type .eq. 1) .or. (j1b_type .eq. 2) ) then - ! - ! print *, ' do things properly !' - ! stop - - ! !do i = 1, ao_num - ! ! do j = 1, ao_num - ! ! ao_one_e_integrals_tc_tot(j,i) += ( j1b_gauss_hermI (j,i) & - ! ! + j1b_gauss_hermII (j,i) & - ! ! + j1b_gauss_nonherm(j,i) ) - ! ! enddo - ! !enddo - - !endif - END_PROVIDER ! --- diff --git a/plugins/local/bi_ort_ints/total_twoe_pot.irp.f b/plugins/local/bi_ort_ints/total_twoe_pot.irp.f index 37a31a51..5e6a24e9 100644 --- a/plugins/local/bi_ort_ints/total_twoe_pot.irp.f +++ b/plugins/local/bi_ort_ints/total_twoe_pot.irp.f @@ -1,91 +1,4 @@ - -! --- - -BEGIN_PROVIDER [double precision, ao_two_e_vartc_tot, (ao_num, ao_num, ao_num, ao_num) ] - - integer :: i, j, k, l - - provide j1b_type - provide mo_r_coef mo_l_coef - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ao_two_e_vartc_tot(k,i,l,j) = ao_vartc_int_chemist(k,i,l,j) - enddo - enddo - enddo - enddo - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_num) ] - - BEGIN_DOC - ! - ! ao_two_e_tc_tot(k,i,l,j) = (ki|V^TC(r_12)|lj) = where V^TC(r_12) is the total TC operator - ! - ! including both hermitian and non hermitian parts. THIS IS IN CHEMIST NOTATION. - ! - ! WARNING :: non hermitian ! acts on "the right functions" (i,j) - ! - END_DOC - - integer :: i, j, k, l - double precision :: integral_sym, integral_nsym - double precision, external :: get_ao_tc_sym_two_e_pot - - provide j1b_type - - if(j1b_type .eq. 0) then - - PROVIDE ao_tc_sym_two_e_pot_in_map - - !!! TODO :: OPENMP - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - - integral_sym = get_ao_tc_sym_two_e_pot(i, j, k, l, ao_tc_sym_two_e_pot_map) - ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis - integral_nsym = ao_non_hermit_term_chemist(k,i,l,j) - - !print *, ' sym integ = ', integral_sym - !print *, ' non-sym integ = ', integral_nsym - - ao_two_e_tc_tot(k,i,l,j) = integral_sym + integral_nsym - !write(111,*) ao_two_e_tc_tot(k,i,l,j) - enddo - enddo - enddo - enddo - - else - - PROVIDE ao_tc_int_chemist - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ao_two_e_tc_tot(k,i,l,j) = ao_tc_int_chemist(k,i,l,j) - !write(222,*) ao_two_e_tc_tot(k,i,l,j) - enddo - enddo - enddo - enddo - - FREE ao_tc_int_chemist - - endif - -END_PROVIDER - ! --- double precision function bi_ortho_mo_ints(l, k, j, i) @@ -118,8 +31,6 @@ end function bi_ortho_mo_ints ! --- -! TODO :: transform into DEGEMM - BEGIN_PROVIDER [double precision, mo_bi_ortho_tc_two_e_chemist, (mo_num, mo_num, mo_num, mo_num)] BEGIN_DOC @@ -267,7 +178,6 @@ END_PROVIDER ! 
--- - BEGIN_PROVIDER [ double precision, mo_bi_ortho_tc_two_e_jj, (mo_num,mo_num)] &BEGIN_PROVIDER [ double precision, mo_bi_ortho_tc_two_e_jj_exchange, (mo_num,mo_num)] &BEGIN_PROVIDER [ double precision, mo_bi_ortho_tc_two_e_jj_anti, (mo_num,mo_num)] diff --git a/plugins/local/jastrow/EZFIO.cfg b/plugins/local/jastrow/EZFIO.cfg index b41185a3..23dde8ea 100644 --- a/plugins/local/jastrow/EZFIO.cfg +++ b/plugins/local/jastrow/EZFIO.cfg @@ -1,9 +1,22 @@ -[jast_type] -doc: Type of Jastrow [None| Mu | Qmckl] + +[j2e_type] type: character*(32) -interface: ezfio, provider, ocaml +doc: type of the 2e-Jastrow: [ None | Mu | Mur | Qmckl ] +interface: ezfio,provider,ocaml +default: Mu + +[j1e_type] +type: character*(32) +doc: type of the 1e-Jastrow: [ None | Gauss | Charge_Harmonizer ] +interface: ezfio,provider,ocaml default: None +[env_type] +type: character*(32) +doc: type of envelop for Jastrow: [ None | Prod_Gauss | Sum_Gauss | Sum_Slat | Sum_Quartic ] +interface: ezfio, provider, ocaml +default: Sum_Gauss + [jast_qmckl_type_nucl_num] doc: Number of different nuclei types in QMCkl jastrow type: integer @@ -64,6 +77,77 @@ type: double precision size: (jastrow.jast_qmckl_c_vector_size) interface: ezfio, provider +[j1e_size] +type: integer +doc: number of functions per atom in 1e-Jastrow +interface: ezfio,provider,ocaml +default: 1 +[j1e_coef] +type: double precision +doc: linear coef of functions in 1e-Jastrow +interface: ezfio +size: (jastrow.j1e_size,nuclei.nucl_num) +[j1e_coef_ao] +type: double precision +doc: coefficients of the 1-electrob Jastrow in AOs +interface: ezfio +size: (ao_basis.ao_num) + +[j1e_coef_ao2] +type: double precision +doc: coefficients of the 1-electron Jastrow in AOsxAOs +interface: ezfio +size: (ao_basis.ao_num,ao_basis.ao_num) + +[j1e_coef_ao3] +type: double precision +doc: coefficients of the 1-electron Jastrow in AOsxAOs +interface: ezfio +size: (ao_basis.ao_num,3) + +[j1e_expo] +type: double precision +doc: exponenets of functions in 1e-Jastrow +interface: ezfio +size: (jastrow.j1e_size,nuclei.nucl_num) + +[env_expo] +type: double precision +doc: exponents of the envelop for Jastrow +interface: ezfio +size: (nuclei.nucl_num) + +[env_coef] +type: double precision +doc: coefficients of the envelop for Jastrow +interface: ezfio +size: (nuclei.nucl_num) + +[murho_type] +type: integer +doc: type of mu(rho) Jastrow +interface: ezfio, provider, ocaml +default: 0 + +[ng_fit_jast] +type: integer +doc: nb of Gaussians used to fit Jastrow fcts +interface: ezfio,provider,ocaml +default: 20 + +[a_boys] +type: double precision +doc: cutting of the interaction in the range separated model +interface: ezfio,provider,ocaml +default: 1.0 +ezfio_name: a_boys + +[nu_erf] +type: double precision +doc: e-e correlation in the core +interface: ezfio,provider,ocaml +default: 1.0 +ezfio_name: nu_erf diff --git a/plugins/local/jastrow/NEED b/plugins/local/jastrow/NEED index f03c11fd..7d8fe789 100644 --- a/plugins/local/jastrow/NEED +++ b/plugins/local/jastrow/NEED @@ -1,2 +1,3 @@ nuclei electrons +ao_basis diff --git a/plugins/local/jastrow/README.md b/plugins/local/jastrow/README.md index aefb6ad5..a9e568db 100644 --- a/plugins/local/jastrow/README.md +++ b/plugins/local/jastrow/README.md @@ -1,3 +1,78 @@ # Jastrow -Information relative to the Jastrow factor in trans-correlated calculations. +Information related to the Jastrow factor in trans-correlated calculations. + +The main keywords are: +- `j2e_type` +- `j1e_type` +- `env_type` + +## j2e_type Options + +1. 
**None:** No 2e-Jastrow is used.
+
+2. **Mu:** 2e-Jastrow inspired by Range Separated Density Functional Theory. It has the following shape:
+
+   $$ J_{2e} = \sum_{i<j} u(\mu, r_{ij}) $$
+
+   with,
+
+   $$ u(\mu, r_{12}) = \frac{r_{12}}{2} \Big[ 1 - \mathrm{erf}(\mu\, r_{12}) \Big] - \frac{1}{2 \sqrt{\pi}\, \mu}\, e^{-(\mu\, r_{12})^{2}} $$
+
+3. **Mu_Nu:** Valence and core correlation terms are used:
+
+   $$ J_{2e} = \sum_{i<j} u(\mu, r_{ij})\, v(\mathbf{r}_i)\, v(\mathbf{r}_j) + u(\nu, r_{ij}) \Big[ 1 - v(\mathbf{r}_i)\, v(\mathbf{r}_j) \Big] $$
+
+   with envelope \(v\).
+
+
+## env_type Options
+
+The 2-electron Jastrow is multiplied by an envelope \(v\):
+
+$$ J_{2e}(\mathbf{r}_1, \mathbf{r}_2) = u(\mu, r_{12})\, v(\mathbf{r}_1)\, v(\mathbf{r}_2) $$
+
+- if `env_type` is **None**: No envelope is used.
+
+- if `env_type` is **Prod_Gauss**:
+
+  $$ v(\mathbf{r}) = \prod_{A} \Big( 1 - e^{-\alpha_A (\mathbf{r} - \mathbf{R}_A)^2} \Big) $$
+
+- if `env_type` is **Sum_Gauss**:
+
+  $$ v(\mathbf{r}) = 1 - \sum_{A} c_A\, e^{-\alpha_A (\mathbf{r} - \mathbf{R}_A)^2} $$
+
+Here, \(A\) designates the nuclei, and the coefficients \(c_A\) and exponents \(\alpha_A\) are defined in the tables `env_coef` and `env_expo`, respectively (a standalone sketch evaluating these expressions appears at the end of this README).
+
+
+## j1e_type Options
+
+The 1-electron Jastrow used is:
+
+$$ J_{1e} = \sum_{i} j_{1e}(\mathbf{r}_i) $$
+
+- if `j1e_type` is **None**: No one-electron Jastrow is used.
+
+- if `j1e_type` is **Gauss**: We use
+
+  $$ j_{1e}(\mathbf{r}) = \sum_{A} \sum_{p} c_{pA}\, e^{-\alpha_{pA} (\mathbf{r} - \mathbf{R}_A)^2} $$
+
+  where the coefficients \(c_{pA}\) and exponents \(\alpha_{pA}\) are defined by the tables `j1e_coef` and `j1e_expo`, respectively.
+
+- if `j1e_type` is **Charge_Harmonizer**: The one-electron Jastrow factor aims to offset the adverse impact on the charge density induced by the two-electron factor.
+
+- if `j1e_type` is **Charge_Harmonizer_AO**: The one-electron Jastrow factor **Charge_Harmonizer** is fitted by products of atomic orbitals:
+
+  $$ j_{1e}(\mathbf{r}) \approx \sum_{\mu \nu} C_{\mu \nu}\, \chi_{\mu}(\mathbf{r})\, \chi_{\nu}(\mathbf{r}) $$
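The following is a minimal, standalone Fortran sketch of the expressions above. It is not part of the Quantum Package sources: the program name, the helpers `u_mu` and `v_env`, and all numerical values are illustrative only. It evaluates the Mu two-electron factor \(u(\mu, r_{12})\), a Sum_Gauss envelope \(v\), and their product \(J_{2e} = u(\mu, r_{12})\, v(\mathbf{r}_1)\, v(\mathbf{r}_2)\) at one pair of points:

```fortran
! Standalone illustration (hypothetical, not part of Quantum Package):
! Mu two-electron factor u(mu,r12) and a Sum_Gauss envelope v(r).
program jastrow_sketch

  implicit none

  integer, parameter :: nnucl = 2
  double precision   :: pi, mu
  double precision   :: r1(3), r2(3), r12
  double precision   :: nucl_xyz(3,nnucl), env_c(nnucl), env_a(nnucl)

  pi = dacos(-1.d0)
  mu = 0.87d0                             ! plays the role of mu_erf

  nucl_xyz(:,1) = (/ 0.d0, 0.d0, 0.d0  /) ! nucleus A
  nucl_xyz(:,2) = (/ 0.d0, 0.d0, 1.4d0 /) ! nucleus B
  env_c = (/ 1.0d0, 1.0d0 /)              ! plays the role of env_coef
  env_a = (/ 1.5d0, 1.5d0 /)              ! plays the role of env_expo

  r1  = (/ 0.1d0, 0.0d0, 0.3d0 /)
  r2  = (/ 0.0d0, 0.2d0, 1.1d0 /)
  r12 = dsqrt(sum((r1 - r2)**2))

  ! J_2e(r1,r2) = u(mu,r12) * v(r1) * v(r2)
  print *, ' u(mu,r12)    = ', u_mu(r12)
  print *, ' v(r1), v(r2) = ', v_env(r1), v_env(r2)
  print *, ' J_2e(r1,r2)  = ', u_mu(r12) * v_env(r1) * v_env(r2)

contains

  double precision function u_mu(d12)
    ! u(mu,d12) = d12/2 [1 - erf(mu d12)] - exp(-(mu d12)^2) / (2 sqrt(pi) mu)
    double precision, intent(in) :: d12
    u_mu = 0.5d0 * d12 * (1.d0 - derf(mu*d12)) &
         - 0.5d0 / (dsqrt(pi) * mu) * dexp(-(mu*d12)*(mu*d12))
  end function u_mu

  double precision function v_env(r)
    ! Sum_Gauss envelope: v(r) = 1 - sum_A c_A exp(-alpha_A |r - R_A|^2)
    double precision, intent(in) :: r(3)
    integer :: iA
    v_env = 1.d0
    do iA = 1, nnucl
      v_env = v_env - env_c(iA) * dexp(-env_a(iA) * sum((r - nucl_xyz(:,iA))**2))
    enddo
  end function v_env

end program jastrow_sketch
```

Compiling with, e.g., `gfortran jastrow_sketch.f90 && ./a.out` prints the three factors. The `debug_fit` and `debug_integ_jmu_modif` programs further down in this patch follow the same pattern at scale, comparing analytical providers against numerical references on a grid.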
+ diff --git a/plugins/local/jastrow/env_param.irp.f b/plugins/local/jastrow/env_param.irp.f new file mode 100644 index 00000000..689b22cd --- /dev/null +++ b/plugins/local/jastrow/env_param.irp.f @@ -0,0 +1,102 @@ + +! --- + + BEGIN_PROVIDER [double precision, env_expo, (nucl_num)] +&BEGIN_PROVIDER [double precision, env_coef, (nucl_num)] + + BEGIN_DOC + ! + ! parameters of the env of the 2e-Jastrow + ! + END_DOC + + implicit none + logical :: exists + integer :: i + integer :: ierr + + PROVIDE ezfio_filename + + ! --- + + if (mpi_master) then + call ezfio_has_jastrow_env_expo(exists) + endif + + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + + IRP_IF MPI + include 'mpif.h' + call MPI_BCAST(env_expo, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read env_expo with MPI' + endif + IRP_ENDIF + + if (exists) then + if (mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: env_expo ] <<<<< ..' + call ezfio_get_jastrow_env_expo(env_expo) + IRP_IF MPI + call MPI_BCAST(env_expo, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read env_expo with MPI' + endif + IRP_ENDIF + endif + else + + env_expo = 1d5 + call ezfio_set_jastrow_env_expo(env_expo) + endif + + ! --- + + if (mpi_master) then + call ezfio_has_jastrow_env_coef(exists) + endif + + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + + IRP_IF MPI + call MPI_BCAST(env_coef, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read env_coef with MPI' + endif + IRP_ENDIF + + if (exists) then + if (mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: env_coef ] <<<<< ..' + call ezfio_get_jastrow_env_coef(env_coef) + IRP_IF MPI + call MPI_BCAST(env_coef, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read env_coef with MPI' + endif + IRP_ENDIF + endif + else + + env_coef = 1d0 + call ezfio_set_jastrow_env_coef(env_coef) + endif + + ! --- + + print *, ' parameters for nuclei jastrow' + print *, ' i, Z, env_expo, env_coef' + do i = 1, nucl_num + write(*,'(I4, 2x, 3(E15.7, 2X))') i, nucl_charge(i), env_expo(i), env_coef(i) + enddo + +END_PROVIDER + +! --- + diff --git a/plugins/local/ao_tc_eff_map/fit_j.irp.f b/plugins/local/jastrow/fit_j.irp.f similarity index 83% rename from plugins/local/ao_tc_eff_map/fit_j.irp.f rename to plugins/local/jastrow/fit_j.irp.f index 0fc3da2f..8a2d0036 100644 --- a/plugins/local/ao_tc_eff_map/fit_j.irp.f +++ b/plugins/local/jastrow/fit_j.irp.f @@ -1,41 +1,67 @@ - BEGIN_PROVIDER [ double precision, expo_j_xmu_1gauss ] -&BEGIN_PROVIDER [ double precision, coef_j_xmu_1gauss ] - implicit none - BEGIN_DOC - ! Upper bound long range fit of F(x) = x * (1 - erf(x)) - 1/sqrt(pi) * exp(-x**2) - ! - ! with a single gaussian. - ! - ! Such a function can be used to screen integrals with F(x). - END_DOC - expo_j_xmu_1gauss = 0.5d0 - coef_j_xmu_1gauss = 1.d0 -END_PROVIDER + ! --- -BEGIN_PROVIDER [ double precision, expo_erfc_gauss ] - implicit none - expo_erfc_gauss = 1.41211d0 + BEGIN_PROVIDER [double precision, expo_j_xmu_1gauss] +&BEGIN_PROVIDER [double precision, coef_j_xmu_1gauss] + + implicit none + + BEGIN_DOC + ! Upper bound long range fit of F(x) = x * (1 - erf(x)) - 1/sqrt(pi) * exp(-x**2) + ! + ! with a single gaussian. + ! + ! 
Such a function can be used to screen integrals with F(x). + END_DOC + + expo_j_xmu_1gauss = 0.5d0 + coef_j_xmu_1gauss = 1.d0 + END_PROVIDER -BEGIN_PROVIDER [ double precision, expo_erfc_mu_gauss ] - implicit none - expo_erfc_mu_gauss = expo_erfc_gauss * mu_erf * mu_erf +! --- + +BEGIN_PROVIDER [double precision, expo_erfc_gauss] + + implicit none + + expo_erfc_gauss = 1.41211d0 + END_PROVIDER - BEGIN_PROVIDER [ double precision, expo_good_j_mu_1gauss ] -&BEGIN_PROVIDER [ double precision, coef_good_j_mu_1gauss ] - implicit none - BEGIN_DOC - ! exponent of Gaussian in order to obtain an upper bound of J(r12,mu) - ! - ! Can be used to scree integrals with J(r12,mu) - END_DOC - expo_good_j_mu_1gauss = 2.D0 * mu_erf * expo_j_xmu_1gauss - coef_good_j_mu_1gauss = 0.5d0/mu_erf * coef_j_xmu_1gauss - END_PROVIDER +! --- -BEGIN_PROVIDER [ double precision, expo_j_xmu, (n_fit_1_erf_x) ] +BEGIN_PROVIDER [double precision, expo_erfc_mu_gauss] + + implicit none + + expo_erfc_mu_gauss = expo_erfc_gauss * mu_erf * mu_erf + +END_PROVIDER + +! --- + + BEGIN_PROVIDER [double precision, expo_good_j_mu_1gauss] +&BEGIN_PROVIDER [double precision, coef_good_j_mu_1gauss] + + BEGIN_DOC + ! + ! exponent of Gaussian in order to obtain an upper bound of J(r12,mu) + ! + ! Can be used to scree integrals with J(r12,mu) + ! + END_DOC + + implicit none + + expo_good_j_mu_1gauss = 2.d0 * mu_erf * expo_j_xmu_1gauss + coef_good_j_mu_1gauss = 0.5d0/mu_erf * coef_j_xmu_1gauss + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, expo_j_xmu, (n_fit_1_erf_x)] BEGIN_DOC ! F(x) = x * (1 - erf(x)) - 1/sqrt(pi) * exp(-x**2) is fitted with a gaussian and a Slater @@ -465,53 +491,86 @@ END_PROVIDER ! --- double precision function F_x_j(x) - implicit none - BEGIN_DOC - ! F_x_j(x) = dimension-less correlation factor = x (1 - erf(x)) - 1/sqrt(pi) exp(-x^2) - END_DOC - double precision, intent(in) :: x - F_x_j = x * (1.d0 - derf(x)) - 1/dsqrt(dacos(-1.d0)) * dexp(-x**2) + + BEGIN_DOC + ! + ! dimension-less correlation factor: + ! + ! F_x_j(x) = x (1 - erf(x)) - 1/sqrt(pi) exp(-x^2) + ! + END_DOC + + implicit none + double precision, intent(in) :: x + + F_x_j = x * (1.d0 - derf(x)) - 1/dsqrt(dacos(-1.d0)) * dexp(-x**2) end +! --- + double precision function j_mu_F_x_j(x) - implicit none - BEGIN_DOC - ! j_mu_F_x_j(x) = correlation factor = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) - ! - ! = 1/(2*mu) * F_x_j(mu*x) - END_DOC - double precision :: F_x_j - double precision, intent(in) :: x - j_mu_F_x_j = 0.5d0/mu_erf * F_x_j(x*mu_erf) + + BEGIN_DOC + ! + ! correlation factor: + ! + ! j_mu_F_x_j(x) = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) + ! = 1/(2*mu) * F_x_j(mu*x) + ! + END_DOC + + implicit none + double precision, intent(in) :: x + double precision :: F_x_j + + j_mu_F_x_j = 0.5d0/mu_erf * F_x_j(x*mu_erf) + end +! --- + double precision function j_mu(x) - implicit none - double precision, intent(in) :: x - BEGIN_DOC - ! j_mu(x) = correlation factor = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) - END_DOC - j_mu = 0.5d0* x * (1.d0 - derf(mu_erf*x)) - 0.5d0/( dsqrt(dacos(-1.d0))*mu_erf) * dexp(-(mu_erf*x)*(mu_erf*x)) - -end -double precision function j_mu_fit_gauss(x) - implicit none - BEGIN_DOC - ! j_mu_fit_gauss(x) = correlation factor = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) - ! - ! 
but fitted with gaussians - END_DOC - double precision, intent(in) :: x - integer :: i - double precision :: alpha,coef - j_mu_fit_gauss = 0.d0 - do i = 1, n_max_fit_slat - alpha = expo_gauss_j_mu_x(i) - coef = coef_gauss_j_mu_x(i) - j_mu_fit_gauss += coef * dexp(-alpha*x*x) - enddo + BEGIN_DOC + ! + ! correlation factor: + ! + ! j_mu(x) = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) + ! + END_DOC + + implicit none + double precision, intent(in) :: x + + j_mu = 0.5d0* x * (1.d0 - derf(mu_erf*x)) - 0.5d0/( dsqrt(dacos(-1.d0))*mu_erf) * dexp(-(mu_erf*x)*(mu_erf*x)) + +end + +! --- + +double precision function j_mu_fit_gauss(x) + + BEGIN_DOC + ! + ! correlation factor fitted with gaussians: + ! + ! j_mu_fit_gauss(x) = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) + ! + ! + END_DOC + + implicit none + double precision, intent(in) :: x + integer :: i + double precision :: alpha, coef + + j_mu_fit_gauss = 0.d0 + do i = 1, n_max_fit_slat + alpha = expo_gauss_j_mu_x(i) + coef = coef_gauss_j_mu_x(i) + j_mu_fit_gauss += coef * dexp(-alpha*x*x) + enddo end diff --git a/plugins/local/ao_tc_eff_map/potential.irp.f b/plugins/local/jastrow/fit_potential.irp.f similarity index 78% rename from plugins/local/ao_tc_eff_map/potential.irp.f rename to plugins/local/jastrow/fit_potential.irp.f index 5b72b567..0bdf9c5b 100644 --- a/plugins/local/ao_tc_eff_map/potential.irp.f +++ b/plugins/local/jastrow/fit_potential.irp.f @@ -1,13 +1,16 @@ + ! --- BEGIN_PROVIDER [integer, n_gauss_eff_pot] BEGIN_DOC + ! ! number of gaussians to represent the effective potential : ! ! V(mu,r12) = -0.25 * (1 - erf(mu*r12))^2 + 1/(\sqrt(pi)mu) * exp(-(mu*r12)^2) ! ! Here (1 - erf(mu*r12))^2 is expanded in Gaussians as Eqs A11-A20 in JCP 154, 084119 (2021) + ! END_DOC implicit none @@ -21,10 +24,13 @@ END_PROVIDER BEGIN_PROVIDER [integer, n_gauss_eff_pot_deriv] BEGIN_DOC + ! ! V(r12) = -(1 - erf(mu*r12))^2 is expanded in Gaussians as Eqs A11-A20 in JCP 154, 084119 (2021) + ! END_DOC implicit none + n_gauss_eff_pot_deriv = ng_fit_jast END_PROVIDER @@ -35,11 +41,13 @@ END_PROVIDER &BEGIN_PROVIDER [double precision, coef_gauss_eff_pot, (n_gauss_eff_pot)] BEGIN_DOC + ! ! Coefficients and exponents of the Fit on Gaussians of V(X) = -(1 - erf(mu*X))^2 + 1/(\sqrt(pi)mu) * exp(-(mu*X)^2) ! ! V(X) = \sum_{i=1,n_gauss_eff_pot} coef_gauss_eff_pot(i) * exp(-expo_gauss_eff_pot(i) * X^2) ! ! Relies on the fit proposed in Eqs A11-A20 in JCP 154, 084119 (2021) + ! END_DOC include 'constants.include.F' @@ -64,7 +72,9 @@ END_PROVIDER double precision function eff_pot_gauss(x, mu) BEGIN_DOC + ! ! V(mu,r12) = -0.25 * (1 - erf(mu*r12))^2 + 1/(\sqrt(pi)mu) * exp(-(mu*r12)^2) + ! END_DOC implicit none @@ -74,44 +84,58 @@ double precision function eff_pot_gauss(x, mu) end -! ------------------------------------------------------------------------------------------------- ! --- double precision function eff_pot_fit_gauss(x) - implicit none - BEGIN_DOC - ! V(mu,r12) = -0.25 * (1 - erf(mu*r12))^2 + 1/(\sqrt(pi)mu) * exp(-(mu*r12)^2) - ! - ! but fitted with gaussians - END_DOC - double precision, intent(in) :: x - integer :: i - double precision :: alpha - eff_pot_fit_gauss = derf(mu_erf*x)/x - do i = 1, n_gauss_eff_pot - alpha = expo_gauss_eff_pot(i) - eff_pot_fit_gauss += coef_gauss_eff_pot(i) * dexp(-alpha*x*x) - enddo + + BEGIN_DOC + ! + ! V(mu,r12) = -0.25 * (1 - erf(mu*r12))^2 + 1/(\sqrt(pi)mu) * exp(-(mu*r12)^2) + ! + ! but fitted with gaussians + ! 
+ END_DOC + + implicit none + double precision, intent(in) :: x + integer :: i + double precision :: alpha + + eff_pot_fit_gauss = derf(mu_erf*x)/x + do i = 1, n_gauss_eff_pot + alpha = expo_gauss_eff_pot(i) + eff_pot_fit_gauss += coef_gauss_eff_pot(i) * dexp(-alpha*x*x) + enddo + end +! --- + BEGIN_PROVIDER [integer, n_fit_1_erf_x] - implicit none - BEGIN_DOC -! - END_DOC - n_fit_1_erf_x = 2 + + implicit none + + n_fit_1_erf_x = 2 END_PROVIDER +! --- + BEGIN_PROVIDER [double precision, expos_slat_gauss_1_erf_x, (n_fit_1_erf_x)] - implicit none - BEGIN_DOC -! 1 - erf(mu*x) is fitted with a Slater and gaussian as in Eq.A15 of JCP 154, 084119 (2021) -! -! 1 - erf(mu*x) = e^{-expos_slat_gauss_1_erf_x(1) * mu *x} * e^{-expos_slat_gauss_1_erf_x(2) * mu^2 * x^2} - END_DOC - expos_slat_gauss_1_erf_x(1) = 1.09529d0 - expos_slat_gauss_1_erf_x(2) = 0.756023d0 + + BEGIN_DOC + ! + ! 1 - erf(mu*x) is fitted with a Slater and gaussian as in Eq.A15 of JCP 154, 084119 (2021) + ! + ! 1 - erf(mu*x) = e^{-expos_slat_gauss_1_erf_x(1) * mu *x} * e^{-expos_slat_gauss_1_erf_x(2) * mu^2 * x^2} + ! + END_DOC + + implicit none + + expos_slat_gauss_1_erf_x(1) = 1.09529d0 + expos_slat_gauss_1_erf_x(2) = 0.756023d0 + END_PROVIDER ! --- @@ -151,12 +175,14 @@ END_PROVIDER double precision function fit_1_erf_x(x) BEGIN_DOC + ! ! fit_1_erf_x(x) = \sum_i c_i exp (-alpha_i x^2) \approx (1 - erf(mu*x)) + ! END_DOC implicit none - integer :: i double precision, intent(in) :: x + integer :: i fit_1_erf_x = 0.d0 do i = 1, n_max_fit_slat @@ -171,11 +197,13 @@ end &BEGIN_PROVIDER [double precision, coef_gauss_1_erf_x_2, (ng_fit_jast)] BEGIN_DOC + ! ! (1 - erf(mu*x))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * x^2) ! ! This is based on a fit of (1 - erf(mu*x)) by exp(-alpha * x) exp(-beta*mu^2x^2) ! ! and the slater function exp(-alpha * x) is fitted with n_max_fit_slat gaussians + ! END_DOC implicit none @@ -286,50 +314,22 @@ END_PROVIDER ! --- double precision function fit_1_erf_x_2(x) - implicit none - double precision, intent(in) :: x - BEGIN_DOC -! fit_1_erf_x_2(x) = \sum_i c_i exp (-alpha_i x^2) \approx (1 - erf(mu*x))^2 - END_DOC - integer :: i - fit_1_erf_x_2 = 0.d0 - do i = 1, n_max_fit_slat - fit_1_erf_x_2 += dexp(-expo_gauss_1_erf_x_2(i) *x*x) * coef_gauss_1_erf_x_2(i) - enddo + + BEGIN_DOC + ! + ! fit_1_erf_x_2(x) = \sum_i c_i exp (-alpha_i x^2) \approx (1 - erf(mu*x))^2 + ! + END_DOC + + implicit none + double precision, intent(in) :: x + integer :: i + + fit_1_erf_x_2 = 0.d0 + do i = 1, n_max_fit_slat + fit_1_erf_x_2 += dexp(-expo_gauss_1_erf_x_2(i) *x*x) * coef_gauss_1_erf_x_2(i) + enddo end -subroutine inv_r_times_poly(r, dist_r, dist_vec, poly) - implicit none - BEGIN_DOC -! returns -! -! poly(1) = x / sqrt(x^2+y^2+z^2), poly(2) = y / sqrt(x^2+y^2+z^2), poly(3) = z / sqrt(x^2+y^2+z^2) -! -! with the arguments -! -! r(1) = x, r(2) = y, r(3) = z, dist_r = sqrt(x^2+y^2+z^2) -! -! dist_vec(1) = sqrt(y^2+z^2), dist_vec(2) = sqrt(x^2+z^2), dist_vec(3) = sqrt(x^2+y^2) - END_DOC - double precision, intent(in) :: r(3), dist_r, dist_vec(3) - double precision, intent(out):: poly(3) - double precision :: inv_dist - integer :: i - if (dist_r.gt. 1.d-8)then - inv_dist = 1.d0/dist_r - do i = 1, 3 - poly(i) = r(i) * inv_dist - enddo - else - do i = 1, 3 - if(dabs(r(i)).lt.dist_vec(i))then - inv_dist = 1.d0/dist_r - poly(i) = r(i) * inv_dist - else !if(dabs(r(i)))then - poly(i) = 1.d0 -! poly(i) = 0.d0 - endif - enddo - endif -end +! 
--- diff --git a/plugins/local/ao_many_one_e_ints/fit_slat_gauss.irp.f b/plugins/local/jastrow/fit_slat_gauss.irp.f similarity index 100% rename from plugins/local/ao_many_one_e_ints/fit_slat_gauss.irp.f rename to plugins/local/jastrow/fit_slat_gauss.irp.f diff --git a/plugins/local/jastrow/jast_1e_param.irp.f b/plugins/local/jastrow/jast_1e_param.irp.f new file mode 100644 index 00000000..eca150be --- /dev/null +++ b/plugins/local/jastrow/jast_1e_param.irp.f @@ -0,0 +1,104 @@ + +! --- + + BEGIN_PROVIDER [double precision, j1e_expo, (j1e_size, nucl_num)] +&BEGIN_PROVIDER [double precision, j1e_coef, (j1e_size, nucl_num)] + + BEGIN_DOC + ! + ! parameters of the 1e-Jastrow + ! + END_DOC + + implicit none + logical :: exists + integer :: i, j + integer :: ierr + + PROVIDE ezfio_filename + + ! --- + + if (mpi_master) then + call ezfio_has_jastrow_j1e_expo(exists) + endif + + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + + IRP_IF MPI + include 'mpif.h' + call MPI_BCAST(j1e_expo, (j1e_size*nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_expo with MPI' + endif + IRP_ENDIF + + if (exists) then + if (mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: j1e_expo ] <<<<< ..' + call ezfio_get_jastrow_j1e_expo(j1e_expo) + IRP_IF MPI + call MPI_BCAST(j1e_expo, (j1e_size*nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_expo with MPI' + endif + IRP_ENDIF + endif + else + + j1e_expo = 1.d0 + call ezfio_set_jastrow_j1e_expo(j1e_expo) + endif + + ! --- + + if (mpi_master) then + call ezfio_has_jastrow_j1e_coef(exists) + endif + + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + + IRP_IF MPI + call MPI_BCAST(j1e_coef, (j1e_size*nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_coef with MPI' + endif + IRP_ENDIF + + if (exists) then + if (mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef ] <<<<< ..' + call ezfio_get_jastrow_j1e_coef(j1e_coef) + IRP_IF MPI + call MPI_BCAST(j1e_coef, (j1e_size*nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_coef with MPI' + endif + IRP_ENDIF + endif + else + + j1e_coef = 0.d0 + call ezfio_set_jastrow_j1e_coef(j1e_coef) + endif + + ! --- + + print *, ' parameters of the 1e-Jastrow' + do i = 1, nucl_num + print*, ' for Z = ', nucl_charge(i) + do j = 1, j1e_size + write(*,'(I4, 2x, 2(E15.7, 2X))') j, j1e_coef(j,i), j1e_expo(j,i) + enddo + enddo + +END_PROVIDER + +! --- + diff --git a/plugins/local/jastrow/listj1b.irp.f b/plugins/local/jastrow/listj1b.irp.f new file mode 100644 index 00000000..49954d47 --- /dev/null +++ b/plugins/local/jastrow/listj1b.irp.f @@ -0,0 +1,371 @@ + +! --- + +BEGIN_PROVIDER [integer, List_env1s_size] + + implicit none + + PROVIDE env_type + + if(env_type .eq. "None") then + + List_env1s_size = 1 + + elseif(env_type .eq. "Prod_Gauss") then + + List_env1s_size = 2**nucl_num + + elseif(env_type .eq. "Sum_Gauss") then + + List_env1s_size = nucl_num + 1 + + else + + print *, ' Error in List_env1s_size: Unknown env_type = ', env_type + stop + + endif + + print *, ' nb of 1s-Gaussian in the envelope = ', List_env1s_size + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [integer, List_env1s, (nucl_num, List_env1s_size)] + + implicit none + integer :: i, j + + if(nucl_num .gt. 
32) then + print *, ' nucl_num = ', nucl_num, '> 32' + stop + endif + + List_env1s = 0 + + do i = 0, List_env1s_size-1 + do j = 0, nucl_num-1 + if (btest(i,j)) then + List_env1s(j+1,i+1) = 1 + endif + enddo + enddo + +END_PROVIDER + +! --- + + BEGIN_PROVIDER [ double precision, List_env1s_coef, ( List_env1s_size)] +&BEGIN_PROVIDER [ double precision, List_env1s_expo, ( List_env1s_size)] +&BEGIN_PROVIDER [ double precision, List_env1s_cent, (3, List_env1s_size)] + + implicit none + integer :: i, j, k, phase + double precision :: tmp_alphaj, tmp_alphak + double precision :: tmp_cent_x, tmp_cent_y, tmp_cent_z + + provide env_type env_expo env_coef + + if(env_type .eq. "None") then + + List_env1s_coef( 1) = 1.d0 + List_env1s_expo( 1) = 0.d0 + List_env1s_cent(1:3,1) = 0.d0 + + elseif(env_type .eq. "Prod_Gauss") then + + List_env1s_coef = 0.d0 + List_env1s_expo = 0.d0 + List_env1s_cent = 0.d0 + + do i = 1, List_env1s_size + + tmp_cent_x = 0.d0 + tmp_cent_y = 0.d0 + tmp_cent_z = 0.d0 + do j = 1, nucl_num + tmp_alphaj = dble(List_env1s(j,i)) * env_expo(j) + List_env1s_expo(i) += tmp_alphaj + tmp_cent_x += tmp_alphaj * nucl_coord(j,1) + tmp_cent_y += tmp_alphaj * nucl_coord(j,2) + tmp_cent_z += tmp_alphaj * nucl_coord(j,3) + enddo + + if(List_env1s_expo(i) .lt. 1d-10) cycle + + List_env1s_cent(1,i) = tmp_cent_x / List_env1s_expo(i) + List_env1s_cent(2,i) = tmp_cent_y / List_env1s_expo(i) + List_env1s_cent(3,i) = tmp_cent_z / List_env1s_expo(i) + enddo + + ! --- + + do i = 1, List_env1s_size + + do j = 2, nucl_num, 1 + tmp_alphaj = dble(List_env1s(j,i)) * env_expo(j) + do k = 1, j-1, 1 + tmp_alphak = dble(List_env1s(k,i)) * env_expo(k) + + List_env1s_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + enddo + enddo + + if(List_env1s_expo(i) .lt. 1d-10) cycle + + List_env1s_coef(i) = List_env1s_coef(i) / List_env1s_expo(i) + enddo + + ! --- + + do i = 1, List_env1s_size + + phase = 0 + do j = 1, nucl_num + phase += List_env1s(j,i) + enddo + + List_env1s_coef(i) = (-1.d0)**dble(phase) * dexp(-List_env1s_coef(i)) + enddo + + elseif(env_type .eq. "Sum_Gauss") then + + List_env1s_coef( 1) = 1.d0 + List_env1s_expo( 1) = 0.d0 + List_env1s_cent(1:3,1) = 0.d0 + do i = 1, nucl_num + List_env1s_coef( i+1) = -1.d0 * env_coef(i) + List_env1s_expo( i+1) = env_expo(i) + List_env1s_cent(1,i+1) = nucl_coord(i,1) + List_env1s_cent(2,i+1) = nucl_coord(i,2) + List_env1s_cent(3,i+1) = nucl_coord(i,3) + enddo + + else + + print *, ' Error in List_env1s: Unknown env_type = ', env_type + stop + + endif + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [integer, List_env1s_square_size] + + implicit none + double precision :: tmp + + if(env_type .eq. "None") then + + List_env1s_square_size = 1 + + elseif(env_type .eq. "Prod_Gauss") then + + List_env1s_square_size = 3**nucl_num + + elseif(env_type .eq. "Sum_Gauss") then + + tmp = 0.5d0 * dble(nucl_num) * (dble(nucl_num) + 3.d0) + List_env1s_square_size = int(tmp) + 1 + + else + + print *, ' Error in List_env1s_square_size: Unknown env_type = ', env_type + stop + + endif + + print *, ' nb of 1s-Gaussian in the square of envelope = ', List_env1s_square_size + +END_PROVIDER + +! 
--- + +BEGIN_PROVIDER [integer, List_env1s_square, (nucl_num, List_env1s_square_size)] + + implicit none + integer :: i, j, ii, jj + integer, allocatable :: M(:,:), p(:) + + if(nucl_num .gt. 32) then + print *, ' nucl_num = ', nucl_num, '> 32' + stop + endif + + List_env1s_square(:,:) = 0 + List_env1s_square(:,List_env1s_square_size) = 2 + + allocate(p(nucl_num)) + p = 0 + + do i = 2, List_env1s_square_size-1 + do j = 1, nucl_num + + ii = 0 + do jj = 1, j-1, 1 + ii = ii + p(jj) * 3**(jj-1) + enddo + p(j) = modulo(i-1-ii, 3**j) / 3**(j-1) + + List_env1s_square(j,i) = p(j) + enddo + enddo + +END_PROVIDER + +! --- + + BEGIN_PROVIDER [ double precision, List_env1s_square_coef, ( List_env1s_square_size)] +&BEGIN_PROVIDER [ double precision, List_env1s_square_expo, ( List_env1s_square_size)] +&BEGIN_PROVIDER [ double precision, List_env1s_square_cent, (3, List_env1s_square_size)] + + implicit none + integer :: i, j, k, phase + integer :: ii + double precision :: tmp_alphaj, tmp_alphak, facto + double precision :: tmp1, tmp2, tmp3, tmp4 + double precision :: xi, yi, zi, xj, yj, zj + double precision :: dx, dy, dz, r2 + + provide env_type env_expo env_coef + + if(env_type .eq. "None") then + + List_env1s_square_coef( 1) = 1.d0 + List_env1s_square_expo( 1) = 0.d0 + List_env1s_square_cent(1:3,1) = 0.d0 + + elseif(env_type .eq. "Prod_Gauss") then + + List_env1s_square_coef = 0.d0 + List_env1s_square_expo = 0.d0 + List_env1s_square_cent = 0.d0 + + do i = 1, List_env1s_square_size + + do j = 1, nucl_num + tmp_alphaj = dble(List_env1s_square(j,i)) * env_expo(j) + List_env1s_square_expo(i) += tmp_alphaj + List_env1s_square_cent(1,i) += tmp_alphaj * nucl_coord(j,1) + List_env1s_square_cent(2,i) += tmp_alphaj * nucl_coord(j,2) + List_env1s_square_cent(3,i) += tmp_alphaj * nucl_coord(j,3) + + enddo + + if(List_env1s_square_expo(i) .lt. 1d-10) cycle + + List_env1s_square_cent(1,i) = List_env1s_square_cent(1,i) / List_env1s_square_expo(i) + List_env1s_square_cent(2,i) = List_env1s_square_cent(2,i) / List_env1s_square_expo(i) + List_env1s_square_cent(3,i) = List_env1s_square_cent(3,i) / List_env1s_square_expo(i) + enddo + + ! --- + + do i = 1, List_env1s_square_size + + do j = 2, nucl_num, 1 + tmp_alphaj = dble(List_env1s_square(j,i)) * env_expo(j) + do k = 1, j-1, 1 + tmp_alphak = dble(List_env1s_square(k,i)) * env_expo(k) + + List_env1s_square_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + enddo + enddo + + if(List_env1s_square_expo(i) .lt. 1d-10) cycle + + List_env1s_square_coef(i) = List_env1s_square_coef(i) / List_env1s_square_expo(i) + enddo + + ! --- + + do i = 1, List_env1s_square_size + + facto = 1.d0 + phase = 0 + do j = 1, nucl_num + tmp_alphaj = dble(List_env1s_square(j,i)) + + facto *= 2.d0 / (gamma(tmp_alphaj+1.d0) * gamma(3.d0-tmp_alphaj)) + phase += List_env1s_square(j,i) + enddo + + List_env1s_square_coef(i) = (-1.d0)**dble(phase) * facto * dexp(-List_env1s_square_coef(i)) + enddo + + elseif(env_type .eq. 
"Sum_Gauss") then + + ii = 1 + List_env1s_square_coef( ii) = 1.d0 + List_env1s_square_expo( ii) = 0.d0 + List_env1s_square_cent(1:3,ii) = 0.d0 + + do i = 1, nucl_num + ii = ii + 1 + List_env1s_square_coef( ii) = -2.d0 * env_coef(i) + List_env1s_square_expo( ii) = env_expo(i) + List_env1s_square_cent(1,ii) = nucl_coord(i,1) + List_env1s_square_cent(2,ii) = nucl_coord(i,2) + List_env1s_square_cent(3,ii) = nucl_coord(i,3) + enddo + + do i = 1, nucl_num + ii = ii + 1 + List_env1s_square_coef( ii) = 1.d0 * env_coef(i) * env_coef(i) + List_env1s_square_expo( ii) = 2.d0 * env_expo(i) + List_env1s_square_cent(1,ii) = nucl_coord(i,1) + List_env1s_square_cent(2,ii) = nucl_coord(i,2) + List_env1s_square_cent(3,ii) = nucl_coord(i,3) + enddo + + do i = 1, nucl_num-1 + + tmp1 = env_expo(i) + + xi = nucl_coord(i,1) + yi = nucl_coord(i,2) + zi = nucl_coord(i,3) + + do j = i+1, nucl_num + + tmp2 = env_expo(j) + tmp3 = tmp1 + tmp2 + tmp4 = 1.d0 / tmp3 + + xj = nucl_coord(j,1) + yj = nucl_coord(j,2) + zj = nucl_coord(j,3) + + dx = xi - xj + dy = yi - yj + dz = zi - zj + r2 = dx*dx + dy*dy + dz*dz + + ii = ii + 1 + ! x 2 to avoid doing integrals twice + List_env1s_square_coef( ii) = 2.d0 * dexp(-tmp1*tmp2*tmp4*r2) * env_coef(i) * env_coef(j) + List_env1s_square_expo( ii) = tmp3 + List_env1s_square_cent(1,ii) = tmp4 * (tmp1 * xi + tmp2 * xj) + List_env1s_square_cent(2,ii) = tmp4 * (tmp1 * yi + tmp2 * yj) + List_env1s_square_cent(3,ii) = tmp4 * (tmp1 * zi + tmp2 * zj) + enddo + enddo + + else + + print *, ' Error in List_env1s_square: Unknown env_type = ', env_type + stop + + endif + +END_PROVIDER + +! --- + diff --git a/plugins/local/non_h_ints_mu/NEED b/plugins/local/non_h_ints_mu/NEED index c44c65af..48c1c24b 100644 --- a/plugins/local/non_h_ints_mu/NEED +++ b/plugins/local/non_h_ints_mu/NEED @@ -1,4 +1,5 @@ qmckl +hamiltonian jastrow ao_tc_eff_map bi_ortho_mos diff --git a/plugins/local/non_h_ints_mu/deb_aos.irp.f b/plugins/local/non_h_ints_mu/deb_aos.irp.f new file mode 100644 index 00000000..c9bc9c9a --- /dev/null +++ b/plugins/local/non_h_ints_mu/deb_aos.irp.f @@ -0,0 +1,56 @@ + +! --- + +program deb_Aos + + implicit none + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + if(tc_integ_type .eq. "numeric") then + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + endif + + call print_aos() + +end + +! --- + +subroutine print_aos() + + implicit none + integer :: i, ipoint + double precision :: r(3) + double precision :: ao_val, ao_der(3), ao_lap + + PROVIDE final_grid_points aos_in_r_array aos_grad_in_r_array aos_lapl_in_r_array + + do ipoint = 1, n_points_final_grid + r(:) = final_grid_points(:,ipoint) + print*, r + enddo + + do ipoint = 1, n_points_final_grid + r(:) = final_grid_points(:,ipoint) + do i = 1, ao_num + ao_val = aos_in_r_array (i,ipoint) + ao_der(:) = aos_grad_in_r_array(i,ipoint,:) + ao_lap = aos_lapl_in_r_array(1,i,ipoint) + aos_lapl_in_r_array(2,i,ipoint) + aos_lapl_in_r_array(3,i,ipoint) + write(*, '(5(f15.7, 3X))') ao_val, ao_der, ao_lap + enddo + enddo + + return +end + +! 
--- + diff --git a/plugins/local/non_h_ints_mu/debug_fit.irp.f b/plugins/local/non_h_ints_mu/debug_fit.irp.f index d3152836..d4b917ec 100644 --- a/plugins/local/non_h_ints_mu/debug_fit.irp.f +++ b/plugins/local/non_h_ints_mu/debug_fit.irp.f @@ -11,9 +11,12 @@ program debug_fit my_n_pt_a_grid = tc_grid1_a touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - PROVIDE mu_erf j1b_pen + PROVIDE j2e_type mu_erf + PROVIDE j1e_type j1e_coef j1e_expo + PROVIDE env_type env_coef env_expo + provide tc_integ_type - if(j1b_type .ge. 100) then + if(tc_integ_type .eq. "numeric") then my_extra_grid_becke = .True. PROVIDE tc_grid2_a tc_grid2_r my_n_pt_r_extra_grid = tc_grid2_r @@ -21,12 +24,8 @@ program debug_fit touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid endif - !call test_j1b_nucl() - !call test_grad_j1b_nucl() - !call test_lapl_j1b_nucl() - - !call test_list_b2() - !call test_list_b3() + !call test_env_nucl() + !call test_grad_env_nucl() !call test_fit_u() !call test_fit_u2() @@ -38,17 +37,17 @@ end ! --- -subroutine test_j1b_nucl() +subroutine test_env_nucl() implicit none integer :: ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz double precision :: r(3) - double precision, external :: j1b_nucl + double precision, external :: env_nucl - print*, ' test_j1b_nucl ...' + print*, ' test_env_nucl ...' - PROVIDE v_1b + PROVIDE env_val eps_ij = 1d-7 acc_tot = 0.d0 @@ -60,11 +59,11 @@ subroutine test_j1b_nucl() r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - i_exc = v_1b(ipoint) - i_num = j1b_nucl(r) + i_exc = env_val(ipoint) + i_num = env_nucl(r) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in v_1b on', ipoint + print *, ' problem in env_val on', ipoint print *, ' analyt = ', i_exc print *, ' numeri = ', i_num print *, ' diff = ', acc_ij @@ -78,23 +77,23 @@ subroutine test_j1b_nucl() print*, ' normalz = ', normalz return -end subroutine test_j1b_nucl +end ! --- -subroutine test_grad_j1b_nucl() +subroutine test_grad_env_nucl() implicit none integer :: ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz double precision :: r(3) - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num + double precision, external :: grad_x_env_nucl_num + double precision, external :: grad_y_env_nucl_num + double precision, external :: grad_z_env_nucl_num - print*, ' test_grad_j1b_nucl ...' + PROVIDE env_grad - PROVIDE v_1b_grad + print*, ' test_grad_env_nucl ...' eps_ij = 1d-7 acc_tot = 0.d0 @@ -106,31 +105,31 @@ subroutine test_grad_j1b_nucl() r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - i_exc = v_1b_grad(1,ipoint) - i_num = grad_x_j1b_nucl_num(r) + i_exc = env_grad(1,ipoint) + i_num = grad_x_env_nucl_num(r) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in x of v_1b_grad on', ipoint + print *, ' problem in x of env_grad on', ipoint print *, ' analyt = ', i_exc print *, ' numeri = ', i_num print *, ' diff = ', acc_ij endif - i_exc = v_1b_grad(2,ipoint) - i_num = grad_y_j1b_nucl_num(r) + i_exc = env_grad(2,ipoint) + i_num = grad_y_env_nucl_num(r) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. 
eps_ij) then - print *, ' problem in y of v_1b_grad on', ipoint + print *, ' problem in y of env_grad on', ipoint print *, ' analyt = ', i_exc print *, ' numeri = ', i_num print *, ' diff = ', acc_ij endif - i_exc = v_1b_grad(3,ipoint) - i_num = grad_z_j1b_nucl_num(r) + i_exc = env_grad(3,ipoint) + i_num = grad_z_env_nucl_num(r) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in z of v_1b_grad on', ipoint + print *, ' problem in z of env_grad on', ipoint print *, ' analyt = ', i_exc print *, ' numeri = ', i_num print *, ' diff = ', acc_ij @@ -144,278 +143,7 @@ subroutine test_grad_j1b_nucl() print*, ' normalz = ', normalz return -end subroutine test_grad_j1b_nucl - -! --- - -subroutine test_lapl_j1b_nucl() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: r(3) - double precision, external :: lapl_j1b_nucl - - print*, ' test_lapl_j1b_nucl ...' - - PROVIDE v_1b_lapl - - eps_ij = 1d-5 - acc_tot = 0.d0 - normalz = 0.d0 - - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_lapl(ipoint) - i_num = lapl_j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in v_1b_lapl on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - print*, ' acc_tot = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_lapl_j1b_nucl - -! --- - -subroutine test_list_b2() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: r(3) - double precision, external :: j1b_nucl - - print*, ' test_list_b2 ...' - - PROVIDE v_1b_list_b2 - - eps_ij = 1d-7 - acc_tot = 0.d0 - normalz = 0.d0 - - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_list_b2(ipoint) - i_num = j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in list_b2 on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - print*, ' acc_tot = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_list_b2 - -! --- - -subroutine test_list_b3() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_tmp, i_num, normalz - double precision :: r(3) - double precision :: grad_num(3), eps_der, eps_lap, tmp_der, tmp_lap, i0, ip, im - double precision, external :: j1b_nucl_square - - print*, ' test_list_b3 ...' - - eps_ij = 1d-7 - - eps_der = 1d-5 - tmp_der = 0.5d0 / eps_der - - eps_lap = 1d-4 - tmp_lap = 1.d0 / (eps_lap*eps_lap) - - ! --- - - PROVIDE v_1b_list_b3 - - acc_tot = 0.d0 - normalz = 0.d0 - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_list_b3(ipoint) - i_num = j1b_nucl_square(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. 
eps_ij) then - print *, ' problem in list_b3 on', ipoint - print *, ' r = ', r - print *, ' r2 = ', r(1)*r(1) + r(2)*r(2) + r(3)*r(3) - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - print*, ' acc_tot on val = ', acc_tot - print*, ' normalz on val = ', normalz - - ! --- - - PROVIDE v_1b_square_grad - - acc_tot = 0.d0 - normalz = 0.d0 - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_square_grad(ipoint,1) - r(1) = r(1) + eps_der - ip = j1b_nucl_square(r) - r(1) = r(1) - 2.d0 * eps_der - im = j1b_nucl_square(r) - r(1) = r(1) + eps_der - i_num = tmp_der * (ip - im) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in grad_x list_b3 on', ipoint - print *, ' r = ', r - print *, ' r2 = ', r(1)*r(1) + r(2)*r(2) + r(3)*r(3) - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_num) - - i_exc = v_1b_square_grad(ipoint,2) - r(2) = r(2) + eps_der - ip = j1b_nucl_square(r) - r(2) = r(2) - 2.d0 * eps_der - im = j1b_nucl_square(r) - r(2) = r(2) + eps_der - i_num = tmp_der * (ip - im) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in grad_y list_b3 on', ipoint - print *, ' r = ', r - print *, ' r2 = ', r(1)*r(1) + r(2)*r(2) + r(3)*r(3) - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_num) - - i_exc = v_1b_square_grad(ipoint,3) - r(3) = r(3) + eps_der - ip = j1b_nucl_square(r) - r(3) = r(3) - 2.d0 * eps_der - im = j1b_nucl_square(r) - r(3) = r(3) + eps_der - i_num = tmp_der * (ip - im) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in grad_z list_b3 on', ipoint - print *, ' r = ', r - print *, ' r2 = ', r(1)*r(1) + r(2)*r(2) + r(3)*r(3) - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - print*, ' acc_tot on grad = ', acc_tot - print*, ' normalz on grad = ', normalz - - ! --- - - PROVIDE v_1b_square_lapl - - acc_tot = 0.d0 - normalz = 0.d0 - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - i0 = j1b_nucl_square(r) - - i_exc = v_1b_square_lapl(ipoint) - - r(1) = r(1) + eps_lap - ip = j1b_nucl_square(r) - r(1) = r(1) - 2.d0 * eps_lap - im = j1b_nucl_square(r) - r(1) = r(1) + eps_lap - i_num = tmp_lap * (ip - 2.d0 * i0 + im) - - r(2) = r(2) + eps_lap - ip = j1b_nucl_square(r) - r(2) = r(2) - 2.d0 * eps_lap - im = j1b_nucl_square(r) - r(2) = r(2) + eps_lap - i_num = i_num + tmp_lap * (ip - 2.d0 * i0 + im) - - r(3) = r(3) + eps_lap - ip = j1b_nucl_square(r) - r(3) = r(3) - 2.d0 * eps_lap - im = j1b_nucl_square(r) - r(3) = r(3) + eps_lap - i_num = i_num + tmp_lap * (ip - 2.d0 * i0 + im) - - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in lapl list_b3 on', ipoint - print *, ' r = ', r - print *, ' r2 = ', r(1)*r(1) + r(2)*r(2) + r(3)*r(3) - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - print*, ' acc_tot on lapl = ', acc_tot - print*, ' normalz on lapl = ', normalz - - ! 
--- - - return -end subroutine test_list_b3 +end ! --- @@ -516,7 +244,7 @@ subroutine test_fit_ugradu() enddo return -end subroutine test_fit_ugradu +end ! --- @@ -582,7 +310,7 @@ subroutine test_fit_u() enddo return -end subroutine test_fit_u +end ! --- @@ -649,7 +377,7 @@ subroutine test_fit_u2() enddo return -end subroutine test_fit_u2 +end ! --- @@ -673,10 +401,10 @@ subroutine test_grad1_u12_withsq_num() do ipoint = 1, n_points_final_grid - call get_grad1_u12_withsq_r1_seq(final_grid_points(1,ipoint), n_points_extra_final_grid, tmp_grad1_u12(1,ipoint,1) & - , tmp_grad1_u12(1,ipoint,2) & - , tmp_grad1_u12(1,ipoint,3) & - , tmp_grad1_u12_squared(1,ipoint)) + call get_grad1_u12_withsq_r1_seq(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,ipoint,1) & + , tmp_grad1_u12(1,ipoint,2) & + , tmp_grad1_u12(1,ipoint,3) & + , tmp_grad1_u12_squared(1,ipoint)) do jpoint = 1, n_points_extra_final_grid i_exc = grad1_u12_squared_num(jpoint,ipoint) @@ -714,7 +442,7 @@ subroutine test_grad1_u12_withsq_num() print*, ' accuracy (%) = ', 100.d0 * acc_tot / normalz return -end subroutine test_grad1_u12_withsq_num +end ! --- diff --git a/plugins/local/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/plugins/local/non_h_ints_mu/debug_integ_jmu_modif.irp.f index b9e8df25..8d3a163c 100644 --- a/plugins/local/non_h_ints_mu/debug_integ_jmu_modif.irp.f +++ b/plugins/local/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -11,40 +11,40 @@ program debug_integ_jmu_modif my_n_pt_a_grid = tc_grid1_a touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - PROVIDE mu_erf j1b_pen + PROVIDE mu_erf -! call test_v_ij_u_cst_mu_j1b() -! call test_v_ij_erf_rk_cst_mu_j1b() -! call test_x_v_ij_erf_rk_cst_mu_j1b() -! call test_int2_u2_j1b2() -! call test_int2_grad1u2_grad2u2_j1b2() -! call test_int2_u_grad1u_total_j1b2() +! call test_v_ij_u_cst_mu_env() +! call test_v_ij_erf_rk_cst_mu_env() +! call test_x_v_ij_erf_rk_cst_mu_env() +! call test_int2_u2_env2() +! call test_int2_grad1u2_grad2u2_env2() +! call test_int2_u_grad1u_total_env2() ! -! call test_int2_grad1_u12_ao() +! call test_int2_grad1_u12_ao_num() ! ! call test_grad12_j12() - call test_tchint_rsdft() -! call test_u12sq_j1bsq() -! call test_u12_grad1_u12_j1b_grad1_j1b() -! !call test_gradu_squared_u_ij_mu() +! call test_u12sq_envsq() +! call test_u12_grad1_u12_env_grad1_env() !call test_vect_overlap_gauss_r12_ao() !call test_vect_overlap_gauss_r12_ao_with1s() + !call test_Ir2_Mu_long_Du_0() + end ! --- -subroutine test_v_ij_u_cst_mu_j1b() +subroutine test_v_ij_u_cst_mu_env() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_v_ij_u_cst_mu_j1b + double precision, external :: num_v_ij_u_cst_mu_env - print*, ' test_v_ij_u_cst_mu_j1b ...' + print*, ' test_v_ij_u_cst_mu_env ...' - PROVIDE v_ij_u_cst_mu_j1b_fit + PROVIDE v_ij_u_cst_mu_env_fit eps_ij = 1d-3 acc_tot = 0.d0 @@ -54,11 +54,11 @@ subroutine test_v_ij_u_cst_mu_j1b() do j = 1, ao_num do i = 1, ao_num - i_exc = v_ij_u_cst_mu_j1b_fit(i,j,ipoint) - i_num = num_v_ij_u_cst_mu_j1b (i,j,ipoint) + i_exc = v_ij_u_cst_mu_env_fit(i,j,ipoint) + i_num = num_v_ij_u_cst_mu_env (i,j,ipoint) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. 
eps_ij) then - print *, ' problem in v_ij_u_cst_mu_j1b_fit on', i, j, ipoint + print *, ' problem in v_ij_u_cst_mu_env_fit on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -70,24 +70,23 @@ subroutine test_v_ij_u_cst_mu_j1b() enddo enddo - print*, ' acc_tot = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot (%) = ', 100.d0 * acc_tot / normalz return -end subroutine test_v_ij_u_cst_mu_j1b +end ! --- -subroutine test_v_ij_erf_rk_cst_mu_j1b() +subroutine test_v_ij_erf_rk_cst_mu_env() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_v_ij_erf_rk_cst_mu_j1b + double precision, external :: num_v_ij_erf_rk_cst_mu_env - print*, ' test_v_ij_erf_rk_cst_mu_j1b ...' + print*, ' test_v_ij_erf_rk_cst_mu_env ...' - PROVIDE v_ij_erf_rk_cst_mu_j1b + PROVIDE v_ij_erf_rk_cst_mu_env eps_ij = 1d-3 acc_tot = 0.d0 @@ -98,11 +97,11 @@ subroutine test_v_ij_erf_rk_cst_mu_j1b() do j = 1, ao_num do i = 1, ao_num - i_exc = v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) - i_num = num_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + i_exc = v_ij_erf_rk_cst_mu_env(i,j,ipoint) + i_num = num_v_ij_erf_rk_cst_mu_env(i,j,ipoint) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' problem in v_ij_erf_rk_cst_mu_env on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -118,20 +117,20 @@ subroutine test_v_ij_erf_rk_cst_mu_j1b() print*, ' normalz = ', normalz return -end subroutine test_v_ij_erf_rk_cst_mu_j1b +end ! --- -subroutine test_x_v_ij_erf_rk_cst_mu_j1b() +subroutine test_x_v_ij_erf_rk_cst_mu_env() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz double precision :: integ(3) - print*, ' test_x_v_ij_erf_rk_cst_mu_j1b ...' + print*, ' test_x_v_ij_erf_rk_cst_mu_env ...' - PROVIDE x_v_ij_erf_rk_cst_mu_j1b + PROVIDE x_v_ij_erf_rk_cst_mu_env eps_ij = 1d-3 acc_tot = 0.d0 @@ -142,13 +141,13 @@ subroutine test_x_v_ij_erf_rk_cst_mu_j1b() do j = 1, ao_num do i = 1, ao_num - call num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) + call num_x_v_ij_erf_rk_cst_mu_env(i, j, ipoint, integ) - i_exc = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + i_exc = x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,1) i_num = integ(1) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in x part of x_v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' problem in x part of x_v_ij_erf_rk_cst_mu_env on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -156,11 +155,11 @@ subroutine test_x_v_ij_erf_rk_cst_mu_j1b() acc_tot += acc_ij normalz += dabs(i_num) - i_exc = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + i_exc = x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,2) i_num = integ(2) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in y part of x_v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' problem in y part of x_v_ij_erf_rk_cst_mu_env on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -168,11 +167,11 @@ subroutine test_x_v_ij_erf_rk_cst_mu_j1b() acc_tot += acc_ij normalz += dabs(i_num) - i_exc = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + i_exc = x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,3) i_num = integ(3) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. 
eps_ij) then - print *, ' problem in z part of x_v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' problem in z part of x_v_ij_erf_rk_cst_mu_env on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -188,35 +187,34 @@ subroutine test_x_v_ij_erf_rk_cst_mu_j1b() print*, ' normalz = ', normalz return -end subroutine test_x_v_ij_erf_rk_cst_mu_j1b +end ! --- -subroutine test_int2_u2_j1b2() +subroutine test_int2_u2_env2() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_int2_u2_j1b2 + double precision, external :: num_int2_u2_env2 - print*, ' test_int2_u2_j1b2 ...' + print*, ' test_int2_u2_env2 ...' - PROVIDE int2_u2_j1b2 + PROVIDE int2_u2_env2 eps_ij = 1d-3 acc_tot = 0.d0 normalz = 0.d0 - !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid do j = 1, ao_num do i = 1, ao_num - i_exc = int2_u2_j1b2(i,j,ipoint) - i_num = num_int2_u2_j1b2(i,j,ipoint) + i_exc = int2_u2_env2(i,j,ipoint) + i_num = num_int2_u2_env2(i,j,ipoint) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in int2_u2_j1b2 on', i, j, ipoint + print *, ' problem in int2_u2_env2 on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -233,20 +231,20 @@ subroutine test_int2_u2_j1b2() print*, ' normalz = ', normalz return -end subroutine test_int2_u2_j1b2 +end ! --- -subroutine test_int2_grad1u2_grad2u2_j1b2() +subroutine test_int2_grad1u2_grad2u2_env2() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_int2_grad1u2_grad2u2_j1b2 + double precision, external :: num_int2_grad1u2_grad2u2_env2 - print*, ' test_int2_grad1u2_grad2u2_j1b2 ...' + print*, ' test_int2_grad1u2_grad2u2_env2 ...' - PROVIDE int2_grad1u2_grad2u2_j1b2 + PROVIDE int2_grad1u2_grad2u2_env2 eps_ij = 1d-3 acc_tot = 0.d0 @@ -257,11 +255,11 @@ subroutine test_int2_grad1u2_grad2u2_j1b2() do j = 1, ao_num do i = 1, ao_num - i_exc = int2_grad1u2_grad2u2_j1b2(i,j,ipoint) - i_num = num_int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + i_exc = int2_grad1u2_grad2u2_env2(i,j,ipoint) + i_num = num_int2_grad1u2_grad2u2_env2(i,j,ipoint) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in int2_grad1u2_grad2u2_j1b2 on', i, j, ipoint + print *, ' problem in int2_grad1u2_grad2u2_env2 on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -277,18 +275,18 @@ subroutine test_int2_grad1u2_grad2u2_j1b2() print*, ' normalz = ', normalz return -end subroutine test_int2_grad1u2_grad2u2_j1b2 +end ! --- -subroutine test_int2_grad1_u12_ao() +subroutine test_int2_grad1_u12_ao_num() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz double precision :: integ(3) - print*, ' test_int2_grad1_u12_ao ...' + print*, ' test_int2_grad1_u12_ao_num ...' PROVIDE int2_grad1_u12_ao @@ -346,11 +344,11 @@ subroutine test_int2_grad1_u12_ao() print*, ' normalz = ', normalz return -end subroutine test_int2_grad1_u12_ao +end ! --- -subroutine test_int2_u_grad1u_total_j1b2() +subroutine test_int2_u_grad1u_total_env2() implicit none integer :: i, j, ipoint @@ -358,10 +356,10 @@ subroutine test_int2_u_grad1u_total_j1b2() double precision :: x, y, z double precision :: integ(3) - print*, ' test_int2_u_grad1u_total_j1b2 ...' 
+ print*, ' test_int2_u_grad1u_total_env2 ...' - PROVIDE int2_u_grad1u_j1b2 - PROVIDE int2_u_grad1u_x_j1b2 + PROVIDE int2_u_grad1u_env2 + PROVIDE int2_u_grad1u_x_env2 eps_ij = 1d-3 acc_tot = 0.d0 @@ -376,13 +374,13 @@ subroutine test_int2_u_grad1u_total_j1b2() do j = 1, ao_num do i = 1, ao_num - call num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) + call num_int2_u_grad1u_total_env2(i, j, ipoint, integ) - i_exc = x * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(i,j,ipoint,1) + i_exc = x * int2_u_grad1u_env2(i,j,ipoint) - int2_u_grad1u_x_env2(i,j,ipoint,1) i_num = integ(1) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in x part of int2_u_grad1u_total_j1b2 on', i, j, ipoint + print *, ' problem in x part of int2_u_grad1u_total_env2 on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -390,11 +388,11 @@ subroutine test_int2_u_grad1u_total_j1b2() acc_tot += acc_ij normalz += dabs(i_num) - i_exc = y * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(i,j,ipoint,2) + i_exc = y * int2_u_grad1u_env2(i,j,ipoint) - int2_u_grad1u_x_env2(i,j,ipoint,2) i_num = integ(2) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in y part of int2_u_grad1u_total_j1b2 on', i, j, ipoint + print *, ' problem in y part of int2_u_grad1u_total_env2 on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -402,11 +400,11 @@ subroutine test_int2_u_grad1u_total_j1b2() acc_tot += acc_ij normalz += dabs(i_num) - i_exc = z * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(i,j,ipoint,3) + i_exc = z * int2_u_grad1u_env2(i,j,ipoint) - int2_u_grad1u_x_env2(i,j,ipoint,3) i_num = integ(3) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in z part of int2_u_grad1u_total_j1b2 on', i, j, ipoint + print *, ' problem in z part of int2_u_grad1u_total_env2 on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -422,109 +420,7 @@ subroutine test_int2_u_grad1u_total_j1b2() print*, ' normalz = ', normalz return -end subroutine test_int2_u_grad1u_total_j1b2 - -! --- - -subroutine test_gradu_squared_u_ij_mu() - - implicit none - integer :: i, j, ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_gradu_squared_u_ij_mu - - print*, ' test_gradu_squared_u_ij_mu ...' - - PROVIDE gradu_squared_u_ij_mu - - eps_ij = 1d-3 - acc_tot = 0.d0 - normalz = 0.d0 - - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - - i_exc = gradu_squared_u_ij_mu(i,j,ipoint) - i_num = num_gradu_squared_u_ij_mu(i, j, ipoint) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in gradu_squared_u_ij_mu on', i, j, ipoint - print *, ' analyt integ = ', i_exc - print *, ' numeri integ = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_num) - - enddo - enddo - enddo - - print*, ' acc_tot = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_gradu_squared_u_ij_mu - -! --- - -subroutine test_tchint_rsdft() - - implicit none - integer :: i, j, m, ipoint, jpoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: x(3), y(3), dj_1(3), dj_2(3), dj_3(3) - - print*, ' test rsdft_jastrow ...' 
- - PROVIDE grad1_u12_num - - eps_ij = 1d-4 - acc_tot = 0.d0 - normalz = 0.d0 - - do ipoint = 1, n_points_final_grid - x(1) = final_grid_points(1,ipoint) - x(2) = final_grid_points(2,ipoint) - x(3) = final_grid_points(3,ipoint) - - do jpoint = 1, n_points_extra_final_grid - y(1) = final_grid_points_extra(1,jpoint) - y(2) = final_grid_points_extra(2,jpoint) - y(3) = final_grid_points_extra(3,jpoint) - - dj_1(1) = grad1_u12_num(jpoint,ipoint,1) - dj_1(2) = grad1_u12_num(jpoint,ipoint,2) - dj_1(3) = grad1_u12_num(jpoint,ipoint,3) - - call get_tchint_rsdft_jastrow(x, y, dj_2) - - do m = 1, 3 - i_exc = dj_1(m) - i_num = dj_2(m) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem on', ipoint, jpoint, m - print *, ' x = ', x - print *, ' y = ', y - print *, ' exc, num, diff = ', i_exc, i_num, acc_ij - call grad1_jmu_modif_num(x, y, dj_3) - print *, ' check = ', dj_3(m) - stop - endif - - acc_tot += acc_ij - normalz += dabs(i_exc) - enddo - enddo - enddo - - print*, ' acc_tot = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_tchint_rsdft +end ! --- @@ -567,20 +463,20 @@ subroutine test_grad12_j12() print*, ' normalz = ', normalz return -end subroutine test_grad12_j12 +end ! --- -subroutine test_u12sq_j1bsq() +subroutine test_u12sq_envsq() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_u12sq_j1bsq + double precision, external :: num_u12sq_envsq - print*, ' test_u12sq_j1bsq ...' + print*, ' test_u12sq_envsq ...' - PROVIDE u12sq_j1bsq + PROVIDE u12sq_envsq eps_ij = 1d-3 acc_tot = 0.d0 @@ -590,11 +486,11 @@ subroutine test_u12sq_j1bsq() do j = 1, ao_num do i = 1, ao_num - i_exc = u12sq_j1bsq(i,j,ipoint) - i_num = num_u12sq_j1bsq(i, j, ipoint) + i_exc = u12sq_envsq(i,j,ipoint) + i_num = num_u12sq_envsq(i, j, ipoint) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in u12sq_j1bsq on', i, j, ipoint + print *, ' problem in u12sq_envsq on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -610,20 +506,20 @@ subroutine test_u12sq_j1bsq() print*, ' normalz = ', normalz return -end subroutine test_u12sq_j1bsq +end ! --- -subroutine test_u12_grad1_u12_j1b_grad1_j1b() +subroutine test_u12_grad1_u12_env_grad1_env() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision, external :: num_u12_grad1_u12_j1b_grad1_j1b + double precision, external :: num_u12_grad1_u12_env_grad1_env - print*, ' test_u12_grad1_u12_j1b_grad1_j1b ...' + print*, ' test_u12_grad1_u12_env_grad1_env ...' - PROVIDE u12_grad1_u12_j1b_grad1_j1b + PROVIDE u12_grad1_u12_env_grad1_env eps_ij = 1d-3 acc_tot = 0.d0 @@ -633,11 +529,11 @@ subroutine test_u12_grad1_u12_j1b_grad1_j1b() do j = 1, ao_num do i = 1, ao_num - i_exc = u12_grad1_u12_j1b_grad1_j1b(i,j,ipoint) - i_num = num_u12_grad1_u12_j1b_grad1_j1b(i, j, ipoint) + i_exc = u12_grad1_u12_env_grad1_env(i,j,ipoint) + i_num = num_u12_grad1_u12_env_grad1_env(i, j, ipoint) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. 
eps_ij) then - print *, ' problem in u12_grad1_u12_j1b_grad1_j1b on', i, j, ipoint + print *, ' problem in u12_grad1_u12_env_grad1_env on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -653,7 +549,7 @@ subroutine test_u12_grad1_u12_j1b_grad1_j1b() print*, ' normalz = ', normalz return -end subroutine test_u12_grad1_u12_j1b_grad1_j1b +end ! --- @@ -670,7 +566,7 @@ subroutine test_vect_overlap_gauss_r12_ao() print *, ' test_vect_overlap_gauss_r12_ao ...' - provide mu_erf final_grid_points_transp j1b_pen + provide mu_erf final_grid_points_transp expo_fit = expo_gauss_j_mu_x_2(1) @@ -740,7 +636,7 @@ subroutine test_vect_overlap_gauss_r12_ao() print*, ' normalz = ', normalz return -end subroutine test_vect_overlap_gauss_r12_ao +end ! --- @@ -757,13 +653,13 @@ subroutine test_vect_overlap_gauss_r12_ao_with1s() print *, ' test_vect_overlap_gauss_r12_ao_with1s ...' - provide mu_erf final_grid_points_transp j1b_pen + provide mu_erf final_grid_points_transp expo_fit = expo_gauss_j_mu_x_2(1) - beta = List_all_comb_b3_expo (2) - B_center(1) = List_all_comb_b3_cent(1,2) - B_center(2) = List_all_comb_b3_cent(2,2) - B_center(3) = List_all_comb_b3_cent(3,2) + beta = List_env1s_square_expo (2) + B_center(1) = List_env1s_square_cent(1,2) + B_center(2) = List_env1s_square_cent(2,2) + B_center(3) = List_env1s_square_cent(3,2) ! --- @@ -831,5 +727,52 @@ subroutine test_vect_overlap_gauss_r12_ao_with1s() print*, ' normalz = ', normalz return -end subroutine test_vect_overlap_gauss_r12_ao +end + +! --- + +subroutine test_Ir2_Mu_long_Du_0() + + implicit none + integer :: i, j, ipoint + double precision :: i_old, i_new + double precision :: acc_ij, acc_tot, eps_ij, normalz + + print*, ' test_Ir2_Mu_long_Du_0 ...' + + PROVIDE v_ij_erf_rk_cst_mu_env + PROVIDE Ir2_Mu_long_Du_0 + + eps_ij = 1d-10 + acc_tot = 0.d0 + normalz = 0.d0 + + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_old = v_ij_erf_rk_cst_mu_env(i,j,ipoint) + i_new = Ir2_Mu_long_Du_0 (i,j,ipoint) + + acc_ij = dabs(i_old - i_new) + if(acc_ij .gt. eps_ij) then + print *, ' problem in Ir2_Mu_long_Du_0 on', i, j, ipoint + print *, ' old integ = ', i_old + print *, ' new integ = ', i_new + print *, ' diff = ', acc_ij + stop + endif + + acc_tot += acc_ij + normalz += dabs(i_old) + enddo + enddo + enddo + + print*, ' acc_tot (%) = ', 100.d0 * acc_tot / normalz + + return +end + +! --- diff --git a/plugins/local/non_h_ints_mu/grad_squared.irp.f b/plugins/local/non_h_ints_mu/grad_squared.irp.f index 8c6d35dc..342e1fe7 100644 --- a/plugins/local/non_h_ints_mu/grad_squared.irp.f +++ b/plugins/local/non_h_ints_mu/grad_squared.irp.f @@ -1,224 +1,7 @@ ! --- -! TODO : strong optmization : write the loops in a different way -! : for each couple of AO, the gaussian product are done once for all - -BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num, n_points_final_grid) ] - - BEGIN_DOC - ! - ! if J(r1,r2) = u12: - ! - ! gradu_squared_u_ij_mu = -0.50 x \int r2 [ (grad_1 u12)^2 + (grad_2 u12^2)] \phi_i(2) \phi_j(2) - ! = -0.25 x \int r2 (1 - erf(mu*r12))^2 \phi_i(2) \phi_j(2) - ! and - ! (1 - erf(mu*r12))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * r12^2) - ! - ! if J(r1,r2) = u12 x v1 x v2 - ! - ! gradu_squared_u_ij_mu = -0.50 x \int r2 \phi_i(2) \phi_j(2) [ v1^2 v2^2 ((grad_1 u12)^2 + (grad_2 u12^2)) + u12^2 v2^2 (grad_1 v1)^2 + 2 u12 v1 v2^2 (grad_1 u12) . (grad_1 v1) ] - ! 
= -0.25 x v1^2 \int r2 \phi_i(2) \phi_j(2) [1 - erf(mu r12)]^2 v2^2 - ! + -0.50 x (grad_1 v1)^2 \int r2 \phi_i(2) \phi_j(2) u12^2 v2^2 - ! + -1.00 x v1 (grad_1 v1) \int r2 \phi_i(2) \phi_j(2) (grad_1 u12) v2^2 - ! = v1^2 x int2_grad1u2_grad2u2_j1b2 - ! + -0.5 x (grad_1 v1)^2 x int2_u2_j1b2 - ! + -1.0 X V1 x (grad_1 v1) \cdot [ int2_u_grad1u_j1b2 x r - int2_u_grad1u_x_j1b ] - ! - ! - END_DOC - - implicit none - integer :: ipoint, i, j, m, igauss - double precision :: x, y, z, r(3), delta, coef - double precision :: tmp_v, tmp_x, tmp_y, tmp_z - double precision :: tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9 - double precision :: time0, time1 - double precision, external :: overlap_gauss_r12_ao - - print*, ' providing gradu_squared_u_ij_mu ...' - call wall_time(time0) - - PROVIDE j1b_type - - if(j1b_type .eq. 3) then - - do ipoint = 1, n_points_final_grid - - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - tmp_v = v_1b (ipoint) - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) - - tmp1 = tmp_v * tmp_v - tmp2 = -0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) - tmp3 = tmp_v * tmp_x - tmp4 = tmp_v * tmp_y - tmp5 = tmp_v * tmp_z - - tmp6 = -x * tmp3 - tmp7 = -y * tmp4 - tmp8 = -z * tmp5 - - do j = 1, ao_num - do i = 1, ao_num - - tmp9 = int2_u_grad1u_j1b2(i,j,ipoint) - - gradu_squared_u_ij_mu(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) & - + tmp2 * int2_u2_j1b2 (i,j,ipoint) & - + tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_j1b2(i,j,ipoint,1) & - + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_j1b2(i,j,ipoint,2) & - + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_j1b2(i,j,ipoint,3) - enddo - enddo - enddo - - else - - gradu_squared_u_ij_mu = 0.d0 - do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - do igauss = 1, n_max_fit_slat - delta = expo_gauss_1_erf_x_2(igauss) - coef = coef_gauss_1_erf_x_2(igauss) - gradu_squared_u_ij_mu(i,j,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) - enddo - enddo - enddo - enddo - - endif - - call wall_time(time1) - print*, ' Wall time for gradu_squared_u_ij_mu = ', time1 - time0 - -END_PROVIDER - -! --- - -!BEGIN_PROVIDER [double precision, tc_grad_square_ao_loop, (ao_num, ao_num, ao_num, ao_num)] -! -! BEGIN_DOC -! ! -! ! tc_grad_square_ao_loop(k,i,l,j) = -1/2 -! ! -! END_DOC -! -! implicit none -! integer :: ipoint, i, j, k, l -! double precision :: weight1, ao_ik_r, ao_i_r -! double precision, allocatable :: ac_mat(:,:,:,:) -! -! allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) -! ac_mat = 0.d0 -! -! do ipoint = 1, n_points_final_grid -! weight1 = final_weight_at_r_vector(ipoint) -! -! do i = 1, ao_num -! ao_i_r = weight1 * aos_in_r_array_transp(ipoint,i) -! -! do k = 1, ao_num -! ao_ik_r = ao_i_r * aos_in_r_array_transp(ipoint,k) -! -! do j = 1, ao_num -! do l = 1, ao_num -! ac_mat(k,i,l,j) += ao_ik_r * gradu_squared_u_ij_mu(l,j,ipoint) -! enddo -! enddo -! enddo -! enddo -! enddo -! -! do j = 1, ao_num -! do l = 1, ao_num -! do i = 1, ao_num -! do k = 1, ao_num -! tc_grad_square_ao_loop(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) -! !write(11,*) tc_grad_square_ao_loop(k,i,l,j) -! enddo -! enddo -! enddo -! enddo -! -! deallocate(ac_mat) -! -!END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, tc_grad_square_ao_loop, (ao_num, ao_num, ao_num, ao_num)] - - BEGIN_DOC - ! - ! 
tc_grad_square_ao_loop(k,i,l,j) = 1/2 - ! - END_DOC - - implicit none - integer :: ipoint, i, j, k, l - double precision :: weight1, ao_ik_r, ao_i_r - double precision :: time0, time1 - double precision, allocatable :: ac_mat(:,:,:,:), bc_mat(:,:,:,:) - - print*, ' providing tc_grad_square_ao_loop ...' - call wall_time(time0) - - allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) - ac_mat = 0.d0 - allocate(bc_mat(ao_num,ao_num,ao_num,ao_num)) - bc_mat = 0.d0 - - do ipoint = 1, n_points_final_grid - weight1 = final_weight_at_r_vector(ipoint) - - do i = 1, ao_num - !ao_i_r = weight1 * aos_in_r_array_transp(ipoint,i) - ao_i_r = weight1 * aos_in_r_array(i,ipoint) - - do k = 1, ao_num - !ao_ik_r = ao_i_r * aos_in_r_array_transp(ipoint,k) - ao_ik_r = ao_i_r * aos_in_r_array(k,ipoint) - - do j = 1, ao_num - do l = 1, ao_num - ac_mat(k,i,l,j) += ao_ik_r * ( u12sq_j1bsq(l,j,ipoint) + u12_grad1_u12_j1b_grad1_j1b(l,j,ipoint) ) - bc_mat(k,i,l,j) += ao_ik_r * grad12_j12(l,j,ipoint) - enddo - enddo - enddo - enddo - enddo - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - tc_grad_square_ao_loop(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + bc_mat(k,i,l,j) - enddo - enddo - enddo - enddo - - deallocate(ac_mat) - deallocate(bc_mat) - - call wall_time(time1) - print*, ' Wall time for tc_grad_square_ao_loop = ', time1 - time0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, grad12_j12, (ao_num, ao_num, n_points_final_grid) ] +BEGIN_PROVIDER [double precision, grad12_j12, (ao_num, ao_num, n_points_final_grid)] implicit none integer :: ipoint, i, j, m, igauss @@ -230,48 +13,28 @@ BEGIN_PROVIDER [ double precision, grad12_j12, (ao_num, ao_num, n_points_final_g print*, ' providing grad12_j12 ...' call wall_time(time0) - PROVIDE j1b_type - PROVIDE int2_grad1u2_grad2u2_j1b2 + PROVIDE int2_grad1u2_grad2u2_env2 do ipoint = 1, n_points_final_grid - tmp1 = v_1b(ipoint) + tmp1 = env_val(ipoint) tmp1 = tmp1 * tmp1 do j = 1, ao_num do i = 1, ao_num - grad12_j12(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + grad12_j12(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_env2(i,j,ipoint) enddo enddo enddo - FREE int2_grad1u2_grad2u2_j1b2 - - !if(j1b_type .eq. 0) then - ! grad12_j12 = 0.d0 - ! do ipoint = 1, n_points_final_grid - ! r(1) = final_grid_points(1,ipoint) - ! r(2) = final_grid_points(2,ipoint) - ! r(3) = final_grid_points(3,ipoint) - ! do j = 1, ao_num - ! do i = 1, ao_num - ! do igauss = 1, n_max_fit_slat - ! delta = expo_gauss_1_erf_x_2(igauss) - ! coef = coef_gauss_1_erf_x_2(igauss) - ! grad12_j12(i,j,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) - ! enddo - ! enddo - ! enddo - ! enddo - !endif + FREE int2_grad1u2_grad2u2_env2 call wall_time(time1) - print*, ' Wall time for grad12_j12 = ', time1 - time0 - call print_memory_usage() + print*, ' Wall time for grad12_j12 (min) = ', (time1 - time0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [double precision, u12sq_j1bsq, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [double precision, u12sq_envsq, (ao_num, ao_num, n_points_final_grid)] implicit none integer :: ipoint, i, j @@ -279,33 +42,32 @@ BEGIN_PROVIDER [double precision, u12sq_j1bsq, (ao_num, ao_num, n_points_final_g double precision :: tmp1 double precision :: time0, time1 - print*, ' providing u12sq_j1bsq ...' + print*, ' providing u12sq_envsq ...' call wall_time(time0) ! 
do not free here - PROVIDE int2_u2_j1b2 + PROVIDE int2_u2_env2 do ipoint = 1, n_points_final_grid - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) + tmp_x = env_grad(1,ipoint) + tmp_y = env_grad(2,ipoint) + tmp_z = env_grad(3,ipoint) tmp1 = -0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) do j = 1, ao_num do i = 1, ao_num - u12sq_j1bsq(i,j,ipoint) = tmp1 * int2_u2_j1b2(i,j,ipoint) + u12sq_envsq(i,j,ipoint) = tmp1 * int2_u2_env2(i,j,ipoint) enddo enddo enddo call wall_time(time1) - print*, ' Wall time for u12sq_j1bsq = ', time1 - time0 - call print_memory_usage() + print*, ' Wall time for u12sq_envsq (min) = ', (time1 - time0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b, (ao_num, ao_num, n_points_final_grid) ] +BEGIN_PROVIDER [double precision, u12_grad1_u12_env_grad1_env, (ao_num, ao_num, n_points_final_grid)] implicit none integer :: ipoint, i, j, m, igauss @@ -315,21 +77,21 @@ BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b, (ao_num, ao_num, double precision :: time0, time1 double precision, external :: overlap_gauss_r12_ao - print*, ' providing u12_grad1_u12_j1b_grad1_j1b ...' + print*, ' providing u12_grad1_u12_env_grad1_env ...' call wall_time(time0) - PROVIDE int2_u_grad1u_j1b2 - PROVIDE int2_u_grad1u_x_j1b2 + PROVIDE int2_u_grad1u_env2 + PROVIDE int2_u_grad1u_x_env2 do ipoint = 1, n_points_final_grid x = final_grid_points(1,ipoint) y = final_grid_points(2,ipoint) z = final_grid_points(3,ipoint) - tmp_v = v_1b (ipoint) - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) + tmp_v = env_val (ipoint) + tmp_x = env_grad(1,ipoint) + tmp_y = env_grad(2,ipoint) + tmp_z = env_grad(3,ipoint) tmp3 = tmp_v * tmp_x tmp4 = tmp_v * tmp_y @@ -342,143 +104,20 @@ BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b, (ao_num, ao_num, do j = 1, ao_num do i = 1, ao_num - tmp9 = int2_u_grad1u_j1b2(i,j,ipoint) + tmp9 = int2_u_grad1u_env2(i,j,ipoint) - u12_grad1_u12_j1b_grad1_j1b(i,j,ipoint) = tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_j1b2(i,j,ipoint,1) & - + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_j1b2(i,j,ipoint,2) & - + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_j1b2(i,j,ipoint,3) + u12_grad1_u12_env_grad1_env(i,j,ipoint) = tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_env2(i,j,ipoint,1) & + + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_env2(i,j,ipoint,2) & + + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_env2(i,j,ipoint,3) enddo enddo enddo - FREE int2_u_grad1u_j1b2 - FREE int2_u_grad1u_x_j1b2 + FREE int2_u_grad1u_env2 + FREE int2_u_grad1u_x_env2 call wall_time(time1) - print*, ' Wall time for u12_grad1_u12_j1b_grad1_j1b = ', time1 - time0 - call print_memory_usage() - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)] - - BEGIN_DOC - ! - ! tc_grad_square_ao(k,i,l,j) = -1/2 - ! - END_DOC - - implicit none - integer :: ipoint, i, j, k, l - double precision :: weight1, ao_k_r, ao_i_r - double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq - double precision :: time0, time1 - double precision, allocatable :: b_mat(:,:,:), tmp(:,:,:) - - print*, ' providing tc_grad_square_ao ...' - call wall_time(time0) - - if(read_tc_integ) then - - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/tc_grad_square_ao', action="read") - read(11) tc_grad_square_ao - close(11) - - else - - ! 
--- - - PROVIDE int2_grad1_u12_square_ao - - allocate(b_mat(n_points_final_grid,ao_num,ao_num)) - - b_mat = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, k, ipoint) & - !$OMP SHARED (aos_in_r_array_transp, b_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) - !$OMP DO SCHEDULE (static) - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - b_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - tc_grad_square_ao = 0.d0 - call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , int2_grad1_u12_square_ao(1,1,1), ao_num*ao_num, b_mat(1,1,1), n_points_final_grid & - , 0.d0, tc_grad_square_ao, ao_num*ao_num) - - FREE int2_grad1_u12_square_ao - - ! --- - - if(((j1b_type .eq. 3) .or. (j1b_type .eq. 4)) .and. use_ipp) then - - print*, " going through Manu's IPP" - - ! an additional term is added here directly instead of - ! being added in int2_grad1_u12_square_ao for performance - - PROVIDE int2_u2_j1b2 - - b_mat = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & - !$OMP SHARED (aos_in_r_array_transp, b_mat, ao_num, n_points_final_grid, final_weight_at_r_vector, & - !$OMP v_1b_square_grad, v_1b_square_lapl, aos_grad_in_r_array_transp_bis) - !$OMP DO SCHEDULE (static) - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - - weight1 = 0.25d0 * final_weight_at_r_vector(ipoint) - - ao_i_r = aos_in_r_array_transp(ipoint,i) - ao_k_r = aos_in_r_array_transp(ipoint,k) - - b_mat(ipoint,k,i) = weight1 * ( ao_k_r * ao_i_r * v_1b_square_lapl(ipoint) & - + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * v_1b_square_grad(ipoint,1) & - + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * v_1b_square_grad(ipoint,2) & - + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * v_1b_square_grad(ipoint,3) ) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , int2_u2_j1b2(1,1,1), ao_num*ao_num, b_mat(1,1,1), n_points_final_grid & - , 1.d0, tc_grad_square_ao, ao_num*ao_num) - - FREE int2_u2_j1b2 - endif - - ! --- - - deallocate(b_mat) - call sum_A_At(tc_grad_square_ao(1,1,1,1), ao_num*ao_num) - - endif - - if(write_tc_integ.and.mpi_master) then - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/tc_grad_square_ao', action="write") - call ezfio_set_work_empty(.False.) - write(11) tc_grad_square_ao - close(11) - call ezfio_set_tc_keywords_io_tc_integ('Read') - endif - - call wall_time(time1) - print*, ' Wall time for tc_grad_square_ao = ', time1 - time0 - call print_memory_usage() + print*, ' Wall time for u12_grad1_u12_env_grad1_env (min) = ', (time1 - time0) / 60.d0 END_PROVIDER diff --git a/plugins/local/non_h_ints_mu/grad_squared_manu.irp.f b/plugins/local/non_h_ints_mu/grad_squared_manu.irp.f index dcfeff47..8bfddf7e 100644 --- a/plugins/local/non_h_ints_mu/grad_squared_manu.irp.f +++ b/plugins/local/non_h_ints_mu/grad_squared_manu.irp.f @@ -1,4 +1,6 @@ +! 
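+! Note on the test providers in this file: tc_grad_square_ao_test and
+! tc_grad_square_ao_test_ref both assemble, at every grid point r1, the sum
+!   u12sq_envsq_test(l,j,r1) + u12_grad1_u12_env_grad1_env_test(l,j,r1) + 0.5 * grad12_j12_test(l,j,r1)
+! before contracting it with the grid-weighted AO pair products stored in b_mat.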
--- + BEGIN_PROVIDER [double precision, tc_grad_square_ao_test, (ao_num, ao_num, ao_num, ao_num)] BEGIN_DOC @@ -24,7 +26,7 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao_test, (ao_num, ao_num, ao_nu else - provide u12sq_j1bsq_test u12_grad1_u12_j1b_grad1_j1b_test grad12_j12_test + provide u12sq_envsq_test u12_grad1_u12_env_grad1_env_test grad12_j12_test allocate(b_mat(n_points_final_grid,ao_num,ao_num), tmp(ao_num,ao_num,n_points_final_grid)) @@ -48,12 +50,12 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao_test, (ao_num, ao_num, ao_nu !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (j, l, ipoint) & - !$OMP SHARED (tmp, ao_num, n_points_final_grid, u12sq_j1bsq_test, u12_grad1_u12_j1b_grad1_j1b_test, grad12_j12_test) + !$OMP SHARED (tmp, ao_num, n_points_final_grid, u12sq_envsq_test, u12_grad1_u12_env_grad1_env_test, grad12_j12_test) !$OMP DO SCHEDULE (static) do ipoint = 1, n_points_final_grid do j = 1, ao_num do l = 1, ao_num - tmp(l,j,ipoint) = u12sq_j1bsq_test(l,j,ipoint) + u12_grad1_u12_j1b_grad1_j1b_test(l,j,ipoint) + 0.5d0 * grad12_j12_test(l,j,ipoint) + tmp(l,j,ipoint) = u12sq_envsq_test(l,j,ipoint) + u12_grad1_u12_env_grad1_env_test(l,j,ipoint) + 0.5d0 * grad12_j12_test(l,j,ipoint) enddo enddo enddo @@ -102,7 +104,7 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao_test_ref, (ao_num, ao_num, a print*, ' providing tc_grad_square_ao_test_ref ...' call wall_time(time0) - provide u12sq_j1bsq_test u12_grad1_u12_j1b_grad1_j1b_test grad12_j12_test + provide u12sq_envsq_test u12_grad1_u12_env_grad1_env_test grad12_j12_test allocate(ac_mat(ao_num,ao_num,ao_num,ao_num), b_mat(n_points_final_grid,ao_num,ao_num), tmp(ao_num,ao_num,n_points_final_grid)) @@ -126,12 +128,12 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao_test_ref, (ao_num, ao_num, a !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (j, l, ipoint) & - !$OMP SHARED (tmp, ao_num, n_points_final_grid, u12sq_j1bsq_test, u12_grad1_u12_j1b_grad1_j1b_test, grad12_j12_test) + !$OMP SHARED (tmp, ao_num, n_points_final_grid, u12sq_envsq_test, u12_grad1_u12_env_grad1_env_test, grad12_j12_test) !$OMP DO SCHEDULE (static) do ipoint = 1, n_points_final_grid do j = 1, ao_num do l = 1, ao_num - tmp(l,j,ipoint) = u12sq_j1bsq_test(l,j,ipoint) + u12_grad1_u12_j1b_grad1_j1b_test(l,j,ipoint) + 0.5d0 * grad12_j12_test(l,j,ipoint) + tmp(l,j,ipoint) = u12sq_envsq_test(l,j,ipoint) + u12_grad1_u12_env_grad1_env_test(l,j,ipoint) + 0.5d0 * grad12_j12_test(l,j,ipoint) enddo enddo enddo @@ -170,7 +172,7 @@ END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, u12sq_j1bsq_test, (ao_num, ao_num, n_points_final_grid) ] +BEGIN_PROVIDER [ double precision, u12sq_envsq_test, (ao_num, ao_num, n_points_final_grid) ] implicit none integer :: ipoint, i, j @@ -178,29 +180,29 @@ BEGIN_PROVIDER [ double precision, u12sq_j1bsq_test, (ao_num, ao_num, n_points_f double precision :: tmp1 double precision :: time0, time1 - print*, ' providing u12sq_j1bsq_test ...' + print*, ' providing u12sq_envsq_test ...' 
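+! same decomposition as u12sq_envsq in grad_squared.irp.f:
+!   u12sq_envsq_test(i,j,ipoint) = -1/2 |grad_1 v(r1)|^2 * int2_u2_env2_test(i,j,ipoint)
+! with r1 = final_grid_points(:,ipoint) and v the one-body envelope (env_val, env_grad);
+! only the *_test two-electron integral differs from the production provider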
call wall_time(time0) do ipoint = 1, n_points_final_grid - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) + tmp_x = env_grad(1,ipoint) + tmp_y = env_grad(2,ipoint) + tmp_z = env_grad(3,ipoint) tmp1 = -0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) do j = 1, ao_num do i = 1, ao_num - u12sq_j1bsq_test(i,j,ipoint) = tmp1 * int2_u2_j1b2_test(i,j,ipoint) + u12sq_envsq_test(i,j,ipoint) = tmp1 * int2_u2_env2_test(i,j,ipoint) enddo enddo enddo call wall_time(time1) - print*, ' Wall time for u12sq_j1bsq_test = ', time1 - time0 + print*, ' Wall time for u12sq_envsq_test (min) = ', (time1 - time0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b_test, (ao_num, ao_num, n_points_final_grid) ] +BEGIN_PROVIDER [double precision, u12_grad1_u12_env_grad1_env_test, (ao_num, ao_num, n_points_final_grid)] implicit none integer :: ipoint, i, j, m, igauss @@ -210,9 +212,9 @@ BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b_test, (ao_num, ao double precision :: time0, time1 double precision, external :: overlap_gauss_r12_ao - print*, ' providing u12_grad1_u12_j1b_grad1_j1b_test ...' + print*, ' providing u12_grad1_u12_env_grad1_env_test ...' - provide int2_u_grad1u_x_j1b2_test + provide int2_u_grad1u_x_env2_test call wall_time(time0) do ipoint = 1, n_points_final_grid @@ -220,10 +222,10 @@ BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b_test, (ao_num, ao x = final_grid_points(1,ipoint) y = final_grid_points(2,ipoint) z = final_grid_points(3,ipoint) - tmp_v = v_1b (ipoint) - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) + tmp_v = env_val (ipoint) + tmp_x = env_grad(1,ipoint) + tmp_y = env_grad(2,ipoint) + tmp_z = env_grad(3,ipoint) tmp3 = tmp_v * tmp_x tmp4 = tmp_v * tmp_y @@ -236,23 +238,23 @@ BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b_test, (ao_num, ao do j = 1, ao_num do i = 1, ao_num - tmp9 = int2_u_grad1u_j1b2_test(i,j,ipoint) + tmp9 = int2_u_grad1u_env2_test(i,j,ipoint) - u12_grad1_u12_j1b_grad1_j1b_test(i,j,ipoint) = tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_j1b2_test(i,j,ipoint,1) & - + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_j1b2_test(i,j,ipoint,2) & - + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_j1b2_test(i,j,ipoint,3) + u12_grad1_u12_env_grad1_env_test(i,j,ipoint) = tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_env2_test(i,j,ipoint,1) & + + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_env2_test(i,j,ipoint,2) & + + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_env2_test(i,j,ipoint,3) enddo enddo enddo call wall_time(time1) - print*, ' Wall time for u12_grad1_u12_j1b_grad1_j1b_test = ', time1 - time0 + print*, ' Wall time for u12_grad1_u12_env_grad1_env_test (min) = ', (time1 - time0) / 60.d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, grad12_j12_test, (ao_num, ao_num, n_points_final_grid) ] +BEGIN_PROVIDER [double precision, grad12_j12_test, (ao_num, ao_num, n_points_final_grid)] implicit none integer :: ipoint, i, j, m, igauss @@ -260,46 +262,32 @@ BEGIN_PROVIDER [ double precision, grad12_j12_test, (ao_num, ao_num, n_points_fi double precision :: tmp1 double precision :: time0, time1 double precision, external :: overlap_gauss_r12_ao - provide int2_grad1u2_grad2u2_j1b2_test + + provide int2_grad1u2_grad2u2_env2_test print*, ' providing grad12_j12_test ...' call wall_time(time0) - PROVIDE j1b_type - - if(j1b_type .eq. 3) then + if((j2e_type .eq. "Mu") .and. (env_type .eq. 
"Prod_Gauss")) then do ipoint = 1, n_points_final_grid - tmp1 = v_1b(ipoint) + tmp1 = env_val(ipoint) tmp1 = tmp1 * tmp1 do j = 1, ao_num do i = 1, ao_num - grad12_j12_test(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_j1b2_test(i,j,ipoint) + grad12_j12_test(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_env2_test(i,j,ipoint) enddo enddo enddo else - grad12_j12_test = 0.d0 - do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - do igauss = 1, n_max_fit_slat - delta = expo_gauss_1_erf_x_2(igauss) - coef = coef_gauss_1_erf_x_2(igauss) - grad12_j12_test(i,j,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) - enddo - enddo - enddo - enddo + print *, ' Error in grad12_j12_test: Unknown Jastrow' + stop endif call wall_time(time1) - print*, ' Wall time for grad12_j12_test = ', time1 - time0 + print*, ' Wall time for grad12_j12_test (min) = ', (time1 - time0) / 60.d0 END_PROVIDER diff --git a/plugins/local/non_h_ints_mu/j12_nucl_utils.irp.f b/plugins/local/non_h_ints_mu/j12_nucl_utils.irp.f index 7dd13f14..27b92a13 100644 --- a/plugins/local/non_h_ints_mu/j12_nucl_utils.irp.f +++ b/plugins/local/non_h_ints_mu/j12_nucl_utils.irp.f @@ -1,14 +1,18 @@ ! --- -BEGIN_PROVIDER [ double precision, v_1b, (n_points_final_grid)] +BEGIN_PROVIDER [double precision, env_val, (n_points_final_grid)] implicit none integer :: ipoint, i, j, phase double precision :: x, y, z, dx, dy, dz double precision :: a, d, e, fact_r - if(j1b_type .eq. 3) then + if(env_type .eq. "None") then + + env_val = 1.d0 + + elseif(env_type .eq. "Prod_Gauss") then ! v(r) = \Pi_{a} [1 - \exp(-\alpha_a (r - r_a)^2)] @@ -20,7 +24,7 @@ BEGIN_PROVIDER [ double precision, v_1b, (n_points_final_grid)] fact_r = 1.d0 do j = 1, nucl_num - a = j1b_pen(j) + a = env_expo(j) dx = x - nucl_coord(j,1) dy = y - nucl_coord(j,2) dz = z - nucl_coord(j,3) @@ -30,10 +34,10 @@ BEGIN_PROVIDER [ double precision, v_1b, (n_points_final_grid)] fact_r = fact_r * e enddo - v_1b(ipoint) = fact_r + env_val(ipoint) = fact_r enddo - elseif(j1b_type .eq. 4) then + elseif(env_type .eq. "Sum_Gauss") then ! v(r) = 1 - \sum_{a} \beta_a \exp(-\alpha_a (r - r_a)^2) @@ -45,21 +49,21 @@ BEGIN_PROVIDER [ double precision, v_1b, (n_points_final_grid)] fact_r = 1.d0 do j = 1, nucl_num - a = j1b_pen(j) + a = env_expo(j) dx = x - nucl_coord(j,1) dy = y - nucl_coord(j,2) dz = z - nucl_coord(j,3) d = dx*dx + dy*dy + dz*dz - fact_r = fact_r - j1b_pen_coef(j) * dexp(-a*d) + fact_r = fact_r - env_coef(j) * dexp(-a*d) enddo - v_1b(ipoint) = fact_r + env_val(ipoint) = fact_r enddo else - print*, 'j1b_type = ', j1b_type, 'is not implemented for v_1b' + print *, ' Error in env_val: Unknown env_type = ', env_type stop endif @@ -68,7 +72,7 @@ END_PROVIDER ! --- -BEGIN_PROVIDER [double precision, v_1b_grad, (3, n_points_final_grid)] +BEGIN_PROVIDER [double precision, env_grad, (3, n_points_final_grid)] implicit none integer :: ipoint, i, j, phase @@ -77,9 +81,11 @@ BEGIN_PROVIDER [double precision, v_1b_grad, (3, n_points_final_grid)] double precision :: fact_x, fact_y, fact_z double precision :: ax_der, ay_der, az_der, a_expo - PROVIDE j1b_type + if(env_type .eq. "None") then - if(j1b_type .eq. 3) then + env_grad = 0.d0 + + elseif(env_type .eq. "Prod_Gauss") then ! 
v(r) = \Pi_{a} [1 - \exp(-\alpha_a (r - r_a)^2)] @@ -92,7 +98,7 @@ BEGIN_PROVIDER [double precision, v_1b_grad, (3, n_points_final_grid)] fact_x = 0.d0 fact_y = 0.d0 fact_z = 0.d0 - do i = 1, List_all_comb_b2_size + do i = 1, List_env1s_size phase = 0 a_expo = 0.d0 @@ -100,12 +106,12 @@ BEGIN_PROVIDER [double precision, v_1b_grad, (3, n_points_final_grid)] ay_der = 0.d0 az_der = 0.d0 do j = 1, nucl_num - a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + a = dble(List_env1s(j,i)) * env_expo(j) dx = x - nucl_coord(j,1) dy = y - nucl_coord(j,2) dz = z - nucl_coord(j,3) - phase += List_all_comb_b2(j,i) + phase += List_env1s(j,i) a_expo += a * (dx*dx + dy*dy + dz*dz) ax_der += a * dx ay_der += a * dy @@ -118,12 +124,12 @@ BEGIN_PROVIDER [double precision, v_1b_grad, (3, n_points_final_grid)] fact_z += e * az_der enddo - v_1b_grad(1,ipoint) = fact_x - v_1b_grad(2,ipoint) = fact_y - v_1b_grad(3,ipoint) = fact_z + env_grad(1,ipoint) = fact_x + env_grad(2,ipoint) = fact_y + env_grad(3,ipoint) = fact_z enddo - elseif(j1b_type .eq. 4) then + elseif(env_type .eq. "Sum_Gauss") then ! v(r) = 1 - \sum_{a} \beta_a \exp(-\alpha_a (r - r_a)^2) @@ -143,22 +149,22 @@ BEGIN_PROVIDER [double precision, v_1b_grad, (3, n_points_final_grid)] dz = z - nucl_coord(j,3) r2 = dx*dx + dy*dy + dz*dz - a = j1b_pen(j) - e = a * j1b_pen_coef(j) * dexp(-a * r2) + a = env_expo(j) + e = a * env_coef(j) * dexp(-a * r2) ax_der += e * dx ay_der += e * dy az_der += e * dz enddo - v_1b_grad(1,ipoint) = 2.d0 * ax_der - v_1b_grad(2,ipoint) = 2.d0 * ay_der - v_1b_grad(3,ipoint) = 2.d0 * az_der + env_grad(1,ipoint) = 2.d0 * ax_der + env_grad(2,ipoint) = 2.d0 * ay_der + env_grad(3,ipoint) = 2.d0 * az_der enddo else - print*, 'j1b_type = ', j1b_type, 'is not implemented' + print *, ' Error in env_grad: Unknown env_type = ', env_type stop endif @@ -167,126 +173,8 @@ END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, v_1b_lapl, (n_points_final_grid)] - - implicit none - integer :: ipoint, i, j, phase - double precision :: x, y, z, dx, dy, dz - double precision :: a, e, b - double precision :: fact_r - double precision :: ax_der, ay_der, az_der, a_expo - - do ipoint = 1, n_points_final_grid - - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - - fact_r = 0.d0 - do i = 1, List_all_comb_b2_size - - phase = 0 - b = 0.d0 - a_expo = 0.d0 - ax_der = 0.d0 - ay_der = 0.d0 - az_der = 0.d0 - do j = 1, nucl_num - a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - dx = x - nucl_coord(j,1) - dy = y - nucl_coord(j,2) - dz = z - nucl_coord(j,3) - - phase += List_all_comb_b2(j,i) - b += a - a_expo += a * (dx*dx + dy*dy + dz*dz) - ax_der += a * dx - ay_der += a * dy - az_der += a * dz - enddo - - fact_r += (-1.d0)**dble(phase) * (-6.d0*b + 4.d0*(ax_der*ax_der + ay_der*ay_der + az_der*az_der) ) * dexp(-a_expo) - enddo - - v_1b_lapl(ipoint) = fact_r - enddo - -END_PROVIDER - -! 
--- - -BEGIN_PROVIDER [ double precision, v_1b_list_b2, (n_points_final_grid)] - - implicit none - integer :: i, ipoint - double precision :: x, y, z, coef, expo, dx, dy, dz - double precision :: fact_r - - PROVIDE List_all_comb_b2_coef List_all_comb_b2_expo List_all_comb_b2_cent - - do ipoint = 1, n_points_final_grid - - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - - fact_r = 0.d0 - do i = 1, List_all_comb_b2_size - - coef = List_all_comb_b2_coef(i) - expo = List_all_comb_b2_expo(i) - - dx = x - List_all_comb_b2_cent(1,i) - dy = y - List_all_comb_b2_cent(2,i) - dz = z - List_all_comb_b2_cent(3,i) - - fact_r += coef * dexp(-expo * (dx*dx + dy*dy + dz*dz)) - enddo - - v_1b_list_b2(ipoint) = fact_r - enddo - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, v_1b_list_b3, (n_points_final_grid)] - - implicit none - integer :: i, ipoint - double precision :: x, y, z, coef, expo, dx, dy, dz - double precision :: fact_r - - PROVIDE List_all_comb_b3_coef List_all_comb_b3_expo List_all_comb_b3_cent - - do ipoint = 1, n_points_final_grid - - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - - fact_r = 0.d0 - do i = 1, List_all_comb_b3_size - - coef = List_all_comb_b3_coef(i) - expo = List_all_comb_b3_expo(i) - - dx = x - List_all_comb_b3_cent(1,i) - dy = y - List_all_comb_b3_cent(2,i) - dz = z - List_all_comb_b3_cent(3,i) - - fact_r += coef * dexp(-expo * (dx*dx + dy*dy + dz*dz)) - enddo - - v_1b_list_b3(ipoint) = fact_r - enddo - -END_PROVIDER - -! --- - - BEGIN_PROVIDER [double precision, v_1b_square_grad, (n_points_final_grid,3)] -&BEGIN_PROVIDER [double precision, v_1b_square_lapl, (n_points_final_grid) ] + BEGIN_PROVIDER [double precision, env_square_grad, (n_points_final_grid,3)] +&BEGIN_PROVIDER [double precision, env_square_lapl, (n_points_final_grid) ] implicit none integer :: ipoint, i @@ -294,42 +182,56 @@ END_PROVIDER double precision :: coef, expo, a_expo, tmp double precision :: fact_x, fact_y, fact_z, fact_r - PROVIDE List_all_comb_b3_coef List_all_comb_b3_expo List_all_comb_b3_cent + PROVIDE List_env1s_square_coef List_env1s_square_expo List_env1s_square_cent - do ipoint = 1, n_points_final_grid + if(env_type .eq. "None") then - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) + env_square_grad = 0.d0 + env_square_lapl = 0.d0 - fact_x = 0.d0 - fact_y = 0.d0 - fact_z = 0.d0 - fact_r = 0.d0 - do i = 1, List_all_comb_b3_size + elseif((env_type .eq. "Prod_Gauss") .or. (env_type .eq. 
"Sum_Gauss")) then - coef = List_all_comb_b3_coef(i) - expo = List_all_comb_b3_expo(i) + do ipoint = 1, n_points_final_grid - dx = x - List_all_comb_b3_cent(1,i) - dy = y - List_all_comb_b3_cent(2,i) - dz = z - List_all_comb_b3_cent(3,i) - r2 = dx * dx + dy * dy + dz * dz + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) - a_expo = expo * r2 - tmp = coef * expo * dexp(-a_expo) + fact_x = 0.d0 + fact_y = 0.d0 + fact_z = 0.d0 + fact_r = 0.d0 + do i = 1, List_env1s_square_size - fact_x += tmp * dx - fact_y += tmp * dy - fact_z += tmp * dz - fact_r += tmp * (3.d0 - 2.d0 * a_expo) + coef = List_env1s_square_coef(i) + expo = List_env1s_square_expo(i) + + dx = x - List_env1s_square_cent(1,i) + dy = y - List_env1s_square_cent(2,i) + dz = z - List_env1s_square_cent(3,i) + r2 = dx * dx + dy * dy + dz * dz + + a_expo = expo * r2 + tmp = coef * expo * dexp(-a_expo) + + fact_x += tmp * dx + fact_y += tmp * dy + fact_z += tmp * dz + fact_r += tmp * (3.d0 - 2.d0 * a_expo) + enddo + + env_square_grad(ipoint,1) = -2.d0 * fact_x + env_square_grad(ipoint,2) = -2.d0 * fact_y + env_square_grad(ipoint,3) = -2.d0 * fact_z + env_square_lapl(ipoint) = -2.d0 * fact_r enddo - v_1b_square_grad(ipoint,1) = -2.d0 * fact_x - v_1b_square_grad(ipoint,2) = -2.d0 * fact_y - v_1b_square_grad(ipoint,3) = -2.d0 * fact_z - v_1b_square_lapl(ipoint) = -2.d0 * fact_r - enddo + else + + print *, ' Error in env_val_square_grad & env_val_square_lapl: Unknown env_type = ', env_type + stop + + endif END_PROVIDER @@ -348,7 +250,7 @@ double precision function j12_mu_r12(r12) j12_mu_r12 = 0.5d0 * r12 * (1.d0 - derf(mu_r12)) - inv_sq_pi_2 * dexp(-mu_r12*mu_r12) / mu_erf return -end function j12_mu_r12 +end ! --- @@ -361,7 +263,7 @@ double precision function jmu_modif(r1, r2) jmu_modif = j12_mu(r1, r2) * j12_nucl(r1, r2) return -end function jmu_modif +end ! --- @@ -385,7 +287,7 @@ double precision function j12_mu_gauss(r1, r2) enddo return -end function j12_mu_gauss +end ! --- @@ -393,140 +295,138 @@ double precision function j12_nucl(r1, r2) implicit none double precision, intent(in) :: r1(3), r2(3) - double precision, external :: j1b_nucl + double precision, external :: env_nucl - j12_nucl = j1b_nucl(r1) * j1b_nucl(r2) + j12_nucl = env_nucl(r1) * env_nucl(r2) return -end function j12_nucl +end ! --- -! --------------------------------------------------------------------------------------- - -double precision function grad_x_j1b_nucl_num(r) +double precision function grad_x_env_nucl_num(r) implicit none double precision, intent(in) :: r(3) double precision :: r_eps(3), eps, fp, fm, delta - double precision, external :: j1b_nucl + double precision, external :: env_nucl eps = 1d-6 r_eps = r delta = max(eps, dabs(eps*r(1))) r_eps(1) = r_eps(1) + delta - fp = j1b_nucl(r_eps) + fp = env_nucl(r_eps) r_eps(1) = r_eps(1) - 2.d0 * delta - fm = j1b_nucl(r_eps) + fm = env_nucl(r_eps) - grad_x_j1b_nucl_num = 0.5d0 * (fp - fm) / delta + grad_x_env_nucl_num = 0.5d0 * (fp - fm) / delta return -end function grad_x_j1b_nucl_num +end -double precision function grad_y_j1b_nucl_num(r) +! 
--- + +double precision function grad_y_env_nucl_num(r) implicit none double precision, intent(in) :: r(3) double precision :: r_eps(3), eps, fp, fm, delta - double precision, external :: j1b_nucl + double precision, external :: env_nucl eps = 1d-6 r_eps = r delta = max(eps, dabs(eps*r(2))) r_eps(2) = r_eps(2) + delta - fp = j1b_nucl(r_eps) + fp = env_nucl(r_eps) r_eps(2) = r_eps(2) - 2.d0 * delta - fm = j1b_nucl(r_eps) + fm = env_nucl(r_eps) - grad_y_j1b_nucl_num = 0.5d0 * (fp - fm) / delta + grad_y_env_nucl_num = 0.5d0 * (fp - fm) / delta return -end function grad_y_j1b_nucl_num +end -double precision function grad_z_j1b_nucl_num(r) +! --- + +double precision function grad_z_env_nucl_num(r) implicit none double precision, intent(in) :: r(3) double precision :: r_eps(3), eps, fp, fm, delta - double precision, external :: j1b_nucl + double precision, external :: env_nucl eps = 1d-6 r_eps = r delta = max(eps, dabs(eps*r(3))) r_eps(3) = r_eps(3) + delta - fp = j1b_nucl(r_eps) + fp = env_nucl(r_eps) r_eps(3) = r_eps(3) - 2.d0 * delta - fm = j1b_nucl(r_eps) + fm = env_nucl(r_eps) - grad_z_j1b_nucl_num = 0.5d0 * (fp - fm) / delta + grad_z_env_nucl_num = 0.5d0 * (fp - fm) / delta return -end function grad_z_j1b_nucl_num - -! --------------------------------------------------------------------------------------- +end ! --- -double precision function lapl_j1b_nucl(r) +double precision function lapl_env_nucl(r) implicit none double precision, intent(in) :: r(3) double precision :: r_eps(3), eps, fp, fm, delta - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num + double precision, external :: grad_x_env_nucl_num + double precision, external :: grad_y_env_nucl_num + double precision, external :: grad_z_env_nucl_num eps = 1d-5 r_eps = r - lapl_j1b_nucl = 0.d0 + lapl_env_nucl = 0.d0 ! --- delta = max(eps, dabs(eps*r(1))) r_eps(1) = r_eps(1) + delta - fp = grad_x_j1b_nucl_num(r_eps) + fp = grad_x_env_nucl_num(r_eps) r_eps(1) = r_eps(1) - 2.d0 * delta - fm = grad_x_j1b_nucl_num(r_eps) + fm = grad_x_env_nucl_num(r_eps) r_eps(1) = r_eps(1) + delta - lapl_j1b_nucl += 0.5d0 * (fp - fm) / delta + lapl_env_nucl += 0.5d0 * (fp - fm) / delta ! --- delta = max(eps, dabs(eps*r(2))) r_eps(2) = r_eps(2) + delta - fp = grad_y_j1b_nucl_num(r_eps) + fp = grad_y_env_nucl_num(r_eps) r_eps(2) = r_eps(2) - 2.d0 * delta - fm = grad_y_j1b_nucl_num(r_eps) + fm = grad_y_env_nucl_num(r_eps) r_eps(2) = r_eps(2) + delta - lapl_j1b_nucl += 0.5d0 * (fp - fm) / delta + lapl_env_nucl += 0.5d0 * (fp - fm) / delta ! --- delta = max(eps, dabs(eps*r(3))) r_eps(3) = r_eps(3) + delta - fp = grad_z_j1b_nucl_num(r_eps) + fp = grad_z_env_nucl_num(r_eps) r_eps(3) = r_eps(3) - 2.d0 * delta - fm = grad_z_j1b_nucl_num(r_eps) + fm = grad_z_env_nucl_num(r_eps) r_eps(3) = r_eps(3) + delta - lapl_j1b_nucl += 0.5d0 * (fp - fm) / delta + lapl_env_nucl += 0.5d0 * (fp - fm) / delta ! --- return -end function lapl_j1b_nucl +end ! --- -! --------------------------------------------------------------------------------------- - double precision function grad1_x_jmu_modif(r1, r2) implicit none @@ -546,7 +446,9 @@ double precision function grad1_x_jmu_modif(r1, r2) grad1_x_jmu_modif = 0.5d0 * (fp - fm) / delta return -end function grad1_x_jmu_modif +end + +! 
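+! Like grad_{x,y,z}_env_nucl_num above, the grad1_{x,y,z}_jmu_modif functions return a
+! symmetric finite difference 0.5*(fp - fm)/delta, here differentiating
+!   jmu_modif(r1,r2) = j12_mu(r1,r2) * env_nucl(r1) * env_nucl(r2)
+! with respect to the corresponding Cartesian component of r1.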
--- double precision function grad1_y_jmu_modif(r1, r2) @@ -567,7 +469,9 @@ double precision function grad1_y_jmu_modif(r1, r2) grad1_y_jmu_modif = 0.5d0 * (fp - fm) / delta return -end function grad1_y_jmu_modif +end + +! --- double precision function grad1_z_jmu_modif(r1, r2) @@ -588,14 +492,10 @@ double precision function grad1_z_jmu_modif(r1, r2) grad1_z_jmu_modif = 0.5d0 * (fp - fm) / delta return -end function grad1_z_jmu_modif - -! --------------------------------------------------------------------------------------- +end ! --- -! --------------------------------------------------------------------------------------- - double precision function grad1_x_j12_mu_num(r1, r2) implicit none @@ -615,7 +515,9 @@ double precision function grad1_x_j12_mu_num(r1, r2) grad1_x_j12_mu_num = 0.5d0 * (fp - fm) / delta return -end function grad1_x_j12_mu_num +end + +! --- double precision function grad1_y_j12_mu_num(r1, r2) @@ -636,7 +538,9 @@ double precision function grad1_y_j12_mu_num(r1, r2) grad1_y_j12_mu_num = 0.5d0 * (fp - fm) / delta return -end function grad1_y_j12_mu_num +end + +! --- double precision function grad1_z_j12_mu_num(r1, r2) @@ -657,9 +561,9 @@ double precision function grad1_z_j12_mu_num(r1, r2) grad1_z_j12_mu_num = 0.5d0 * (fp - fm) / delta return -end function grad1_z_j12_mu_num +end -! --------------------------------------------------------------------------------------- +! --- subroutine grad1_jmu_modif_num(r1, r2, grad) @@ -671,103 +575,23 @@ subroutine grad1_jmu_modif_num(r1, r2, grad) double precision :: tmp0, tmp1, tmp2, grad_u12(3) double precision, external :: j12_mu - double precision, external :: j1b_nucl - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num + double precision, external :: env_nucl + double precision, external :: grad_x_env_nucl_num + double precision, external :: grad_y_env_nucl_num + double precision, external :: grad_z_env_nucl_num call grad1_j12_mu(r1, r2, grad_u12) - tmp0 = j1b_nucl(r1) - tmp1 = j1b_nucl(r2) + tmp0 = env_nucl(r1) + tmp1 = env_nucl(r2) tmp2 = j12_mu(r1, r2) - grad(1) = (tmp0 * grad_u12(1) + tmp2 * grad_x_j1b_nucl_num(r1)) * tmp1 - grad(2) = (tmp0 * grad_u12(2) + tmp2 * grad_y_j1b_nucl_num(r1)) * tmp1 - grad(3) = (tmp0 * grad_u12(3) + tmp2 * grad_z_j1b_nucl_num(r1)) * tmp1 + grad(1) = (tmp0 * grad_u12(1) + tmp2 * grad_x_env_nucl_num(r1)) * tmp1 + grad(2) = (tmp0 * grad_u12(2) + tmp2 * grad_y_env_nucl_num(r1)) * tmp1 + grad(3) = (tmp0 * grad_u12(3) + tmp2 * grad_z_env_nucl_num(r1)) * tmp1 return -end subroutine grad1_jmu_modif_num +end ! --- -subroutine get_tchint_rsdft_jastrow(x, y, dj) - - implicit none - double precision, intent(in) :: x(3), y(3) - double precision, intent(out) :: dj(3) - integer :: at - double precision :: a, mu_tmp, inv_sq_pi_2 - double precision :: tmp_x, tmp_y, tmp_z, tmp - double precision :: dx2, dy2, pos(3), dxy, dxy2 - double precision :: v1b_x, v1b_y - double precision :: u2b, grad1_u2b(3), grad1_v1b(3) - - PROVIDE mu_erf - - inv_sq_pi_2 = 0.5d0 / dsqrt(dacos(-1.d0)) - - dj = 0.d0 - -! double precision, external :: j12_mu, j1b_nucl -! v1b_x = j1b_nucl(x) -! v1b_y = j1b_nucl(y) -! call grad1_j1b_nucl(x, grad1_v1b) -! u2b = j12_mu(x, y) -! call grad1_j12_mu(x, y, grad1_u2b) - - ! 
1b terms - v1b_x = 1.d0 - v1b_y = 1.d0 - tmp_x = 0.d0 - tmp_y = 0.d0 - tmp_z = 0.d0 - do at = 1, nucl_num - - a = j1b_pen(at) - pos(1) = nucl_coord(at,1) - pos(2) = nucl_coord(at,2) - pos(3) = nucl_coord(at,3) - - dx2 = sum((x-pos)**2) - dy2 = sum((y-pos)**2) - tmp = dexp(-a*dx2) * a - - v1b_x = v1b_x - dexp(-a*dx2) - v1b_y = v1b_y - dexp(-a*dy2) - - tmp_x = tmp_x + tmp * (x(1) - pos(1)) - tmp_y = tmp_y + tmp * (x(2) - pos(2)) - tmp_z = tmp_z + tmp * (x(3) - pos(3)) - end do - grad1_v1b(1) = 2.d0 * tmp_x - grad1_v1b(2) = 2.d0 * tmp_y - grad1_v1b(3) = 2.d0 * tmp_z - - ! 2b terms - dxy2 = sum((x-y)**2) - dxy = dsqrt(dxy2) - mu_tmp = mu_erf * dxy - u2b = 0.5d0 * dxy * (1.d0 - derf(mu_tmp)) - inv_sq_pi_2 * dexp(-mu_tmp*mu_tmp) / mu_erf - - if(dxy .lt. 1d-8) then - grad1_u2b(1) = 0.d0 - grad1_u2b(2) = 0.d0 - grad1_u2b(3) = 0.d0 - else - tmp = 0.5d0 * (1.d0 - derf(mu_tmp)) / dxy - grad1_u2b(1) = tmp * (x(1) - y(1)) - grad1_u2b(2) = tmp * (x(2) - y(2)) - grad1_u2b(3) = tmp * (x(3) - y(3)) - endif - - dj(1) = (grad1_u2b(1) * v1b_x + u2b * grad1_v1b(1)) * v1b_y - dj(2) = (grad1_u2b(2) * v1b_x + u2b * grad1_v1b(2)) * v1b_y - dj(3) = (grad1_u2b(3) * v1b_x + u2b * grad1_v1b(3)) * v1b_y - - return -end subroutine get_tchint_rsdft_jastrow - -! --- - - diff --git a/plugins/local/non_h_ints_mu/jast_1e.irp.f b/plugins/local/non_h_ints_mu/jast_1e.irp.f new file mode 100644 index 00000000..e994d27a --- /dev/null +++ b/plugins/local/non_h_ints_mu/jast_1e.irp.f @@ -0,0 +1,306 @@ + +! --- + +BEGIN_PROVIDER [double precision, j1e_val, (n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, p + double precision :: x, y, z, dx, dy, dz, d2 + double precision :: a, c, tmp + double precision :: time0, time1 + + PROVIDE j1e_type + + call wall_time(time0) + print*, ' providing j1e_val ...' + + if(j1e_type .eq. "None") then + + j1e_val = 0.d0 + + elseif(j1e_type .eq. "Gauss") then + + ! \sum_{A} \sum_p c_{p_A} \exp(-\alpha_{p_A} (r - R_A)^2) + + PROVIDE j1e_size j1e_coef j1e_expo + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + tmp = 0.d0 + do j = 1, nucl_num + + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + d2 = dx*dx + dy*dy + dz*dz + + do p = 1, j1e_size + + c = j1e_coef(p,j) + a = j1e_expo(p,j) + + tmp = tmp + c * dexp(-a*d2) + enddo + enddo + + j1e_val(ipoint) = tmp + enddo + + else + + print *, ' Error in j1e_val: Unknown j1e_type = ', j1e_type + stop + + endif + + call wall_time(time1) + print*, ' Wall time for j1e_val (min) = ', (time1 - time0) / 60.d0 + call print_memory_usage() + +END_PROVIDER + +! --- + + BEGIN_PROVIDER [double precision, j1e_gradx, (n_points_final_grid)] +&BEGIN_PROVIDER [double precision, j1e_grady, (n_points_final_grid)] +&BEGIN_PROVIDER [double precision, j1e_gradz, (n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, ij, p + integer :: ierr + logical :: exists + double precision :: x, y, z, dx, dy, dz, d2 + double precision :: a, c, g, tmp_x, tmp_y, tmp_z + double precision :: cx, cy, cz + double precision :: time0, time1 + double precision, allocatable :: Pa(:,:), Pb(:,:), Pt(:,:) + double precision, allocatable :: coef_fit2(:,:) + + PROVIDE j1e_type + + call wall_time(time0) + print*, ' providing j1e_grad ...' + + if(j1e_type .eq. "None") then + + j1e_gradx = 0.d0 + j1e_grady = 0.d0 + j1e_gradz = 0.d0 + + elseif(j1e_type .eq. "Gauss") then + + ! 
- \sum_{A} (r - R_A) \sum_p c_{p_A} \exp(-\alpha_{p_A} (r - R_A)^2) + + PROVIDE j1e_size j1e_coef j1e_expo + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + tmp_x = 0.d0 + tmp_y = 0.d0 + tmp_z = 0.d0 + do j = 1, nucl_num + + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + d2 = dx*dx + dy*dy + dz*dz + + do p = 1, j1e_size + + c = j1e_coef(p,j) + a = j1e_expo(p,j) + g = c * a * dexp(-a*d2) + + tmp_x = tmp_x + g * dx + tmp_y = tmp_y + g * dy + tmp_z = tmp_z + g * dz + enddo + enddo + + j1e_gradx(ipoint) = -2.d0 * tmp_x + j1e_grady(ipoint) = -2.d0 * tmp_y + j1e_gradz(ipoint) = -2.d0 * tmp_z + enddo + + elseif(j1e_type .eq. "Charge_Harmonizer") then + + ! -[(N-1)/2N] x \sum_{\mu,\nu} P_{\mu,\nu} \int dr2 [\grad_r1 J_2e(r1,r2)] \phi_\mu(r2) \phi_nu(r2) + + PROVIDE elec_alpha_num elec_beta_num elec_num + PROVIDE mo_coef + PROVIDE int2_grad1_u2e_ao + + allocate(Pa(ao_num,ao_num), Pb(ao_num,ao_num), Pt(ao_num,ao_num)) + + call dgemm( 'N', 'T', ao_num, ao_num, elec_alpha_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pa, size(Pa, 1)) + + if(elec_alpha_num .eq. elec_beta_num) then + Pb = Pa + else + call dgemm( 'N', 'T', ao_num, ao_num, elec_beta_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pb, size(Pb, 1)) + endif + Pt = Pa + Pb + + g = -0.5d0 * (dble(elec_num) - 1.d0) / dble(elec_num) + + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,1), ao_num*ao_num, Pt, 1, 0.d0, j1e_gradx, 1) + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,2), ao_num*ao_num, Pt, 1, 0.d0, j1e_grady, 1) + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,3), ao_num*ao_num, Pt, 1, 0.d0, j1e_gradz, 1) + + FREE int2_grad1_u2e_ao + + deallocate(Pa, Pb, Pt) + + elseif(j1e_type .eq. "Charge_Harmonizer_AO") then + + ! \grad_1 \sum_{\eta,\beta} C_{\eta,\beta} \chi_{\eta} \chi_{\beta} + ! where + ! \chi_{\eta} are the AOs + ! C_{\eta,\beta} are fitted to mimic (j1e_type .eq. "Charge_Harmonizer") + ! + ! The - sign is in the parameters C_{\eta,\beta} + + PROVIDE aos_grad_in_r_array + + allocate(coef_fit2(ao_num,ao_num)) + + if(mpi_master) then + call ezfio_has_jastrow_j1e_coef_ao2(exists) + endif + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + IRP_IF MPI + include 'mpif.h' + call MPI_BCAST(coef_fit2, (ao_num*ao_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_coef_ao2 with MPI' + endif + IRP_ENDIF + if(exists) then + if(mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef_ao2 ] <<<<< ..' 
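+        ! the fitted AO-pair coefficients are read back from the EZFIO directory by the
+        ! master process and then broadcast to the other MPI ranks; when they are not
+        ! present, they are recomputed by get_j1e_coef_fit_ao2 and stored (else branch below)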
+ call ezfio_get_jastrow_j1e_coef_ao2(coef_fit2) + IRP_IF MPI + call MPI_BCAST(coef_fit2, (ao_num*ao_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_coef_ao2 with MPI' + endif + IRP_ENDIF + endif + else + call get_j1e_coef_fit_ao2(ao_num, coef_fit2) + call ezfio_set_jastrow_j1e_coef_ao2(coef_fit2) + endif + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint, c) & + !$OMP SHARED (n_points_final_grid, ao_num, & + !$OMP aos_grad_in_r_array, coef_fit2, & + !$OMP aos_in_r_array, j1e_gradx, j1e_grady, j1e_gradz) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + + j1e_gradx(ipoint) = 0.d0 + j1e_grady(ipoint) = 0.d0 + j1e_gradz(ipoint) = 0.d0 + + do i = 1, ao_num + do j = 1, ao_num + c = coef_fit2(j,i) + + j1e_gradx(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,1) + aos_grad_in_r_array(i,ipoint,1) * aos_in_r_array(j,ipoint)) + j1e_grady(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,2) + aos_grad_in_r_array(i,ipoint,2) * aos_in_r_array(j,ipoint)) + j1e_gradz(ipoint) += c * (aos_in_r_array(i,ipoint) * aos_grad_in_r_array(j,ipoint,3) + aos_grad_in_r_array(i,ipoint,3) * aos_in_r_array(j,ipoint)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(coef_fit2) + + else + + print *, ' Error in j1e_grad: Unknown j1e_type = ', j1e_type + stop + + endif + + call wall_time(time1) + print*, ' Wall time for j1e_grad (min) = ', (time1 - time0) / 60.d0 + call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, j1e_lapl, (n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, p + double precision :: x, y, z, dx, dy, dz, d2 + double precision :: a, c, g, tmp + + if(j1e_type .eq. "None") then + + j1e_lapl = 0.d0 + + elseif(j1e_type .eq. "Gauss") then + + ! - \sum_{A} (r - R_A) \sum_p c_{p_A} \exp(-\alpha_{p_A} (r - R_A)^2) + + PROVIDE j1e_size j1e_coef j1e_expo + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + tmp = 0.d0 + do j = 1, nucl_num + + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + d2 = dx*dx + dy*dy + dz*dz + + do p = 1, j1e_size + + c = j1e_coef(p,j) + a = j1e_expo(p,j) + g = c * a * dexp(-a*d2) + + tmp = tmp + (2.d0 * a * d2 - 3.d0) * g + enddo + enddo + + j1e_lapl(ipoint) = tmp + enddo + + else + + print *, ' Error in j1e_lapl: Unknown j1e_type = ', j1e_type + stop + + endif + +END_PROVIDER + +! --- + diff --git a/plugins/local/non_h_ints_mu/jast_1e_utils.irp.f b/plugins/local/non_h_ints_mu/jast_1e_utils.irp.f new file mode 100644 index 00000000..9cfabf58 --- /dev/null +++ b/plugins/local/non_h_ints_mu/jast_1e_utils.irp.f @@ -0,0 +1,436 @@ + +! --- + +subroutine get_j1e_coef_fit_ao(dim_fit, coef_fit) + + implicit none + integer , intent(in) :: dim_fit + double precision, intent(out) :: coef_fit(dim_fit) + + integer :: i, ipoint + double precision :: g + double precision :: t0, t1 + double precision, allocatable :: A(:,:), b(:), A_inv(:,:) + double precision, allocatable :: Pa(:,:), Pb(:,:), Pt(:,:) + double precision, allocatable :: u1e_tmp(:) + + + PROVIDE j1e_type + PROVIDE int2_u2e_ao + PROVIDE elec_alpha_num elec_beta_num elec_num + PROVIDE mo_coef + PROVIDE ao_overlap + + call wall_time(t0) + print*, ' PROVIDING the representation of 1e-Jastrow in AOs ... ' + + ! --- --- --- + ! 
get u1e(r) + + allocate(Pa(ao_num,ao_num), Pb(ao_num,ao_num), Pt(ao_num,ao_num)) + + call dgemm( 'N', 'T', ao_num, ao_num, elec_alpha_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pa, size(Pa, 1)) + + if(elec_alpha_num .eq. elec_beta_num) then + Pb = Pa + else + call dgemm( 'N', 'T', ao_num, ao_num, elec_beta_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pb, size(Pb, 1)) + endif + Pt = Pa + Pb + + allocate(u1e_tmp(n_points_final_grid)) + + g = -0.5d0 * (dble(elec_num) - 1.d0) / dble(elec_num) + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_u2e_ao, ao_num*ao_num, Pt, 1, 0.d0, u1e_tmp, 1) + + FREE int2_u2e_ao + + deallocate(Pa, Pb, Pt) + + ! --- --- --- + ! get A & b + + allocate(A(ao_num,ao_num), b(ao_num)) + + A(1:ao_num,1:ao_num) = ao_overlap(1:ao_num,1:ao_num) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, & + !$OMP final_weight_at_r_vector, aos_in_r_array_transp, u1e_tmp, b) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + b(i) = 0.d0 + do ipoint = 1, n_points_final_grid + b(i) = b(i) + final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * u1e_tmp(ipoint) + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(u1e_tmp) + + ! --- --- --- + ! solve Ax = b + + allocate(A_inv(ao_num,ao_num)) + call get_inverse(A, ao_num, ao_num, A_inv, ao_num) + + ! coef_fit = A_inv x b + call dgemv("N", ao_num, ao_num, 1.d0, A_inv, ao_num, b, 1, 0.d0, coef_fit, 1) + + integer :: j + double precision :: tmp, acc, nrm + + acc = 0.d0 + nrm = 0.d0 + print *, ' check A_inv' + do i = 1, ao_num + tmp = 0.d0 + do j = 1, ao_num + tmp += ao_overlap(i,j) * coef_fit(j) + enddo + tmp = tmp - b(i) + if(dabs(tmp) .gt. 1d-8) then + print*, ' problem found in fitting 1e-Jastrow' + print*, i, tmp + endif + + acc += dabs(tmp) + nrm += dabs(b(i)) + enddo + print *, ' Relative Error (%) =', 100.d0*acc/nrm + + deallocate(A, A_inv, b) + + call wall_time(t1) + print*, ' END after (min) ', (t1-t0)/60.d0 + + return +end + +! --- + +subroutine get_j1e_coef_fit_ao2(dim_fit, coef_fit) + + implicit none + integer , intent(in) :: dim_fit + double precision, intent(out) :: coef_fit(dim_fit,dim_fit) + + integer :: i, j, k, l, ipoint + integer :: ij, kl, mn + integer :: info, n_svd, LWORK + double precision :: g + double precision :: t0, t1, svd_t0, svd_t1 + double precision :: cutoff_svd, D1_inv + double precision, allocatable :: diff(:) + double precision, allocatable :: A(:,:,:,:), b(:), A_tmp(:,:,:,:) + double precision, allocatable :: Pa(:,:), Pb(:,:), Pt(:,:) + double precision, allocatable :: u1e_tmp(:), tmp(:,:,:) + double precision, allocatable :: U(:,:), D(:), Vt(:,:), work(:) + + + PROVIDE j1e_type + PROVIDE int2_u2e_ao + PROVIDE elec_alpha_num elec_beta_num elec_num + PROVIDE mo_coef + + + cutoff_svd = 1d-10 + + call wall_time(t0) + print*, ' PROVIDING the representation of 1e-Jastrow in AOs x AOs ... ' + + ! --- --- --- + ! get u1e(r) + + allocate(Pa(ao_num,ao_num), Pb(ao_num,ao_num), Pt(ao_num,ao_num)) + + call dgemm( 'N', 'T', ao_num, ao_num, elec_alpha_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pa, size(Pa, 1)) + + if(elec_alpha_num .eq. 
elec_beta_num) then + Pb = Pa + else + call dgemm( 'N', 'T', ao_num, ao_num, elec_beta_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pb, size(Pb, 1)) + endif + Pt = Pa + Pb + + allocate(u1e_tmp(n_points_final_grid)) + + g = -0.5d0 * (dble(elec_num) - 1.d0) / dble(elec_num) + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_u2e_ao, ao_num*ao_num, Pt, 1, 0.d0, u1e_tmp, 1) + + FREE int2_u2e_ao + + deallocate(Pa, Pb, Pt) + + ! --- --- --- + ! get A + + allocate(tmp(n_points_final_grid,ao_num,ao_num)) + allocate(A(ao_num,ao_num,ao_num,ao_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, final_weight_at_r_vector, aos_in_r_array_transp, tmp) + !$OMP DO COLLAPSE(2) + do j = 1, ao_num + do i = 1, ao_num + do ipoint = 1, n_points_final_grid + tmp(ipoint,i,j) = dsqrt(final_weight_at_r_vector(ipoint)) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "T", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , tmp(1,1,1), n_points_final_grid, tmp(1,1,1), n_points_final_grid & + , 0.d0, A(1,1,1,1), ao_num*ao_num) + + allocate(A_tmp(ao_num,ao_num,ao_num,ao_num)) + A_tmp = A + + ! --- --- --- + ! get b + + allocate(b(ao_num*ao_num)) + + do ipoint = 1, n_points_final_grid + u1e_tmp(ipoint) = dsqrt(final_weight_at_r_vector(ipoint)) * u1e_tmp(ipoint) + enddo + + call dgemv("T", n_points_final_grid, ao_num*ao_num, 1.d0, tmp(1,1,1), n_points_final_grid, u1e_tmp(1), 1, 0.d0, b(1), 1) + + deallocate(u1e_tmp) + deallocate(tmp) + + ! --- --- --- + ! solve Ax = b + + allocate(D(ao_num*ao_num), U(ao_num*ao_num,ao_num*ao_num), Vt(ao_num*ao_num,ao_num*ao_num)) + + call wall_time(svd_t0) + + allocate(work(1)) + lwork = -1 + call dgesvd( 'S', 'A', ao_num*ao_num, ao_num*ao_num, A(1,1,1,1), ao_num*ao_num & + , D(1), U(1,1), ao_num*ao_num, Vt(1,1), ao_num*ao_num, work, lwork, info) + if(info /= 0) then + print *, info, ': SVD failed' + stop + endif + + LWORK = max(5*ao_num*ao_num, int(WORK(1))) + deallocate(work) + allocate(work(lwork)) + call dgesvd( 'S', 'A', ao_num*ao_num, ao_num*ao_num, A(1,1,1,1), ao_num*ao_num & + , D(1), U(1,1), ao_num*ao_num, Vt(1,1), ao_num*ao_num, work, lwork, info) + if(info /= 0) then + print *, info, ':: SVD failed' + stop 1 + endif + + deallocate(work) + + call wall_time(svd_t1) + print*, ' SVD time (min) ', (svd_t1-svd_t0)/60.d0 + + if(D(1) .lt. 1d-14) then + print*, ' largest singular value is very small:', D(1) + n_svd = 1 + else + n_svd = 0 + D1_inv = 1.d0 / D(1) + do ij = 1, ao_num*ao_num + if(D(ij)*D1_inv > cutoff_svd) then + D(ij) = 1.d0 / D(ij) + n_svd = n_svd + 1 + else + D(ij) = 0.d0 + endif + enddo + endif + print*, ' n_svd = ', n_svd + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ij, kl) & + !$OMP SHARED (ao_num, n_svd, D, Vt) + !$OMP DO + do kl = 1, ao_num*ao_num + do ij = 1, n_svd + Vt(ij,kl) = Vt(ij,kl) * D(ij) + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + ! A = A_inv + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_svd, 1.d0 & + , U(1,1), ao_num*ao_num, Vt(1,1), ao_num*ao_num & + , 0.d0, A(1,1,1,1), ao_num*ao_num) + + deallocate(D, U, Vt) + + + ! --- + + ! coef_fit = A_inv x b + call dgemv("N", ao_num*ao_num, ao_num*ao_num, 1.d0, A(1,1,1,1), ao_num*ao_num, b(1), 1, 0.d0, coef_fit(1,1), 1) + + ! 
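+  ! sanity check below: apply the original (un-inverted) matrix A_tmp to the fitted
+  ! coefficients and compare with the right-hand side b; the printed value is the
+  ! relative residual 100 * sum|A.x - b| / sum|b|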
--- + + allocate(diff(ao_num*ao_num)) + + call dgemv("N", ao_num*ao_num, ao_num*ao_num, 1.d0, A_tmp(1,1,1,1), ao_num*ao_num, coef_fit(1,1), 1, 0.d0, diff(1), 1) + print*, ' accu total on Ax = b (%) = ', 100.d0*sum(dabs(diff-b))/sum(dabs(b)) + + deallocate(diff) + deallocate(A_tmp) + + ! --- + + deallocate(A, b) + + call wall_time(t1) + print*, ' END after (min) ', (t1-t0)/60.d0 + + return +end + +! --- + +subroutine get_j1e_coef_fit_ao3(dim_fit, coef_fit) + + implicit none + integer , intent(in) :: dim_fit + double precision, intent(out) :: coef_fit(dim_fit,3) + + integer :: i, d, ipoint + double precision :: g + double precision :: t0, t1 + double precision, allocatable :: A(:,:), b(:,:), A_inv(:,:) + double precision, allocatable :: Pa(:,:), Pb(:,:), Pt(:,:) + double precision, allocatable :: u1e_tmp(:,:) + + + PROVIDE j1e_type + PROVIDE int2_grad1_u2e_ao + PROVIDE elec_alpha_num elec_beta_num elec_num + PROVIDE mo_coef + PROVIDE ao_overlap + + call wall_time(t0) + print*, ' PROVIDING the representation of 1e-Jastrow in AOs ... ' + + ! --- --- --- + ! get u1e(r) + + allocate(Pa(ao_num,ao_num), Pb(ao_num,ao_num), Pt(ao_num,ao_num)) + + call dgemm( 'N', 'T', ao_num, ao_num, elec_alpha_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pa, size(Pa, 1)) + + if(elec_alpha_num .eq. elec_beta_num) then + Pb = Pa + else + call dgemm( 'N', 'T', ao_num, ao_num, elec_beta_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pb, size(Pb, 1)) + endif + Pt = Pa + Pb + + allocate(u1e_tmp(n_points_final_grid,3)) + + g = -0.5d0 * (dble(elec_num) - 1.d0) / dble(elec_num) + do d = 1, 3 + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,d), ao_num*ao_num, Pt, 1, 0.d0, u1e_tmp(1,d), 1) + enddo + + deallocate(Pa, Pb, Pt) + + ! --- --- --- + ! get A & b + + allocate(A(ao_num,ao_num), b(ao_num,3)) + + A(1:ao_num,1:ao_num) = ao_overlap(1:ao_num,1:ao_num) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, & + !$OMP final_weight_at_r_vector, aos_in_r_array_transp, u1e_tmp, b) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + b(i,1) = 0.d0 + b(i,2) = 0.d0 + b(i,3) = 0.d0 + do ipoint = 1, n_points_final_grid + b(i,1) = b(i,1) + final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * u1e_tmp(ipoint,1) + b(i,2) = b(i,2) + final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * u1e_tmp(ipoint,2) + b(i,3) = b(i,3) + final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * u1e_tmp(ipoint,3) + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(u1e_tmp) + + ! --- --- --- + ! solve Ax = b + + allocate(A_inv(ao_num,ao_num)) + call get_inverse(A, ao_num, ao_num, A_inv, ao_num) + + ! coef_fit = A_inv x b + do d = 1, 3 + call dgemv("N", ao_num, ao_num, 1.d0, A_inv, ao_num, b(1,d), 1, 0.d0, coef_fit(1,d), 1) + enddo + + integer :: j + double precision :: tmp, acc, nrm + + acc = 0.d0 + nrm = 0.d0 + print *, ' check A_inv' + do d = 1, 3 + do i = 1, ao_num + tmp = 0.d0 + do j = 1, ao_num + tmp += ao_overlap(i,j) * coef_fit(j,d) + enddo + tmp = tmp - b(i,d) + if(dabs(tmp) .gt. 1d-8) then + print*, ' problem found in fitting 1e-Jastrow' + print*, d, i, tmp + endif + + acc += dabs(tmp) + nrm += dabs(b(i,d)) + enddo + enddo + print *, ' Relative Error (%) =', 100.d0*acc/nrm + + deallocate(A, A_inv, b) + + call wall_time(t1) + print*, ' END after (min) ', (t1-t0)/60.d0 + + return +end + +! 
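+! Typical usage (see the "Charge_Harmonizer_AO" branch of j1e_grad in jast_1e.irp.f):
+!   allocate(coef_fit2(ao_num,ao_num))
+!   call get_j1e_coef_fit_ao2(ao_num, coef_fit2)
+!   call ezfio_set_jastrow_j1e_coef_ao2(coef_fit2)
+! get_j1e_coef_fit_ao and get_j1e_coef_fit_ao3 follow the same pattern, returning
+! coef_fit(ao_num) and coef_fit(ao_num,3) respectively.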
--- + diff --git a/plugins/local/non_h_ints_mu/jast_2e_utils.irp.f b/plugins/local/non_h_ints_mu/jast_2e_utils.irp.f new file mode 100644 index 00000000..34d01fb2 --- /dev/null +++ b/plugins/local/non_h_ints_mu/jast_2e_utils.irp.f @@ -0,0 +1,393 @@ + +! --- + +BEGIN_PROVIDER [double precision, int2_u2e_ao, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int2_u2e_ao(i,j,ipoint,:) = \int dr2 J_2e(r1,r2) \phi_i(r2) \phi_j(r2) + ! + ! where r1 = r(ipoint) + ! + END_DOC + + implicit none + integer :: ipoint, i, j, jpoint + integer :: n_blocks, n_rest, n_pass + integer :: i_blocks, i_rest, i_pass, ii + double precision :: mem, n_double + double precision :: time0, time1 + double precision :: x, y, z, r2 + double precision :: dx, dy, dz + double precision :: tmp_ct + double precision :: tmp0, tmp1, tmp2, tmp3 + double precision, allocatable :: tmp(:,:,:) + double precision, allocatable :: tmp_u12(:,:) + + PROVIDE j2e_type + PROVIDE Env_type + + call wall_time(time0) + print*, ' providing int2_u2e_ao ...' + + if(tc_integ_type .eq. "numeric") then + + PROVIDE final_weight_at_r_vector_extra aos_in_r_array_extra + + allocate(tmp(n_points_extra_final_grid,ao_num,ao_num)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (tmp, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + tmp(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call total_memory(mem) + mem = max(1.d0, qp_max_mem - mem) + n_double = mem * 1.d8 + n_blocks = int(min(n_double / (n_points_extra_final_grid * 1.d0), 1.d0*n_points_final_grid)) + n_rest = int(mod(n_points_final_grid, n_blocks)) + n_pass = int((n_points_final_grid - n_rest) / n_blocks) + + call write_int(6, n_pass, 'Number of passes') + call write_int(6, n_blocks, 'Size of the blocks') + call write_int(6, n_rest, 'Size of the last block') + + allocate(tmp_u12(n_points_extra_final_grid,n_blocks)) + + do i_pass = 1, n_pass + ii = (i_pass-1)*n_blocks + 1 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i_blocks, ipoint) & + !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, & + !$OMP final_grid_points, tmp_u12) + !$OMP DO + do i_blocks = 1, n_blocks + ipoint = ii - 1 + i_blocks ! r1 + call get_u12_2e_r1_seq(ipoint, n_points_extra_final_grid, tmp_u12(1,i_blocks)) + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 & + , tmp(1,1,1), n_points_extra_final_grid, tmp_u12(1,1), n_points_extra_final_grid & + , 0.d0, int2_u2e_ao(1,1,ii), ao_num*ao_num) + enddo + + deallocate(tmp_u12) + + if(n_rest .gt. 0) then + + allocate(tmp_u12(n_points_extra_final_grid,n_rest)) + + ii = n_pass*n_blocks + 1 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i_rest, ipoint) & + !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, & + !$OMP final_grid_points, tmp_u12) + !$OMP DO + do i_rest = 1, n_rest + ipoint = ii - 1 + i_rest ! 
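The numeric branch of this provider tiles the r1 grid into blocks sized from the available memory (qp_max_mem), runs one dgemm per block, and finishes with a smaller remainder block. The short sketch below is illustrative only (not from the patch): it exercises just the n_pass / n_rest index bookkeeping on a plain sum, so the block boundaries can be verified independently of the dgemm calls.

program blocking_sketch
  implicit none
  integer, parameter :: n_total = 103, n_blocks = 10
  integer            :: n_pass, n_rest, i_pass, ii, k
  double precision   :: v(n_total), total

  do k = 1, n_total
    v(k) = dble(k)
  enddo

  n_rest = mod(n_total, n_blocks)           ! size of the last, partial block
  n_pass = (n_total - n_rest) / n_blocks    ! number of full blocks
  total  = 0.d0

  do i_pass = 1, n_pass
    ii = (i_pass-1)*n_blocks + 1            ! first grid index of this block
    do k = ii, ii + n_blocks - 1
      total = total + v(k)                  ! stands in for the per-block dgemm
    enddo
  enddo

  if(n_rest .gt. 0) then
    ii = n_pass*n_blocks + 1                ! first grid index of the remainder
    do k = ii, ii + n_rest - 1
      total = total + v(k)
    enddo
  endif

  print *, total, ' should equal ', 0.5d0*dble(n_total)*dble(n_total+1)
end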
r1 + call get_u12_2e_r1_seq(ipoint, n_points_extra_final_grid, tmp_u12(1,i_rest)) + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 & + , tmp(1,1,1), n_points_extra_final_grid, tmp_u12(1,1), n_points_extra_final_grid & + , 0.d0, int2_u2e_ao(1,1,ii), ao_num*ao_num) + + deallocate(tmp_u12) + endif + + deallocate(tmp) + + elseif(tc_integ_type .eq. "semi-analytic") then + + if( (j2e_type .eq. "Mu") .and. & + ( (env_type .eq. "None") .or. (env_type .eq. "Prod_Gauss") .or. (env_type .eq. "Sum_Gauss") ) ) then + + PROVIDE mu_erf + PROVIDE env_type env_val + PROVIDE Ir2_Mu_long_Du_0 Ir2_Mu_long_Du_x Ir2_Mu_long_Du_y Ir2_Mu_long_Du_z Ir2_Mu_long_Du_2 + PROVIDE Ir2_Mu_gauss_Du + + tmp_ct = 0.5d0 / (dsqrt(dacos(-1.d0)) * mu_erf) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, x, y, z, r2, dx, dy, dz, & + !$OMP tmp0, tmp1, tmp2, tmp3) & + !$OMP SHARED (ao_num, n_points_final_grid, final_grid_points, & + !$OMP tmp_ct, env_val, Ir2_Mu_long_Du_0, & + !$OMP Ir2_Mu_long_Du_x, Ir2_Mu_long_Du_y, & + !$OMP Ir2_Mu_long_Du_z, Ir2_Mu_gauss_Du, & + !$OMP Ir2_Mu_long_Du_2, int2_u2e_ao) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + r2 = x*x + y*y + z*z + + dx = x * env_val(ipoint) + dy = y * env_val(ipoint) + dz = z * env_val(ipoint) + + tmp0 = 0.5d0 * env_val(ipoint) * r2 + tmp1 = 0.5d0 * env_val(ipoint) + tmp3 = tmp_ct * env_val(ipoint) + + do j = 1, ao_num + do i = 1, ao_num + + tmp2 = tmp1 * Ir2_Mu_long_Du_2(i,j,ipoint) - dx * Ir2_Mu_long_Du_x(i,j,ipoint) - dy * Ir2_Mu_long_Du_y(i,j,ipoint) - dz * Ir2_Mu_long_Du_z(i,j,ipoint) + + int2_u2e_ao(i,j,ipoint) = tmp0 * Ir2_Mu_long_Du_0(i,j,ipoint) + tmp2 - tmp3 * Ir2_Mu_gauss_Du(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + else + + print *, ' Error in int2_u2e_ao: Unknown Jastrow' + stop + + endif ! j2e_type + + else + + print *, ' Error in int2_u2e_ao: Unknown tc_integ_type' + stop + + endif ! tc_integ_type + + call wall_time(time1) + print*, ' wall time for int2_u2e_ao (min) =', (time1-time0)/60.d0 + call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, int2_grad1_u2e_ao, (ao_num, ao_num, n_points_final_grid, 3)] + + BEGIN_DOC + ! + ! int2_grad1_u2e_ao(i,j,ipoint,:) = \int dr2 [\grad_r1 J_2e(r1,r2)] \phi_i(r2) \phi_j(r2) + ! + ! where r1 = r(ipoint) + ! + END_DOC + + implicit none + integer :: ipoint, i, j, m, jpoint + integer :: n_blocks, n_rest, n_pass + integer :: i_blocks, i_rest, i_pass, ii + double precision :: mem, n_double + double precision :: time0, time1 + double precision :: x, y, z, r2 + double precision :: dx, dy, dz + double precision :: tmp_ct + double precision :: tmp0, tmp1, tmp2 + double precision :: tmp0_x, tmp0_y, tmp0_z + double precision :: tmp1_x, tmp1_y, tmp1_z + double precision, allocatable :: tmp(:,:,:) + double precision, allocatable :: tmp_grad1_u12(:,:,:) + + + PROVIDE j2e_type + PROVIDE Env_type + + call wall_time(time0) + print*, ' providing int2_grad1_u2e_ao ...' + + if(tc_integ_type .eq. 
"numeric") then + + PROVIDE final_weight_at_r_vector_extra aos_in_r_array_extra + + allocate(tmp(n_points_extra_final_grid,ao_num,ao_num)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (tmp, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + tmp(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call total_memory(mem) + mem = max(1.d0, qp_max_mem - mem) + n_double = mem * 1.d8 + n_blocks = int(min(n_double / (n_points_extra_final_grid * 3.d0), 1.d0*n_points_final_grid)) + n_rest = int(mod(n_points_final_grid, n_blocks)) + n_pass = int((n_points_final_grid - n_rest) / n_blocks) + + call write_int(6, n_pass, 'Number of passes') + call write_int(6, n_blocks, 'Size of the blocks') + call write_int(6, n_rest, 'Size of the last block') + + allocate(tmp_grad1_u12(n_points_extra_final_grid,n_blocks,3)) + + do i_pass = 1, n_pass + ii = (i_pass-1)*n_blocks + 1 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i_blocks, ipoint) & + !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, & + !$OMP final_grid_points, tmp_grad1_u12) + !$OMP DO + do i_blocks = 1, n_blocks + ipoint = ii - 1 + i_blocks ! r1 + call get_grad1_u12_2e_r1_seq(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_blocks,1) & + , tmp_grad1_u12(1,i_blocks,2) & + , tmp_grad1_u12(1,i_blocks,3)) + enddo + !$OMP END DO + !$OMP END PARALLEL + + do m = 1, 3 + call dgemm( "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 & + , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u2e_ao(1,1,ii,m), ao_num*ao_num) + enddo + enddo + + deallocate(tmp_grad1_u12) + + if(n_rest .gt. 0) then + + allocate(tmp_grad1_u12(n_points_extra_final_grid,n_rest,3)) + + ii = n_pass*n_blocks + 1 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i_rest, ipoint) & + !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, & + !$OMP final_grid_points, tmp_grad1_u12) + !$OMP DO + do i_rest = 1, n_rest + ipoint = ii - 1 + i_rest ! r1 + call get_grad1_u12_2e_r1_seq(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_rest,1) & + , tmp_grad1_u12(1,i_rest,2) & + , tmp_grad1_u12(1,i_rest,3)) + enddo + !$OMP END DO + !$OMP END PARALLEL + + do m = 1, 3 + call dgemm( "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 & + , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u2e_ao(1,1,ii,m), ao_num*ao_num) + enddo + + deallocate(tmp_grad1_u12) + endif + + deallocate(tmp) + + elseif(tc_integ_type .eq. "semi-analytic") then + + if( (j2e_type .eq. "Mu") .and. & + ( (env_type .eq. "None") .or. (env_type .eq. "Prod_Gauss") .or. (env_type .eq. 
"Sum_Gauss") ) ) then + + PROVIDE mu_erf + PROVIDE env_type env_val env_grad + PROVIDE Ir2_Mu_long_Du_0 Ir2_Mu_long_Du_x Ir2_Mu_long_Du_y Ir2_Mu_long_Du_z Ir2_Mu_long_Du_2 + PROVIDE Ir2_Mu_gauss_Du + + tmp_ct = 0.5d0 / (dsqrt(dacos(-1.d0)) * mu_erf) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, x, y, z, r2, dx, dy, dz, tmp1, tmp2, & + !$OMP tmp0_x, tmp0_y, tmp0_z, tmp1_x, tmp1_y, tmp1_z) & + !$OMP SHARED (ao_num, n_points_final_grid, final_grid_points, & + !$OMP tmp_ct, env_val, env_grad, Ir2_Mu_long_Du_0, & + !$OMP Ir2_Mu_long_Du_x, Ir2_Mu_long_Du_y, & + !$OMP Ir2_Mu_long_Du_z, Ir2_Mu_gauss_Du, & + !$OMP Ir2_Mu_long_Du_2, int2_grad1_u2e_ao) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + r2 = x*x + y*y + z*z + + dx = env_grad(1,ipoint) + dy = env_grad(2,ipoint) + dz = env_grad(3,ipoint) + + tmp0_x = 0.5d0 * (env_val(ipoint) * x + r2 * dx) + tmp0_y = 0.5d0 * (env_val(ipoint) * y + r2 * dy) + tmp0_z = 0.5d0 * (env_val(ipoint) * z + r2 * dz) + + tmp1 = 0.5d0 * env_val(ipoint) + + tmp1_x = tmp_ct * dx + tmp1_y = tmp_ct * dy + tmp1_z = tmp_ct * dz + + do j = 1, ao_num + do i = 1, ao_num + + tmp2 = 0.5d0 * Ir2_Mu_long_Du_2(i,j,ipoint) - x * Ir2_Mu_long_Du_x(i,j,ipoint) - y * Ir2_Mu_long_Du_y(i,j,ipoint) - z * Ir2_Mu_long_Du_z(i,j,ipoint) + + int2_grad1_u2e_ao(i,j,ipoint,1) = Ir2_Mu_long_Du_0(i,j,ipoint) * tmp0_x - tmp1 * Ir2_Mu_long_Du_x(i,j,ipoint) + dx * tmp2 - tmp1_x * Ir2_Mu_gauss_Du(i,j,ipoint) + int2_grad1_u2e_ao(i,j,ipoint,2) = Ir2_Mu_long_Du_0(i,j,ipoint) * tmp0_y - tmp1 * Ir2_Mu_long_Du_y(i,j,ipoint) + dy * tmp2 - tmp1_y * Ir2_Mu_gauss_Du(i,j,ipoint) + int2_grad1_u2e_ao(i,j,ipoint,3) = Ir2_Mu_long_Du_0(i,j,ipoint) * tmp0_z - tmp1 * Ir2_Mu_long_Du_z(i,j,ipoint) + dz * tmp2 - tmp1_z * Ir2_Mu_gauss_Du(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + FREE Ir2_Mu_long_Du_0 Ir2_Mu_long_Du_x Ir2_Mu_long_Du_y Ir2_Mu_long_Du_z Ir2_Mu_long_Du_2 + FREE Ir2_Mu_gauss_Du + + else + + print *, ' Error in int2_grad1_u2e_ao: Unknown Jastrow' + stop + + endif ! j2e_type + + else + + print *, ' Error in int2_grad1_u2e_ao: Unknown tc_integ_type' + stop + + endif ! tc_integ_type + + call wall_time(time1) + print*, ' wall time for int2_grad1_u2e_ao (min) =', (time1-time0)/60.d0 + call print_memory_usage() + +END_PROVIDER + +! --- + diff --git a/plugins/local/non_h_ints_mu/jast_deriv.irp.f b/plugins/local/non_h_ints_mu/jast_deriv.irp.f index 19b900da..9a430135 100644 --- a/plugins/local/non_h_ints_mu/jast_deriv.irp.f +++ b/plugins/local/non_h_ints_mu/jast_deriv.irp.f @@ -1,33 +1,27 @@ ! --- - BEGIN_PROVIDER [ double precision, grad1_u12_num, (n_points_extra_final_grid, n_points_final_grid, 3)] -&BEGIN_PROVIDER [ double precision, grad1_u12_squared_num, (n_points_extra_final_grid, n_points_final_grid)] + BEGIN_PROVIDER [double precision, grad1_u12_num, (n_points_extra_final_grid, n_points_final_grid, 3)] +&BEGIN_PROVIDER [double precision, grad1_u12_squared_num, (n_points_extra_final_grid, n_points_final_grid)] BEGIN_DOC ! + ! ! grad_1 u(r1,r2) - ! - ! this will be integrated numerically over r2: - ! we use grid for r1 and extra_grid for r2 - ! - ! for 99 < j1b_type < 199 - ! - ! u(r1,r2) = j12_mu(r12) x v(r1) x v(r2) - ! grad1 u(r1, r2) = [(grad1 j12_mu) v(r1) + j12_mu grad1 v(r1)] v(r2) + ! numerical integration over r1 & r2 ! 
END_DOC implicit none integer :: ipoint, jpoint double precision :: r1(3), r2(3) - double precision :: v1b_r1, v1b_r2, u2b_r12 - double precision :: grad1_v1b(3), grad1_u2b(3) + double precision :: v_r1, v_r2, u2b_r12 + double precision :: grad1_v(3), grad1_u2b(3) double precision :: dx, dy, dz double precision :: time0, time1 - double precision, external :: j12_mu, j1b_nucl + double precision, external :: j12_mu, env_nucl - PROVIDE j1b_type + PROVIDE env_type PROVIDE final_grid_points_extra print*, ' providing grad1_u12_num & grad1_u12_squared_num ...' @@ -36,12 +30,12 @@ grad1_u12_num = 0.d0 grad1_u12_squared_num = 0.d0 - if( (j1b_type .eq. 100) .or. & - (j1b_type .ge. 200) .and. (j1b_type .lt. 300) ) then + if( ((j2e_type .eq. "Mu") .and. (env_type .eq. "None")) .or. & + (j2e_type .eq. "Mur") ) then !$OMP PARALLEL & !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, jpoint, r1, r2, v1b_r1, v1b_r2, u2b_r12, grad1_v1b, grad1_u2b, dx, dy, dz) & + !$OMP PRIVATE (ipoint, jpoint, r1, r2, v_r1, v_r2, u2b_r12, grad1_v, grad1_u2b, dx, dy, dz) & !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, final_grid_points, & !$OMP final_grid_points_extra, grad1_u12_num, grad1_u12_squared_num) !$OMP DO SCHEDULE (static) @@ -57,7 +51,7 @@ r2(2) = final_grid_points_extra(2,jpoint) r2(3) = final_grid_points_extra(3,jpoint) - call grad1_j12_mu(r1, r2, grad1_u2b) + call grad1_j12_mu(r2, r1, grad1_u2b) dx = grad1_u2b(1) dy = grad1_u2b(2) @@ -73,14 +67,14 @@ !$OMP END DO !$OMP END PARALLEL - elseif((j1b_type .gt. 100) .and. (j1b_type .lt. 200)) then + elseif((j2e_type .eq. "Mu") .and. (env_type .ne. "None")) then PROVIDE final_grid_points - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, jpoint, r1, r2, v1b_r1, v1b_r2, u2b_r12, grad1_v1b, grad1_u2b, dx, dy, dz) & - !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, final_grid_points, & + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, jpoint, r1, r2, v_r1, v_r2, u2b_r12, grad1_v, grad1_u2b, dx, dy, dz) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, final_grid_points, & !$OMP final_grid_points_extra, grad1_u12_num, grad1_u12_squared_num) !$OMP DO SCHEDULE (static) do ipoint = 1, n_points_final_grid ! r1 @@ -89,8 +83,8 @@ r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - v1b_r1 = j1b_nucl(r1) - call grad1_j1b_nucl(r1, grad1_v1b) + v_r1 = env_nucl(r1) + call grad1_env_nucl(r1, grad1_v) do jpoint = 1, n_points_extra_final_grid ! r2 @@ -98,13 +92,13 @@ r2(2) = final_grid_points_extra(2,jpoint) r2(3) = final_grid_points_extra(3,jpoint) - v1b_r2 = j1b_nucl(r2) + v_r2 = env_nucl(r2) u2b_r12 = j12_mu(r1, r2) - call grad1_j12_mu(r1, r2, grad1_u2b) + call grad1_j12_mu(r2, r1, grad1_u2b) - dx = (grad1_u2b(1) * v1b_r1 + u2b_r12 * grad1_v1b(1)) * v1b_r2 - dy = (grad1_u2b(2) * v1b_r1 + u2b_r12 * grad1_v1b(2)) * v1b_r2 - dz = (grad1_u2b(3) * v1b_r1 + u2b_r12 * grad1_v1b(3)) * v1b_r2 + dx = (grad1_u2b(1) * v_r1 + u2b_r12 * grad1_v(1)) * v_r2 + dy = (grad1_u2b(2) * v_r1 + u2b_r12 * grad1_v(2)) * v_r2 + dz = (grad1_u2b(3) * v_r1 + u2b_r12 * grad1_v(3)) * v_r2 grad1_u12_num(jpoint,ipoint,1) = dx grad1_u12_num(jpoint,ipoint,2) = dy @@ -116,7 +110,7 @@ !$OMP END DO !$OMP END PARALLEL - elseif (j1b_type .eq. 1000) then + elseif(j2e_type .eq. 
"Qmckl") then double precision :: f f = 1.d0 / dble(elec_num - 1) @@ -227,13 +221,13 @@ else - print *, ' j1b_type = ', j1b_type, 'not implemented yet' + print *, ' Error in grad1_u12_num & grad1_u12_squared_num: Unknown Jastrow' stop - endif + endif ! j2e_type call wall_time(time1) - print*, ' Wall time for grad1_u12_num & grad1_u12_squared_num (min) =', (time1-time0)/60.d0 + print*, ' Wall time for grad1_u12_num & grad1_u12_squared_num (min) = ', (time1-time0)/60.d0 END_PROVIDER diff --git a/plugins/local/non_h_ints_mu/jast_deriv_utils.irp.f b/plugins/local/non_h_ints_mu/jast_deriv_utils.irp.f index 745d00ad..79822508 100644 --- a/plugins/local/non_h_ints_mu/jast_deriv_utils.irp.f +++ b/plugins/local/non_h_ints_mu/jast_deriv_utils.irp.f @@ -9,7 +9,7 @@ double precision function j12_mu(r1, r2) double precision, intent(in) :: r1(3), r2(3) double precision :: mu_tmp, r12 - if((j1b_type .ge. 0) .and. (j1b_type .lt. 200)) then + if(j2e_type .eq. "Mu") then r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + (r1(2) - r2(2)) * (r1(2) - r2(2)) & @@ -20,13 +20,13 @@ double precision function j12_mu(r1, r2) else - print *, ' j1b_type = ', j1b_type, 'not implemented for j12_mu' + print *, ' Error in j12_mu: Unknown j2e_type = ', j2e_type stop - endif + endif ! j2e_type return -end function j12_mu +end ! --- @@ -36,11 +36,11 @@ subroutine grad1_j12_mu(r1, r2, grad) ! ! gradient of j(mu(r1,r2),r12) form of jastrow. ! - ! if mu(r1,r2) = cst ---> j1b_type < 200 and + ! if mu(r1,r2) = cst ---> ! ! d/dx1 j(mu,r12) = 0.5 * (1 - erf(mu *r12))/r12 * (x1 - x2) ! - ! if mu(r1,r2) /= cst ---> 200 < j1b_type < 300 and + ! if mu(r1,r2) /= cst ---> ! ! d/dx1 j(mu(r1,r2),r12) = exp(-(mu(r1,r2)*r12)**2) /(2 *sqrt(pi) * mu(r1,r2)**2 ) d/dx1 mu(r1,r2) ! + 0.5 * (1 - erf(mu(r1,r2) *r12))/r12 * (x1 - x2) @@ -53,10 +53,11 @@ subroutine grad1_j12_mu(r1, r2, grad) double precision, intent(in) :: r1(3), r2(3) double precision, intent(out) :: grad(3) double precision :: dx, dy, dz, r12, tmp + double precision :: mu_val, mu_tmp, mu_der(3) grad = 0.d0 - if((j1b_type .ge. 0) .and. (j1b_type .lt. 200)) then + if(j2e_type .eq. "Mu") then dx = r1(1) - r2(1) dy = r1(2) - r2(2) @@ -71,9 +72,7 @@ subroutine grad1_j12_mu(r1, r2, grad) grad(2) = tmp * dy grad(3) = tmp * dz - elseif((j1b_type .ge. 200) .and. (j1b_type .lt. 300)) then - - double precision :: mu_val, mu_tmp, mu_der(3) + elseif(j2e_type .eq. "Mur") then dx = r1(1) - r2(1) dy = r1(2) - r2(2) @@ -95,152 +94,153 @@ subroutine grad1_j12_mu(r1, r2, grad) else - print *, ' j1b_type = ', j1b_type, 'not implemented yet' + print *, ' Error in grad1_j12_mu: Unknown j2e_type = ', j2e_type stop - endif + endif ! j2e_type + grad = -grad return -end subroutine grad1_j12_mu +end ! --- -double precision function j1b_nucl(r) +double precision function env_nucl(r) implicit none double precision, intent(in) :: r(3) integer :: i double precision :: a, d, e, x, y, z - if((j1b_type .eq. 2) .or. (j1b_type .eq. 102)) then + if(env_type .eq. "Sum_Slat") then - j1b_nucl = 1.d0 + env_nucl = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) - j1b_nucl = j1b_nucl - dexp(-a*dsqrt(d)) + env_nucl = env_nucl - env_coef(i) * dexp(-a*dsqrt(d)) enddo - elseif((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then + elseif(env_type .eq. 
"Prod_Gauss") then - j1b_nucl = 1.d0 + env_nucl = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) e = 1.d0 - dexp(-a*d) - j1b_nucl = j1b_nucl * e + env_nucl = env_nucl * e enddo - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then + elseif(env_type .eq. "Sum_Gauss") then - j1b_nucl = 1.d0 + env_nucl = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) - j1b_nucl = j1b_nucl - j1b_pen_coef(i) * dexp(-a*d) + env_nucl = env_nucl - env_coef(i) * dexp(-a*d) enddo - elseif((j1b_type .eq. 5) .or. (j1b_type .eq. 105)) then + elseif(env_type .eq. "Sum_Quartic") then - j1b_nucl = 1.d0 + env_nucl = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) x = r(1) - nucl_coord(i,1) y = r(2) - nucl_coord(i,2) z = r(3) - nucl_coord(i,3) d = x*x + y*y + z*z - j1b_nucl = j1b_nucl - dexp(-a*d*d) + env_nucl = env_nucl - env_coef(i) * dexp(-a*d*d) enddo else - print *, ' j1b_type = ', j1b_type, 'not implemented for j1b_nucl' + print *, ' Error in env_nucl: Unknown env_type = ', env_type stop endif return -end function j1b_nucl +end ! --- -double precision function j1b_nucl_square(r) +double precision function env_nucl_square(r) implicit none double precision, intent(in) :: r(3) integer :: i double precision :: a, d, e, x, y, z - if((j1b_type .eq. 2) .or. (j1b_type .eq. 102)) then + if(env_type .eq. "Sum_Slat") then - j1b_nucl_square = 1.d0 + env_nucl_square = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) - j1b_nucl_square = j1b_nucl_square - dexp(-a*dsqrt(d)) + env_nucl_square = env_nucl_square - env_coef(i) * dexp(-a*dsqrt(d)) enddo - j1b_nucl_square = j1b_nucl_square * j1b_nucl_square + env_nucl_square = env_nucl_square * env_nucl_square - elseif((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then + elseif(env_type .eq. "Prod_Gauss") then - j1b_nucl_square = 1.d0 + env_nucl_square = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) e = 1.d0 - dexp(-a*d) - j1b_nucl_square = j1b_nucl_square * e + env_nucl_square = env_nucl_square * e enddo - j1b_nucl_square = j1b_nucl_square * j1b_nucl_square + env_nucl_square = env_nucl_square * env_nucl_square - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then + elseif(env_type .eq. "Sum_Gauss") then - j1b_nucl_square = 1.d0 + env_nucl_square = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) - j1b_nucl_square = j1b_nucl_square - j1b_pen_coef(i) * dexp(-a*d) + env_nucl_square = env_nucl_square - env_coef(i) * dexp(-a*d) enddo - j1b_nucl_square = j1b_nucl_square * j1b_nucl_square + env_nucl_square = env_nucl_square * env_nucl_square - elseif((j1b_type .eq. 5) .or. (j1b_type .eq. 105)) then + elseif(env_type .eq. 
"Sum_Quartic") then - j1b_nucl_square = 1.d0 + env_nucl_square = 1.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) x = r(1) - nucl_coord(i,1) y = r(2) - nucl_coord(i,2) z = r(3) - nucl_coord(i,3) d = x*x + y*y + z*z - j1b_nucl_square = j1b_nucl_square - dexp(-a*d*d) + env_nucl_square = env_nucl_square - env_coef(i) * dexp(-a*d*d) enddo - j1b_nucl_square = j1b_nucl_square * j1b_nucl_square + env_nucl_square = env_nucl_square * env_nucl_square else - print *, ' j1b_type = ', j1b_type, 'not implemented for j1b_nucl_square' + print *, ' Error in env_nucl_square: Unknown env_type = ', env_type stop endif return -end function j1b_nucl_square +end ! --- -subroutine grad1_j1b_nucl(r, grad) +subroutine grad1_env_nucl(r, grad) implicit none double precision, intent(in) :: r(3) @@ -251,18 +251,18 @@ subroutine grad1_j1b_nucl(r, grad) double precision :: fact_x, fact_y, fact_z double precision :: ax_der, ay_der, az_der, a_expo - if((j1b_type .eq. 2) .or. (j1b_type .eq. 102)) then + if(env_type .eq. "Sum_Slat") then fact_x = 0.d0 fact_y = 0.d0 fact_z = 0.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) x = r(1) - nucl_coord(i,1) y = r(2) - nucl_coord(i,2) z = r(3) - nucl_coord(i,3) d = dsqrt(x*x + y*y + z*z) - e = a * dexp(-a*d) / d + e = a * env_coef(i) * dexp(-a*d) / d fact_x += e * x fact_y += e * y @@ -273,7 +273,7 @@ subroutine grad1_j1b_nucl(r, grad) grad(2) = fact_y grad(3) = fact_z - elseif((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then + elseif(env_type .eq. "Prod_Gauss") then x = r(1) y = r(2) @@ -282,7 +282,7 @@ subroutine grad1_j1b_nucl(r, grad) fact_x = 0.d0 fact_y = 0.d0 fact_z = 0.d0 - do i = 1, List_all_comb_b2_size + do i = 1, List_env1s_size phase = 0 a_expo = 0.d0 @@ -290,12 +290,12 @@ subroutine grad1_j1b_nucl(r, grad) ay_der = 0.d0 az_der = 0.d0 do j = 1, nucl_num - a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + a = dble(List_env1s(j,i)) * env_expo(j) dx = x - nucl_coord(j,1) dy = y - nucl_coord(j,2) dz = z - nucl_coord(j,3) - phase += List_all_comb_b2(j,i) + phase += List_env1s(j,i) a_expo += a * (dx*dx + dy*dy + dz*dz) ax_der += a * dx ay_der += a * dy @@ -312,18 +312,18 @@ subroutine grad1_j1b_nucl(r, grad) grad(2) = fact_y grad(3) = fact_z - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then + elseif(env_type .eq. "Sum_Gauss") then fact_x = 0.d0 fact_y = 0.d0 fact_z = 0.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) x = r(1) - nucl_coord(i,1) y = r(2) - nucl_coord(i,2) z = r(3) - nucl_coord(i,3) d = x*x + y*y + z*z - e = a * j1b_pen_coef(i) * dexp(-a*d) + e = a * env_coef(i) * dexp(-a*d) fact_x += e * x fact_y += e * y @@ -334,18 +334,18 @@ subroutine grad1_j1b_nucl(r, grad) grad(2) = 2.d0 * fact_y grad(3) = 2.d0 * fact_z - elseif((j1b_type .eq. 5) .or. (j1b_type .eq. 105)) then + elseif(env_type .eq. "Sum_Quartic") then fact_x = 0.d0 fact_y = 0.d0 fact_z = 0.d0 do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) x = r(1) - nucl_coord(i,1) y = r(2) - nucl_coord(i,2) z = r(3) - nucl_coord(i,3) d = x*x + y*y + z*z - e = a * d * dexp(-a*d*d) + e = a * env_coef(i) * d * dexp(-a*d*d) fact_x += e * x fact_y += e * y @@ -358,13 +358,13 @@ subroutine grad1_j1b_nucl(r, grad) else - print *, ' j1b_type = ', j1b_type, 'not implemented for grad1_j1b_nucl' + print *, ' Error in grad1_env_nucl: Unknown env_type = ', env_type stop endif return -end subroutine grad1_j1b_nucl +end ! 
--- @@ -380,7 +380,10 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) double precision :: f_rho1, f_rho2, d_drho_f_rho1 double precision :: d_dx1_f_rho1(3),d_dx_rho_f_rho(3),nume - if(j1b_type .eq. 200) then + PROVIDE murho_type + PROVIDE mu_r_ct mu_erf + + if(murho_type .eq. 1) then ! ! r = 0.5 (r1 + r2) @@ -391,8 +394,6 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) ! d mu[rho(r)] / dx = [0.5 alpha / sqrt(rho) - mu0 exp(-rho)] (d rho(r) / dx) ! - PROVIDE mu_r_ct mu_erf - r(1) = 0.5d0 * (r1(1) + r2(1)) r(2) = 0.5d0 * (r1(2) + r2(2)) r(3) = 0.5d0 * (r1(3) + r2(3)) @@ -413,7 +414,7 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) mu_der(2) = tmp3 * (grad_dm_a(2,1) + grad_dm_b(2,1)) mu_der(3) = tmp3 * (grad_dm_a(3,1) + grad_dm_b(3,1)) - elseif(j1b_type .eq. 201) then + elseif(murho_type .eq. 2) then ! ! r = 0.5 (r1 + r2) @@ -424,8 +425,6 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) ! d mu[rho(r)] / dx = [0.5 alpha / sqrt(rho) - mu0 exp(-rho)] (d rho(r) / dx) ! - PROVIDE mu_r_ct mu_erf - r(1) = 0.5d0 * (r1(1) + r2(1)) r(2) = 0.5d0 * (r1(2) + r2(2)) r(3) = 0.5d0 * (r1(3) + r2(3)) @@ -442,7 +441,7 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) mu_der(2) = tmp3 * (grad_dm_a(2,1) + grad_dm_b(2,1)) mu_der(3) = tmp3 * (grad_dm_a(3,1) + grad_dm_b(3,1)) - elseif(j1b_type .eq. 202) then + elseif(murho_type .eq. 3) then ! mu(r1,r2) = {rho(r1) f[rho(r1)] + rho(r2) f[rho(r2)]} / RHO ! @@ -469,7 +468,8 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) nume = rho1 * f_rho1 + rho2 * f_rho2 mu_val = nume * inv_rho_tot mu_der(1:3) = inv_rho_tot*inv_rho_tot * (rho_tot * d_dx_rho_f_rho(1:3) - grad_rho1(1:3) * nume) - elseif(j1b_type .eq. 203) then + + elseif(murho_type .eq. 4) then ! mu(r1,r2) = {rho(r1) f[rho(r1)] + rho(r2) f[rho(r2)]} / RHO ! @@ -503,7 +503,8 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) nume = rho1 * f_rho1 + rho2 * f_rho2 mu_val = nume * inv_rho_tot mu_der(1:3) = inv_rho_tot*inv_rho_tot * (rho_tot * d_dx_rho_f_rho(1:3) - grad_rho1(1:3) * nume) - elseif(j1b_type .eq. 204) then + + elseif(murho_type .eq. 5) then ! mu(r1,r2) = 1/2 * (f[rho(r1)] + f[rho(r2)]} ! @@ -535,23 +536,24 @@ subroutine mu_r_val_and_grad(r1, r2, mu_val, mu_der) mu_der(1:3) = inv_rho_tot*inv_rho_tot * (rho_tot * d_dx_rho_f_rho(1:3) - grad_rho1(1:3) * nume) else - print *, ' j1b_type = ', j1b_type, 'not implemented yet' + + print *, ' Error in mu_r_val_and_grad: Unknown env_type = ', env_type stop endif return -end subroutine mu_r_val_and_grad +end ! 
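The first murho_type branches use mu[rho(r)] = alpha sqrt(rho) + mu0 exp(-rho) with d mu/dx = [0.5 alpha / sqrt(rho) - mu0 exp(-rho)] d rho/dx, as stated in the comments above. Here is a quick sanity check of that chain rule on a made-up 1-D density (not the plugin's density provider; alpha, mu0 and rho(x) are arbitrary):

program mu_of_rho_check_sketch
  implicit none
  double precision :: alpha, mu0, x, rho, drho_dx, dmu_dx, eps
  double precision :: rp, rm, mup, mum

  alpha = 1.1d0 ; mu0 = 0.5d0
  x   = 0.7d0
  eps = 1d-6

  ! toy density rho(x) = exp(-x^2), drho/dx = -2 x exp(-x^2)
  rho     = dexp(-x*x)
  drho_dx = -2.d0 * x * dexp(-x*x)

  ! chain rule: d mu / dx = [0.5 alpha / sqrt(rho) - mu0 exp(-rho)] * drho/dx
  dmu_dx = (0.5d0 * alpha / dsqrt(rho) - mu0 * dexp(-rho)) * drho_dx

  ! central finite difference of mu(x) = alpha sqrt(rho(x)) + mu0 exp(-rho(x))
  rp  = dexp(-(x+eps)*(x+eps))
  rm  = dexp(-(x-eps)*(x-eps))
  mup = alpha * dsqrt(rp) + mu0 * dexp(-rp)
  mum = alpha * dsqrt(rm) + mu0 * dexp(-rm)

  print *, ' chain rule        :', dmu_dx
  print *, ' finite difference :', (mup - mum) / (2.d0*eps)
end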
--- -subroutine grad1_j1b_nucl_square_num(r1, grad) +subroutine grad1_env_nucl_square_num(r1, grad) implicit none double precision, intent(in) :: r1(3) double precision, intent(out) :: grad(3) double precision :: r(3), eps, tmp_eps, vp, vm - double precision, external :: j1b_nucl_square + double precision, external :: env_nucl_square eps = 1d-5 tmp_eps = 0.5d0 / eps @@ -559,28 +561,28 @@ subroutine grad1_j1b_nucl_square_num(r1, grad) r(1:3) = r1(1:3) r(1) = r(1) + eps - vp = j1b_nucl_square(r) + vp = env_nucl_square(r) r(1) = r(1) - 2.d0 * eps - vm = j1b_nucl_square(r) + vm = env_nucl_square(r) r(1) = r(1) + eps grad(1) = tmp_eps * (vp - vm) r(2) = r(2) + eps - vp = j1b_nucl_square(r) + vp = env_nucl_square(r) r(2) = r(2) - 2.d0 * eps - vm = j1b_nucl_square(r) + vm = env_nucl_square(r) r(2) = r(2) + eps grad(2) = tmp_eps * (vp - vm) r(3) = r(3) + eps - vp = j1b_nucl_square(r) + vp = env_nucl_square(r) r(3) = r(3) - 2.d0 * eps - vm = j1b_nucl_square(r) + vm = env_nucl_square(r) r(3) = r(3) + eps grad(3) = tmp_eps * (vp - vm) return -end subroutine grad1_j1b_nucl_square_num +end ! --- @@ -622,7 +624,7 @@ subroutine grad1_j12_mu_square_num(r1, r2, grad) grad(3) = tmp_eps * (vp - vm) return -end subroutine grad1_j12_mu_square_num +end ! --- @@ -635,134 +637,172 @@ double precision function j12_mu_square(r1, r2) j12_mu_square = j12_mu(r1, r2) * j12_mu(r1, r2) return -end function j12_mu_square +end ! --- -subroutine f_mu_and_deriv_mu(rho,alpha,mu0,beta,f_mu,d_drho_f_mu) - implicit none - BEGIN_DOC -! function giving mu as a function of rho -! -! f_mu = alpha * rho**beta + mu0 * exp(-rho) -! -! and its derivative with respect to rho d_drho_f_mu - END_DOC - double precision, intent(in) :: rho,alpha,mu0,beta - double precision, intent(out) :: f_mu,d_drho_f_mu - f_mu = alpha * (rho)**beta + mu0 * dexp(-rho) - d_drho_f_mu = alpha * beta * rho**(beta-1.d0) - mu0 * dexp(-rho) +subroutine f_mu_and_deriv_mu(rho, alpha, mu0, beta, f_mu, d_drho_f_mu) + + BEGIN_DOC + ! function giving mu as a function of rho + ! + ! f_mu = alpha * rho**beta + mu0 * exp(-rho) + ! + ! and its derivative with respect to rho d_drho_f_mu + END_DOC + + implicit none + double precision, intent(in) :: rho, alpha, mu0, beta + double precision, intent(out) :: f_mu, d_drho_f_mu + + f_mu = alpha * (rho)**beta + mu0 * dexp(-rho) + d_drho_f_mu = alpha * beta * rho**(beta-1.d0) - mu0 * dexp(-rho) end +! --- + +subroutine get_all_rho_grad_rho(r1, r2, rho1, rho2, grad_rho1) + + BEGIN_DOC + ! returns the density in r1,r2 and grad_rho at r1 + END_DOC + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision, intent(out) :: grad_rho1(3), rho1, rho2 + double precision :: dm_a(1), dm_b(1), grad_dm_a(3,1), grad_dm_b(3,1) + + call density_and_grad_alpha_beta(r1, dm_a, dm_b, grad_dm_a, grad_dm_b) + rho1 = dm_a(1) + dm_b(1) + grad_rho1(1:3) = grad_dm_a(1:3,1) + grad_dm_b(1:3,1) + call density_and_grad_alpha_beta(r2, dm_a, dm_b, grad_dm_a, grad_dm_b) + rho2 = dm_a(1) + dm_b(1) -subroutine get_all_rho_grad_rho(r1,r2,rho1,rho2,grad_rho1) - implicit none - BEGIN_DOC -! 
returns the density in r1,r2 and grad_rho at r1 - END_DOC - double precision, intent(in) :: r1(3),r2(3) - double precision, intent(out):: grad_rho1(3),rho1,rho2 - double precision :: dm_a(1), dm_b(1), grad_dm_a(3,1), grad_dm_b(3,1) - call density_and_grad_alpha_beta(r1, dm_a, dm_b, grad_dm_a, grad_dm_b) - rho1 = dm_a(1) + dm_b(1) - grad_rho1(1:3) = grad_dm_a(1:3,1) + grad_dm_b(1:3,1) - call density_and_grad_alpha_beta(r2, dm_a, dm_b, grad_dm_a, grad_dm_b) - rho2 = dm_a(1) + dm_b(1) end -subroutine get_all_f_rho(rho1,rho2,alpha,mu0,beta,f_rho1,d_drho_f_rho1,f_rho2) - implicit none - BEGIN_DOC -! returns the values f(mu(r1)), f(mu(r2)) and d/drho(1) f(mu(r1)) - END_DOC - double precision, intent(in) :: rho1,rho2,alpha,mu0,beta - double precision, intent(out):: f_rho1,d_drho_f_rho1,f_rho2 - double precision :: tmp - call f_mu_and_deriv_mu(rho1,alpha,mu0,beta,f_rho1,d_drho_f_rho1) - call f_mu_and_deriv_mu(rho2,alpha,mu0,beta,f_rho2,tmp) +! --- + +subroutine get_all_f_rho(rho1, rho2, alpha, mu0, beta, f_rho1, d_drho_f_rho1, f_rho2) + + BEGIN_DOC + ! returns the values f(mu(r1)), f(mu(r2)) and d/drho(1) f(mu(r1)) + END_DOC + + implicit none + double precision, intent(in) :: rho1, rho2, alpha, mu0, beta + double precision, intent(out) :: f_rho1, d_drho_f_rho1, f_rho2 + double precision :: tmp + + call f_mu_and_deriv_mu(rho1, alpha, mu0, beta, f_rho1, d_drho_f_rho1) + call f_mu_and_deriv_mu(rho2, alpha, mu0, beta, f_rho2, tmp) + end +! --- subroutine get_all_f_rho_simple(rho1,rho2,alpha,mu0,beta,f_rho1,d_drho_f_rho1,f_rho2) - implicit none - BEGIN_DOC -! returns the values f(mu(r1)), f(mu(r2)) and d/drho(1) f(mu(r1)) - END_DOC - double precision, intent(in) :: rho1,rho2,alpha,mu0,beta - double precision, intent(out):: f_rho1,d_drho_f_rho1,f_rho2 - double precision :: tmp - if(rho1.lt.1.d-10)then - f_rho1 = 0.d0 - d_drho_f_rho1 = 0.d0 - else - call f_mu_and_deriv_mu_simple(rho1,alpha,mu0,beta,f_rho1,d_drho_f_rho1) - endif - if(rho2.lt.1.d-10)then - f_rho2 = 0.d0 - else - call f_mu_and_deriv_mu_simple(rho2,alpha,mu0,beta,f_rho2,tmp) - endif + + BEGIN_DOC + ! returns the values f(mu(r1)), f(mu(r2)) and d/drho(1) f(mu(r1)) + END_DOC + + implicit none + double precision, intent(in) :: rho1, rho2, alpha, mu0, beta + double precision, intent(out) :: f_rho1, d_drho_f_rho1, f_rho2 + double precision :: tmp + + if(rho1.lt.1.d-10) then + f_rho1 = 0.d0 + d_drho_f_rho1 = 0.d0 + else + call f_mu_and_deriv_mu_simple(rho1, alpha, mu0, beta, f_rho1, d_drho_f_rho1) + endif + + if(rho2.lt.1.d-10)then + f_rho2 = 0.d0 + else + call f_mu_and_deriv_mu_simple(rho2, alpha, mu0, beta, f_rho2, tmp) + endif + end -subroutine f_mu_and_deriv_mu_simple(rho,alpha,mu0,beta,f_mu,d_drho_f_mu) - implicit none - BEGIN_DOC -! function giving mu as a function of rho -! -! f_mu = alpha * rho**beta + mu0 -! -! and its derivative with respect to rho d_drho_f_mu - END_DOC - double precision, intent(in) :: rho,alpha,mu0,beta - double precision, intent(out) :: f_mu,d_drho_f_mu - f_mu = alpha**beta * (rho)**beta + mu0 - d_drho_f_mu = alpha**beta * beta * rho**(beta-1.d0) +! --- + +subroutine f_mu_and_deriv_mu_simple(rho, alpha, mu0, beta, f_mu, d_drho_f_mu) + + BEGIN_DOC + ! function giving mu as a function of rho + ! + ! f_mu = alpha * rho**beta + mu0 + ! + ! 
and its derivative with respect to rho d_drho_f_mu + END_DOC + + implicit none + double precision, intent(in) :: rho, alpha, mu0, beta + double precision, intent(out) :: f_mu, d_drho_f_mu + + f_mu = alpha**beta * (rho)**beta + mu0 + d_drho_f_mu = alpha**beta * beta * rho**(beta-1.d0) end ! --- subroutine f_mu_and_deriv_mu_erf(rho,alpha,zeta,mu0,beta,f_mu,d_drho_f_mu) - implicit none + include 'constants.include.F' - BEGIN_DOC -! function giving mu as a function of rho -! -! f_mu = (alpha * rho)**zeta * erf(beta * rho) + mu0 * (1 - erf(beta*rho)) -! -! and its derivative with respect to rho d_drho_f_mu -! -! d_drho_f_mu = 2 beta/sqrt(pi) * exp(-(beta*rho)**2) * ( (alpha*rho)**zeta - mu0) -! + alpha * zeta * (alpha *rho)**(zeta-1) * erf(beta*rho) - END_DOC - double precision, intent(in) :: rho,alpha,mu0,beta,zeta - double precision, intent(out) :: f_mu,d_drho_f_mu - f_mu = (alpha * rho)**zeta * derf(beta * rho) + mu0 * (1.d0 - derf(beta*rho)) - d_drho_f_mu = 2.d0 * beta * inv_sq_pi * dexp(-(beta*rho)**2) * ( (alpha*rho)**zeta - mu0) & - + alpha * zeta * (alpha *rho)**(zeta-1) * derf(beta*rho) + + BEGIN_DOC + ! function giving mu as a function of rho + ! + ! f_mu = (alpha * rho)**zeta * erf(beta * rho) + mu0 * (1 - erf(beta*rho)) + ! + ! and its derivative with respect to rho d_drho_f_mu + ! + ! d_drho_f_mu = 2 beta/sqrt(pi) * exp(-(beta*rho)**2) * ( (alpha*rho)**zeta - mu0) + ! + alpha * zeta * (alpha *rho)**(zeta-1) * erf(beta*rho) + END_DOC + + implicit none + double precision, intent(in) :: rho, alpha, mu0, beta, zeta + double precision, intent(out) :: f_mu, d_drho_f_mu + + f_mu = (alpha * rho)**zeta * derf(beta * rho) + mu0 * (1.d0 - derf(beta*rho)) + d_drho_f_mu = 2.d0 * beta * inv_sq_pi * dexp(-(beta*rho)**2) * ( (alpha*rho)**zeta - mu0) & + + alpha * zeta * (alpha *rho)**(zeta-1) * derf(beta*rho) end +! --- + +subroutine get_all_f_rho_erf(rho1, rho2, alpha, zeta, mu0, beta, f_rho1, d_drho_f_rho1, f_rho2) + + BEGIN_DOC + ! returns the values f(mu(r1)), f(mu(r2)) and d/drho(1) f(mu(r1)) + ! with f_mu = (alpha * rho)**zeta * erf(beta * rho) + mu0 * (1 - erf(beta*rho)) + END_DOC + + implicit none + double precision, intent(in) :: rho1, rho2, alpha, mu0, beta, zeta + double precision, intent(out) :: f_rho1, d_drho_f_rho1, f_rho2 + double precision :: tmp + + if(rho1 .lt. 1.d-10) then + f_rho1 = mu_erf + d_drho_f_rho1 = 0.d0 + else + call f_mu_and_deriv_mu_erf(rho1, alpha, zeta, mu0, beta, f_rho1, d_drho_f_rho1) + endif + + if(rho2 .lt. 1.d-10)then + f_rho2 = mu_erf + else + call f_mu_and_deriv_mu_erf(rho2, alpha, zeta, mu0, beta, f_rho2, tmp) + endif -subroutine get_all_f_rho_erf(rho1,rho2,alpha,zeta,mu0,beta,f_rho1,d_drho_f_rho1,f_rho2) - implicit none - BEGIN_DOC -! returns the values f(mu(r1)), f(mu(r2)) and d/drho(1) f(mu(r1)) -! with f_mu = (alpha * rho)**zeta * erf(beta * rho) + mu0 * (1 - erf(beta*rho)) - END_DOC - double precision, intent(in) :: rho1,rho2,alpha,mu0,beta,zeta - double precision, intent(out):: f_rho1,d_drho_f_rho1,f_rho2 - double precision :: tmp - if(rho1.lt.1.d-10)then - f_rho1 = mu_erf - d_drho_f_rho1 = 0.d0 - else - call f_mu_and_deriv_mu_erf(rho1,alpha,zeta,mu0,beta,f_rho1,d_drho_f_rho1) - endif - if(rho2.lt.1.d-10)then - f_rho2 = mu_erf - else - call f_mu_and_deriv_mu_erf(rho2,alpha,zeta,mu0,beta,f_rho2,tmp) - endif end + +! 
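f_mu_and_deriv_mu_erf encodes f(rho) = (alpha rho)^zeta erf(beta rho) + mu0 [1 - erf(beta rho)] together with the derivative written in its documentation. Since that derivative is easy to get wrong, here is a small, self-contained finite-difference check; the parameter values are arbitrary, and inv_sq_pi is recomputed locally instead of being taken from constants.include.F as the plugin does.

program f_mu_erf_check_sketch
  implicit none
  double precision :: inv_sq_pi
  double precision :: rho, alpha, zeta, mu0, beta
  double precision :: f, df, fp, fm, eps

  inv_sq_pi = 1.d0 / dsqrt(dacos(-1.d0))
  alpha = 1.3d0 ; zeta = 1.d0 ; mu0 = 0.87d0 ; beta = 2.d0
  rho = 0.4d0
  eps = 1d-6

  ! f(rho) and its analytic derivative, as documented above
  f  = (alpha*rho)**zeta * derf(beta*rho) + mu0 * (1.d0 - derf(beta*rho))
  df = 2.d0 * beta * inv_sq_pi * dexp(-(beta*rho)**2) * ((alpha*rho)**zeta - mu0) &
     + alpha * zeta * (alpha*rho)**(zeta-1.d0) * derf(beta*rho)

  ! central finite difference with respect to rho
  fp = (alpha*(rho+eps))**zeta * derf(beta*(rho+eps)) + mu0 * (1.d0 - derf(beta*(rho+eps)))
  fm = (alpha*(rho-eps))**zeta * derf(beta*(rho-eps)) + mu0 * (1.d0 - derf(beta*(rho-eps)))

  print *, ' analytic d/drho :', df
  print *, ' numeric  d/drho :', (fp - fm) / (2.d0*eps)
end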
--- + diff --git a/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f b/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f index f9512827..5777a44a 100644 --- a/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f +++ b/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f @@ -1,100 +1,149 @@ ! --- -subroutine get_grad1_u12_withsq_r1_seq(r1, n_grid2, resx, resy, resz, res) +subroutine get_grad1_u12_withsq_r1_seq(ipoint, n_grid2, resx, resy, resz, res) BEGIN_DOC ! ! grad_1 u(r1,r2) ! - ! this will be integrated numerically over r2: - ! we use grid for r1 and extra_grid for r2 - ! - ! for 99 < j1b_type < 199 - ! - ! u(r1,r2) = j12_mu(r12) x v(r1) x v(r2) - ! grad1 u(r1, r2) = [(grad1 j12_mu) v(r1) + j12_mu grad1 v(r1)] v(r2) + ! we use grid for r1 and extra_grid for r2 ! END_DOC implicit none - integer, intent(in) :: n_grid2 - double precision, intent(in) :: r1(3) + integer, intent(in) :: ipoint, n_grid2 double precision, intent(out) :: resx(n_grid2), resy(n_grid2), resz(n_grid2), res(n_grid2) integer :: jpoint - double precision :: v1b_r1 - double precision :: grad1_v1b(3) - double precision, allocatable :: v1b_r2(:) - double precision, allocatable :: u2b_r12(:) - double precision, allocatable :: gradx1_u2b(:), grady1_u2b(:), gradz1_u2b(:) - double precision, external :: j1b_nucl + double precision :: env_r1, tmp + double precision :: grad1_env(3), r1(3) + double precision, allocatable :: env_r2(:) + double precision, allocatable :: u2b_r12(:), gradx1_u2b(:), grady1_u2b(:), gradz1_u2b(:) + double precision, allocatable :: u2b_mu(:), gradx1_mu(:), grady1_mu(:), gradz1_mu(:) + double precision, allocatable :: u2b_nu(:), gradx1_nu(:), grady1_nu(:), gradz1_nu(:) + double precision, external :: env_nucl - PROVIDE j1b_type + PROVIDE j1e_type j2e_type env_type + PROVIDE mu_erf nu_erf a_boys + PROVIDE final_grid_points PROVIDE final_grid_points_extra - if( (j1b_type .eq. 100) .or. & - (j1b_type .ge. 200) .and. (j1b_type .lt. 300) ) then + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) - call grad1_j12_mu_r1_seq(r1, n_grid2, resx, resy, resz) - do jpoint = 1, n_points_extra_final_grid - res(jpoint) = resx(jpoint) * resx(jpoint) & - + resy(jpoint) * resy(jpoint) & - + resz(jpoint) * resz(jpoint) - enddo + if( (j2e_type .eq. "Mu") .or. & + (j2e_type .eq. "Mur") .or. & + (j2e_type .eq. "Boys") ) then - elseif((j1b_type .gt. 100) .and. (j1b_type .lt. 200)) then + if(env_type .eq. "None") then - allocate(v1b_r2(n_grid2)) - allocate(u2b_r12(n_grid2)) - allocate(gradx1_u2b(n_grid2)) - allocate(grady1_u2b(n_grid2)) - allocate(gradz1_u2b(n_grid2)) + call grad1_j12_r1_seq(r1, n_grid2, resx, resy, resz) - v1b_r1 = j1b_nucl(r1) - call grad1_j1b_nucl(r1, grad1_v1b) + else - call j1b_nucl_r1_seq(n_grid2, v1b_r2) - call j12_mu_r1_seq(r1, n_grid2, u2b_r12) - call grad1_j12_mu_r1_seq(r1, n_grid2, gradx1_u2b, grady1_u2b, gradz1_u2b) + ! u(r1,r2) = j12_mu(r12) x v(r1) x v(r2) + ! 
grad1 u(r1, r2) = [(grad1 j12_mu) v(r1) + j12_mu grad1 v(r1)] v(r2) - do jpoint = 1, n_points_extra_final_grid - resx(jpoint) = (gradx1_u2b(jpoint) * v1b_r1 + u2b_r12(jpoint) * grad1_v1b(1)) * v1b_r2(jpoint) - resy(jpoint) = (grady1_u2b(jpoint) * v1b_r1 + u2b_r12(jpoint) * grad1_v1b(2)) * v1b_r2(jpoint) - resz(jpoint) = (gradz1_u2b(jpoint) * v1b_r1 + u2b_r12(jpoint) * grad1_v1b(3)) * v1b_r2(jpoint) - res (jpoint) = resx(jpoint) * resx(jpoint) & - + resy(jpoint) * resy(jpoint) & - + resz(jpoint) * resz(jpoint) - enddo + allocate(env_r2(n_grid2)) + allocate(u2b_r12(n_grid2)) + allocate(gradx1_u2b(n_grid2)) + allocate(grady1_u2b(n_grid2)) + allocate(gradz1_u2b(n_grid2)) - deallocate(v1b_r2, u2b_r12, gradx1_u2b, grady1_u2b, gradz1_u2b) + env_r1 = env_nucl(r1) + call grad1_env_nucl(r1, grad1_env) + + call env_nucl_r1_seq(n_grid2, env_r2) + call j12_r1_seq(r1, n_grid2, u2b_r12) + call grad1_j12_r1_seq(r1, n_grid2, gradx1_u2b, grady1_u2b, gradz1_u2b) + + do jpoint = 1, n_points_extra_final_grid + resx(jpoint) = (gradx1_u2b(jpoint) * env_r1 + u2b_r12(jpoint) * grad1_env(1)) * env_r2(jpoint) + resy(jpoint) = (grady1_u2b(jpoint) * env_r1 + u2b_r12(jpoint) * grad1_env(2)) * env_r2(jpoint) + resz(jpoint) = (gradz1_u2b(jpoint) * env_r1 + u2b_r12(jpoint) * grad1_env(3)) * env_r2(jpoint) + enddo + + deallocate(env_r2, u2b_r12, gradx1_u2b, grady1_u2b, gradz1_u2b) + + endif ! env_type + + elseif(j2e_type .eq. "Mu_Nu") then + + if(env_type .eq. "None") then + + call grad1_jmu_r1_seq(mu_erf, r1, n_grid2, resx, resy, resz) + + else + + ! u(r1,r2) = jmu(r12) x v(r1) x v(r2) + jnu(r12) x [1 - v(r1) x v(r2)] + + allocate(env_r2(n_grid2)) + allocate(u2b_mu(n_grid2)) + allocate(u2b_nu(n_grid2)) + allocate(gradx1_mu(n_grid2), grady1_mu(n_grid2), gradz1_mu(n_grid2)) + allocate(gradx1_nu(n_grid2), grady1_nu(n_grid2), gradz1_nu(n_grid2)) + + env_r1 = env_nucl(r1) + call grad1_env_nucl(r1, grad1_env) + call env_nucl_r1_seq(n_grid2, env_r2) + + call jmu_r1_seq(mu_erf, r1, n_grid2, u2b_mu) + call jmu_r1_seq(nu_erf, r1, n_grid2, u2b_nu) + + call grad1_jmu_r1_seq(mu_erf, r1, n_grid2, gradx1_mu, grady1_mu, gradz1_mu) + call grad1_jmu_r1_seq(nu_erf, r1, n_grid2, gradx1_nu, grady1_nu, gradz1_nu) + + do jpoint = 1, n_points_extra_final_grid + resx(jpoint) = gradx1_nu(jpoint) + ((gradx1_mu(jpoint) - gradx1_nu(jpoint)) * env_r1 + (u2b_mu(jpoint) - u2b_nu(jpoint)) * grad1_env(1)) * env_r2(jpoint) + resy(jpoint) = grady1_nu(jpoint) + ((grady1_mu(jpoint) - grady1_nu(jpoint)) * env_r1 + (u2b_mu(jpoint) - u2b_nu(jpoint)) * grad1_env(2)) * env_r2(jpoint) + resz(jpoint) = gradz1_nu(jpoint) + ((gradz1_mu(jpoint) - gradz1_nu(jpoint)) * env_r1 + (u2b_mu(jpoint) - u2b_nu(jpoint)) * grad1_env(3)) * env_r2(jpoint) + enddo + + deallocate(env_r2) + deallocate(u2b_mu) + deallocate(u2b_nu) + deallocate(gradx1_mu, grady1_mu, gradz1_mu) + deallocate(gradx1_nu, grady1_nu, gradz1_nu) + + endif ! env_type else - print *, ' j1b_type = ', j1b_type, 'not implemented yet' + print *, ' Error in get_grad1_u12_withsq_r1_seq: Unknown Jastrow' stop + endif ! j2e_type + + + if(j1e_type .ne. 
"None") then + PROVIDE j1e_gradx j1e_grady j1e_gradz + PROVIDE elec_num + tmp = 1.d0 / (dble(elec_num) - 1.d0) + do jpoint = 1, n_points_extra_final_grid + resx(jpoint) = resx(jpoint) + tmp * j1e_gradx(ipoint) + resy(jpoint) = resy(jpoint) + tmp * j1e_grady(ipoint) + resz(jpoint) = resz(jpoint) + tmp * j1e_gradz(ipoint) + enddo endif + do jpoint = 1, n_points_extra_final_grid + res(jpoint) = resx(jpoint) * resx(jpoint) + resy(jpoint) * resy(jpoint) + resz(jpoint) * resz(jpoint) + enddo + return -end subroutine get_grad1_u12_withsq_r1_seq +end ! --- -subroutine grad1_j12_mu_r1_seq(r1, n_grid2, gradx, grady, gradz) +subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) BEGIN_DOC ! - ! gradient of j(mu(r1,r2),r12) form of jastrow. - ! - ! if mu(r1,r2) = cst ---> j1b_type < 200 and - ! - ! d/dx1 j(mu,r12) = 0.5 * (1 - erf(mu *r12))/r12 * (x1 - x2) - ! - ! if mu(r1,r2) /= cst ---> 200 < j1b_type < 300 and - ! - ! d/dx1 j(mu(r1,r2),r12) = exp(-(mu(r1,r2)*r12)**2) /(2 *sqrt(pi) * mu(r1,r2)**2 ) d/dx1 mu(r1,r2) - ! + 0.5 * (1 - erf(mu(r1,r2) *r12))/r12 * (x1 - x2) + ! d/dx1 j_2e(1,2) + ! d/dy1 j_2e(1,2) + ! d/dz1 j_2e(1,2) ! END_DOC @@ -110,8 +159,15 @@ subroutine grad1_j12_mu_r1_seq(r1, n_grid2, gradx, grady, gradz) integer :: jpoint double precision :: r2(3) double precision :: dx, dy, dz, r12, tmp + double precision :: mu_val, mu_tmp, mu_der(3) - if((j1b_type .ge. 0) .and. (j1b_type .lt. 200)) then + PROVIDE j2e_type + + if(j2e_type .eq. "Mu") then + + ! d/dx1 j(mu,r12) = 0.5 * [(1 - erf(mu * r12)) / r12] * (x1 - x2) + ! d/dy1 j(mu,r12) = 0.5 * [(1 - erf(mu * r12)) / r12] * (y1 - y2) + ! d/dz1 j(mu,r12) = 0.5 * [(1 - erf(mu * r12)) / r12] * (z1 - z2) do jpoint = 1, n_points_extra_final_grid ! r2 @@ -138,9 +194,10 @@ subroutine grad1_j12_mu_r1_seq(r1, n_grid2, gradx, grady, gradz) gradz(jpoint) = tmp * dz enddo - elseif((j1b_type .ge. 200) .and. (j1b_type .lt. 300)) then + elseif(j2e_type .eq. "Mur") then - double precision :: mu_val, mu_tmp, mu_der(3) + ! d/dx1 j(mu(r1,r2),r12) = exp(-(mu(r1,r2)*r12)**2) /(2 *sqrt(pi) * mu(r1,r2)**2 ) d/dx1 mu(r1,r2) + ! + 0.5 * (1 - erf(mu(r1,r2) *r12))/r12 * (x1 - x2) do jpoint = 1, n_points_extra_final_grid ! r2 @@ -174,19 +231,107 @@ subroutine grad1_j12_mu_r1_seq(r1, n_grid2, gradx, grady, gradz) gradz(jpoint) = gradz(jpoint) + tmp * dz enddo + elseif(j2e_type .eq. "Boys") then + + ! + ! j(r12) = 0.5 r12 / (1 + a_boys r_12) + ! + ! d/dx1 j(r12) = 0.5 (x1 - x2) / [r12 * (1 + b r12^2)^2] + ! d/dy1 j(r12) = 0.5 (y1 - y2) / [r12 * (1 + b r12^2)^2] + ! d/dz1 j(r12) = 0.5 (z1 - z2) / [r12 * (1 + b r12^2)^2] + + PROVIDE a_boys + + do jpoint = 1, n_points_extra_final_grid ! r2 + + r2(1) = final_grid_points_extra(1,jpoint) + r2(2) = final_grid_points_extra(2,jpoint) + r2(3) = final_grid_points_extra(3,jpoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt(dx * dx + dy * dy + dz * dz) + if(r12 .lt. 1d-10) then + gradx(jpoint) = 0.d0 + grady(jpoint) = 0.d0 + gradz(jpoint) = 0.d0 + cycle + endif + + tmp = 1.d0 + a_boys * r12 + tmp = 0.5d0 / (r12 * tmp * tmp) + + gradx(jpoint) = tmp * dx + grady(jpoint) = tmp * dy + gradz(jpoint) = tmp * dz + enddo + else - print *, ' j1b_type = ', j1b_type, 'not implemented yet' + print *, ' Error in grad1_j12_r1_seq: Unknown j2e_type = ', j2e_type stop - endif + endif ! j2e_type return -end subroutine grad1_j12_mu_r1_seq +end ! --- -subroutine j12_mu_r1_seq(r1, n_grid2, res) +subroutine grad1_jmu_r1_seq(mu, r1, n_grid2, gradx, grady, gradz) + + BEGIN_DOC + ! + ! 
d/dx1 jmu(r12) = 0.5 * [(1 - erf(mu * r12)) / r12] * (x1 - x2) + ! d/dy1 jmu(r12) = 0.5 * [(1 - erf(mu * r12)) / r12] * (y1 - y2) + ! d/dz1 jmu(r12) = 0.5 * [(1 - erf(mu * r12)) / r12] * (z1 - z2) + ! + END_DOC + + implicit none + integer , intent(in) :: n_grid2 + double precision, intent(in) :: mu, r1(3) + double precision, intent(out) :: gradx(n_grid2) + double precision, intent(out) :: grady(n_grid2) + double precision, intent(out) :: gradz(n_grid2) + + integer :: jpoint + double precision :: r2(3) + double precision :: dx, dy, dz, r12, tmp + + + do jpoint = 1, n_points_extra_final_grid ! r2 + + r2(1) = final_grid_points_extra(1,jpoint) + r2(2) = final_grid_points_extra(2,jpoint) + r2(3) = final_grid_points_extra(3,jpoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + + r12 = dsqrt(dx * dx + dy * dy + dz * dz) + if(r12 .lt. 1d-10) then + gradx(jpoint) = 0.d0 + grady(jpoint) = 0.d0 + gradz(jpoint) = 0.d0 + cycle + endif + + tmp = 0.5d0 * (1.d0 - derf(mu * r12)) / r12 + + gradx(jpoint) = tmp * dx + grady(jpoint) = tmp * dy + gradz(jpoint) = tmp * dz + enddo + + return +end + +! --- + +subroutine j12_r1_seq(r1, n_grid2, res) include 'constants.include.F' @@ -197,11 +342,36 @@ subroutine j12_mu_r1_seq(r1, n_grid2, res) integer :: jpoint double precision :: r2(3) + double precision :: dx, dy, dz double precision :: mu_tmp, r12 PROVIDE final_grid_points_extra - if((j1b_type .ge. 0) .and. (j1b_type .lt. 200)) then + if(j2e_type .eq. "Mu") then + + PROVIDE mu_erf + + do jpoint = 1, n_points_extra_final_grid ! r2 + + r2(1) = final_grid_points_extra(1,jpoint) + r2(2) = final_grid_points_extra(2,jpoint) + r2(3) = final_grid_points_extra(3,jpoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt(dx * dx + dy * dy + dz * dz) + + mu_tmp = mu_erf * r12 + + res(jpoint) = 0.5d0 * r12 * (1.d0 - derf(mu_tmp)) - inv_sq_pi_2 * dexp(-mu_tmp*mu_tmp) / mu_erf + enddo + + elseif(j2e_type .eq. "Boys") then + + ! j(r12) = 0.5 r12 / (1 + a_boys r_12) + + PROVIDE a_boys do jpoint = 1, n_points_extra_final_grid ! r2 @@ -209,27 +379,65 @@ subroutine j12_mu_r1_seq(r1, n_grid2, res) r2(2) = final_grid_points_extra(2,jpoint) r2(3) = final_grid_points_extra(3,jpoint) - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - mu_tmp = mu_erf * r12 + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt(dx * dx + dy * dy + dz * dz) - res(jpoint) = 0.5d0 * r12 * (1.d0 - derf(mu_tmp)) - inv_sq_pi_2 * dexp(-mu_tmp*mu_tmp) / mu_erf + res(jpoint) = 0.5d0 * r12 / (1.d0 + a_boys * r12) enddo else - print *, ' j1b_type = ', j1b_type, 'not implemented for j12_mu_r1_seq' + print *, ' Error in j12_r1_seq: Unknown j2e_type = ', j2e_type stop - endif + endif ! j2e_type return -end subroutine j12_mu_r1_seq +end ! --- -subroutine j1b_nucl_r1_seq(n_grid2, res) +subroutine jmu_r1_seq(mu, r1, n_grid2, res) + + include 'constants.include.F' + + implicit none + integer, intent(in) :: n_grid2 + double precision, intent(in) :: mu, r1(3) + double precision, intent(out) :: res(n_grid2) + + integer :: jpoint + double precision :: r2(3) + double precision :: dx, dy, dz + double precision :: r12, tmp1, tmp2 + + tmp1 = inv_sq_pi_2 / mu + + do jpoint = 1, n_points_extra_final_grid ! 
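For j2e_type = "Boys" the patch uses j(r12) = 0.5 r12 / (1 + a_boys r12), whose r1-gradient reduces to 0.5 (r1 - r2) / [r12 (1 + a_boys r12)^2], as stated in the comments of grad1_j12_r1_seq. The snippet below is a toy verification of that closed form against a central difference along x1; a_boys and the geometry are arbitrary, and j_boys is a helper written for this sketch only.

program boys_grad_check_sketch
  implicit none
  double precision :: a_boys, r1(3), r2(3), dx, dy, dz, r12
  double precision :: grad_x, jp, jm, eps
  double precision, external :: j_boys

  a_boys = 0.6d0                 ! arbitrary illustrative value
  r1 = (/ 0.1d0, -0.3d0,  0.5d0 /)
  r2 = (/ 0.9d0,  0.2d0, -0.4d0 /)

  dx  = r1(1) - r2(1)
  dy  = r1(2) - r2(2)
  dz  = r1(3) - r2(3)
  r12 = dsqrt(dx*dx + dy*dy + dz*dz)

  ! closed form: d/dx1 j = 0.5 * (x1 - x2) / [r12 * (1 + a r12)^2]
  grad_x = 0.5d0 * dx / (r12 * (1.d0 + a_boys*r12)**2)

  ! central finite difference along x1
  eps = 1d-6
  r1(1) = r1(1) + eps      ; jp = j_boys(a_boys, r1, r2)
  r1(1) = r1(1) - 2.d0*eps ; jm = j_boys(a_boys, r1, r2)

  print *, ' analytic :', grad_x
  print *, ' numeric  :', (jp - jm) / (2.d0*eps)
end

double precision function j_boys(a, r1, r2)
  implicit none
  double precision, intent(in) :: a, r1(3), r2(3)
  double precision :: r12
  r12 = dsqrt((r1(1)-r2(1))**2 + (r1(2)-r2(2))**2 + (r1(3)-r2(3))**2)
  j_boys = 0.5d0 * r12 / (1.d0 + a*r12)
end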
r2 + + r2(1) = final_grid_points_extra(1,jpoint) + r2(2) = final_grid_points_extra(2,jpoint) + r2(3) = final_grid_points_extra(3,jpoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt(dx * dx + dy * dy + dz * dz) + + tmp2 = mu * r12 + + res(jpoint) = 0.5d0 * r12 * (1.d0 - derf(tmp2)) - tmp1 * dexp(-tmp2*tmp2) + enddo + + return +end + +! --- + + +subroutine env_nucl_r1_seq(n_grid2, res) ! TODO ! change loops order @@ -242,7 +450,7 @@ subroutine j1b_nucl_r1_seq(n_grid2, res) integer :: i, jpoint double precision :: a, d, e, x, y, z - if((j1b_type .eq. 2) .or. (j1b_type .eq. 102)) then + if(env_type .eq. "Sum_Slat") then res = 1.d0 @@ -252,16 +460,16 @@ subroutine j1b_nucl_r1_seq(n_grid2, res) r(3) = final_grid_points_extra(3,jpoint) do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) - res(jpoint) -= dexp(-a*dsqrt(d)) + res(jpoint) -= env_coef(i) * dexp(-a*dsqrt(d)) enddo enddo - elseif((j1b_type .eq. 3) .or. (j1b_type .eq. 103)) then + elseif(env_type .eq. "Prod_Gauss") then res = 1.d0 @@ -271,7 +479,7 @@ subroutine j1b_nucl_r1_seq(n_grid2, res) r(3) = final_grid_points_extra(3,jpoint) do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) @@ -281,7 +489,7 @@ subroutine j1b_nucl_r1_seq(n_grid2, res) enddo enddo - elseif((j1b_type .eq. 4) .or. (j1b_type .eq. 104)) then + elseif(env_type .eq. "Sum_Gauss") then res = 1.d0 @@ -291,15 +499,15 @@ subroutine j1b_nucl_r1_seq(n_grid2, res) r(3) = final_grid_points_extra(3,jpoint) do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) - res(jpoint) -= j1b_pen_coef(i) * dexp(-a*d) + res(jpoint) -= env_coef(i) * dexp(-a*d) enddo enddo - elseif((j1b_type .eq. 5) .or. (j1b_type .eq. 105)) then + elseif(env_type .eq. "Sum_Quartic") then res = 1.d0 @@ -309,24 +517,243 @@ subroutine j1b_nucl_r1_seq(n_grid2, res) r(3) = final_grid_points_extra(3,jpoint) do i = 1, nucl_num - a = j1b_pen(i) + a = env_expo(i) x = r(1) - nucl_coord(i,1) y = r(2) - nucl_coord(i,2) z = r(3) - nucl_coord(i,3) d = x*x + y*y + z*z - res(jpoint) -= dexp(-a*d*d) + res(jpoint) -= env_coef(i) * dexp(-a*d*d) enddo enddo else - print *, ' j1b_type = ', j1b_type, 'not implemented for j1b_nucl_r1_seq' + print *, ' Error in env_nucl_r1_seq: Unknown env_type = ', env_type stop endif return -end subroutine j1b_nucl_r1_seq +end + +! --- + +subroutine get_grad1_u12_2e_r1_seq(ipoint, n_grid2, resx, resy, resz) + + BEGIN_DOC + ! + ! grad_1 u_2e(r1,r2) + ! + ! we use grid for r1 and extra_grid for r2 + ! 
+ END_DOC + + implicit none + integer, intent(in) :: ipoint, n_grid2 + double precision, intent(out) :: resx(n_grid2), resy(n_grid2), resz(n_grid2) + + integer :: jpoint + double precision :: env_r1, tmp + double precision :: grad1_env(3), r1(3) + double precision, allocatable :: env_r2(:) + double precision, allocatable :: u2b_r12(:) + double precision, allocatable :: gradx1_u2b(:), grady1_u2b(:), gradz1_u2b(:) + double precision, allocatable :: u2b_mu(:), gradx1_mu(:), grady1_mu(:), gradz1_mu(:) + double precision, allocatable :: u2b_nu(:), gradx1_nu(:), grady1_nu(:), gradz1_nu(:) + double precision, external :: env_nucl + + PROVIDE j1e_type j2e_type env_type + PROVIDE final_grid_points + PROVIDE final_grid_points_extra + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + if( (j2e_type .eq. "Mu") .or. & + (j2e_type .eq. "Mur") .or. & + (j2e_type .eq. "Boys") ) then + + if(env_type .eq. "None") then + + call grad1_j12_r1_seq(r1, n_grid2, resx, resy, resz) + + else + + ! u(r1,r2) = j12_mu(r12) x v(r1) x v(r2) + ! grad1 u(r1, r2) = [(grad1 j12_mu) v(r1) + j12_mu grad1 v(r1)] v(r2) + + allocate(env_r2(n_grid2)) + allocate(u2b_r12(n_grid2)) + allocate(gradx1_u2b(n_grid2)) + allocate(grady1_u2b(n_grid2)) + allocate(gradz1_u2b(n_grid2)) + + env_r1 = env_nucl(r1) + call grad1_env_nucl(r1, grad1_env) + + call env_nucl_r1_seq(n_grid2, env_r2) + call j12_r1_seq(r1, n_grid2, u2b_r12) + call grad1_j12_r1_seq(r1, n_grid2, gradx1_u2b, grady1_u2b, gradz1_u2b) + + do jpoint = 1, n_points_extra_final_grid + resx(jpoint) = (gradx1_u2b(jpoint) * env_r1 + u2b_r12(jpoint) * grad1_env(1)) * env_r2(jpoint) + resy(jpoint) = (grady1_u2b(jpoint) * env_r1 + u2b_r12(jpoint) * grad1_env(2)) * env_r2(jpoint) + resz(jpoint) = (gradz1_u2b(jpoint) * env_r1 + u2b_r12(jpoint) * grad1_env(3)) * env_r2(jpoint) + enddo + + deallocate(env_r2, u2b_r12, gradx1_u2b, grady1_u2b, gradz1_u2b) + + endif ! env_type + + elseif(j2e_type .eq. "Mu_Nu") then + + if(env_type .eq. "None") then + + call grad1_jmu_r1_seq(mu_erf, r1, n_grid2, resx, resy, resz) + + else + + ! u(r1,r2) = jmu(r12) x v(r1) x v(r2) + jnu(r12) x [1 - v(r1) x v(r2)] + + allocate(env_r2(n_grid2)) + allocate(u2b_mu(n_grid2)) + allocate(u2b_nu(n_grid2)) + allocate(gradx1_mu(n_grid2), grady1_mu(n_grid2), gradz1_mu(n_grid2)) + allocate(gradx1_nu(n_grid2), grady1_nu(n_grid2), gradz1_nu(n_grid2)) + + env_r1 = env_nucl(r1) + call grad1_env_nucl(r1, grad1_env) + call env_nucl_r1_seq(n_grid2, env_r2) + + call jmu_r1_seq(mu_erf, r1, n_grid2, u2b_mu) + call jmu_r1_seq(nu_erf, r1, n_grid2, u2b_nu) + + call grad1_jmu_r1_seq(mu_erf, r1, n_grid2, gradx1_mu, grady1_mu, gradz1_mu) + call grad1_jmu_r1_seq(nu_erf, r1, n_grid2, gradx1_nu, grady1_nu, gradz1_nu) + + do jpoint = 1, n_points_extra_final_grid + resx(jpoint) = gradx1_nu(jpoint) + ((gradx1_mu(jpoint) - gradx1_nu(jpoint)) * env_r1 + (u2b_mu(jpoint) - u2b_nu(jpoint)) * grad1_env(1)) * env_r2(jpoint) + resy(jpoint) = grady1_nu(jpoint) + ((grady1_mu(jpoint) - grady1_nu(jpoint)) * env_r1 + (u2b_mu(jpoint) - u2b_nu(jpoint)) * grad1_env(2)) * env_r2(jpoint) + resz(jpoint) = gradz1_nu(jpoint) + ((gradz1_mu(jpoint) - gradz1_nu(jpoint)) * env_r1 + (u2b_mu(jpoint) - u2b_nu(jpoint)) * grad1_env(3)) * env_r2(jpoint) + enddo + + deallocate(env_r2) + deallocate(u2b_mu) + deallocate(u2b_nu) + deallocate(gradx1_mu, grady1_mu, gradz1_mu) + deallocate(gradx1_nu, grady1_nu, gradz1_nu) + + endif ! 
env_type + + else + + print *, ' Error in get_grad1_u12_withsq_r1_seq: Unknown Jastrow' + stop + + endif ! j2e_type + + return +end + +! --- + +subroutine get_u12_2e_r1_seq(ipoint, n_grid2, res) + + BEGIN_DOC + ! + ! u_2e(r1,r2) + ! + ! we use grid for r1 and extra_grid for r2 + ! + END_DOC + + implicit none + integer, intent(in) :: ipoint, n_grid2 + double precision, intent(out) :: res(n_grid2) + + integer :: jpoint + double precision :: env_r1, tmp + double precision :: grad1_env(3), r1(3) + double precision, allocatable :: env_r2(:) + double precision, allocatable :: u2b_r12(:) + double precision, allocatable :: u2b_mu(:), u2b_nu(:) + double precision, external :: env_nucl + + PROVIDE j1e_type j2e_type env_type + PROVIDE final_grid_points + PROVIDE final_grid_points_extra + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + if( (j2e_type .eq. "Mu") .or. & + (j2e_type .eq. "Mur") .or. & + (j2e_type .eq. "Boys") ) then + + if(env_type .eq. "None") then + + call j12_r1_seq(r1, n_grid2, res) + + else + + ! u(r1,r2) = j12_mu(r12) x v(r1) x v(r2) + + allocate(env_r2(n_grid2)) + allocate(u2b_r12(n_grid2)) + + env_r1 = env_nucl(r1) + call j12_r1_seq(r1, n_grid2, u2b_r12) + call env_nucl_r1_seq(n_grid2, env_r2) + + do jpoint = 1, n_points_extra_final_grid + res(jpoint) = env_r1 * u2b_r12(jpoint) * env_r2(jpoint) + enddo + + deallocate(env_r2, u2b_r12) + + endif ! env_type + + elseif(j2e_type .eq. "Mu_Nu") then + + if(env_type .eq. "None") then + + call jmu_r1_seq(mu_erf, r1, n_grid2, res) + + else + + ! u(r1,r2) = jmu(r12) x v(r1) x v(r2) + jnu(r12) x [1 - v(r1) x v(r2)] + + allocate(env_r2(n_grid2)) + allocate(u2b_mu(n_grid2)) + allocate(u2b_nu(n_grid2)) + + env_r1 = env_nucl(r1) + call env_nucl_r1_seq(n_grid2, env_r2) + + call jmu_r1_seq(mu_erf, r1, n_grid2, u2b_mu) + call jmu_r1_seq(nu_erf, r1, n_grid2, u2b_nu) + + do jpoint = 1, n_points_extra_final_grid + res(jpoint) = u2b_nu(jpoint) + (u2b_mu(jpoint) - u2b_nu(jpoint)) * env_r1 * env_r2(jpoint) + enddo + + deallocate(env_r2) + deallocate(u2b_mu) + deallocate(u2b_nu) + + endif ! env_type + + else + + print *, ' Error in get_u12_withsq_r1_seq: Unknown Jastrow' + stop + + endif ! j2e_type + + return +end ! --- diff --git a/plugins/local/non_h_ints_mu/new_grad_tc.irp.f b/plugins/local/non_h_ints_mu/new_grad_tc.irp.f deleted file mode 100644 index ab3cc3be..00000000 --- a/plugins/local/non_h_ints_mu/new_grad_tc.irp.f +++ /dev/null @@ -1,171 +0,0 @@ - -! --- - -BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao_loop, (ao_num, ao_num, ao_num, ao_num)] - - BEGIN_DOC - ! - ! tc_grad_and_lapl_ao_loop(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) . \grad_1 | ij > - ! - ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) - ! - ! This is obtained by integration by parts. - ! - END_DOC - - implicit none - integer :: ipoint, i, j, k, l - double precision :: weight1, contrib_x, contrib_y, contrib_z, tmp_x, tmp_y, tmp_z - double precision :: ao_k_r, ao_i_r, ao_i_dx, ao_i_dy, ao_i_dz - double precision :: ao_j_r, ao_l_r, ao_l_dx, ao_l_dy, ao_l_dz - double precision :: time0, time1 - double precision, allocatable :: ac_mat(:,:,:,:) - - print*, ' providing tc_grad_and_lapl_ao_loop ...' - call wall_time(time0) - - allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) - ac_mat = 0.d0 - - ! 
--- - - do ipoint = 1, n_points_final_grid - weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) - - do i = 1, ao_num - ao_i_r = weight1 * aos_in_r_array (i,ipoint) - ao_i_dx = weight1 * aos_grad_in_r_array(i,ipoint,1) - ao_i_dy = weight1 * aos_grad_in_r_array(i,ipoint,2) - ao_i_dz = weight1 * aos_grad_in_r_array(i,ipoint,3) - - do k = 1, ao_num - ao_k_r = aos_in_r_array(k,ipoint) - - tmp_x = ao_k_r * ao_i_dx - ao_i_r * aos_grad_in_r_array(k,ipoint,1) - tmp_y = ao_k_r * ao_i_dy - ao_i_r * aos_grad_in_r_array(k,ipoint,2) - tmp_z = ao_k_r * ao_i_dz - ao_i_r * aos_grad_in_r_array(k,ipoint,3) - - do j = 1, ao_num - do l = 1, ao_num - - contrib_x = int2_grad1_u12_ao(l,j,ipoint,1) * tmp_x - contrib_y = int2_grad1_u12_ao(l,j,ipoint,2) * tmp_y - contrib_z = int2_grad1_u12_ao(l,j,ipoint,3) * tmp_z - - ac_mat(k,i,l,j) += contrib_x + contrib_y + contrib_z - enddo - enddo - enddo - enddo - enddo - - ! --- - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - tc_grad_and_lapl_ao_loop(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) - enddo - enddo - enddo - enddo - - deallocate(ac_mat) - - call wall_time(time1) - print*, ' Wall time for tc_grad_and_lapl_ao_loop = ', time1 - time0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao, (ao_num, ao_num, ao_num, ao_num)] - - BEGIN_DOC - ! - ! tc_grad_and_lapl_ao(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) . \grad_1 | ij > - ! - ! = -1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) - ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 (-1) \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) - ! - ! -1 in \int dr2 - ! - ! This is obtained by integration by parts. - ! - END_DOC - - implicit none - integer :: ipoint, i, j, k, l, m - double precision :: weight1, ao_k_r, ao_i_r - double precision :: time0, time1 - double precision, allocatable :: b_mat(:,:,:,:) - - print*, ' providing tc_grad_and_lapl_ao ...' 
- call wall_time(time0) - - if(read_tc_integ) then - - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/tc_grad_and_lapl_ao', action="read") - read(11) tc_grad_and_lapl_ao - close(11) - - else - - PROVIDE int2_grad1_u12_ao - - allocate(b_mat(n_points_final_grid,ao_num,ao_num,3)) - - b_mat = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & - !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, b_mat, & - !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector) - !$OMP DO SCHEDULE (static) - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - - weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) - ao_i_r = aos_in_r_array_transp(ipoint,i) - ao_k_r = aos_in_r_array_transp(ipoint,k) - - b_mat(ipoint,k,i,1) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) - b_mat(ipoint,k,i,2) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) - b_mat(ipoint,k,i,3) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - tc_grad_and_lapl_ao = 0.d0 - do m = 1, 3 - call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, b_mat(1,1,1,m), n_points_final_grid & - , 1.d0, tc_grad_and_lapl_ao, ao_num*ao_num) - enddo - deallocate(b_mat) - - call sum_A_At(tc_grad_and_lapl_ao(1,1,1,1), ao_num*ao_num) - - endif - - if(write_tc_integ.and.mpi_master) then - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/tc_grad_and_lapl_ao', action="write") - call ezfio_set_work_empty(.False.) - write(11) tc_grad_and_lapl_ao - close(11) - call ezfio_set_tc_keywords_io_tc_integ('Read') - endif - - call wall_time(time1) - print*, ' Wall time for tc_grad_and_lapl_ao = ', time1 - time0 - call print_memory_usage() - -END_PROVIDER - -! --- - - diff --git a/plugins/local/non_h_ints_mu/new_grad_tc_manu.irp.f b/plugins/local/non_h_ints_mu/new_grad_tc_manu.irp.f index 7ab5b327..5df80a0e 100644 --- a/plugins/local/non_h_ints_mu/new_grad_tc_manu.irp.f +++ b/plugins/local/non_h_ints_mu/new_grad_tc_manu.irp.f @@ -3,6 +3,15 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_test, (ao_num, ao_num, n_po BEGIN_DOC ! + ! !!!!!! WARNING !!!!!!!!! + ! + ! DEFINED WITH - SIGN + ! + ! FOR 3e-iontegrals this doesn't matter + ! + ! !!!!!! WARNING !!!!!!!!! + ! + ! ! int2_grad1_u12_ao_test(i,j,ipoint,:) = \int dr2 [-1 * \grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) ! ! where r1 = r(ipoint) @@ -16,9 +25,9 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_test, (ao_num, ao_num, n_po ! ! int2_grad1_u12_ao_test(i,j,ipoint,:) = v1 x [ 0.5 x \int dr2 [(r1 - r2) (erf(mu * r12)-1)r_12] v2 \phi_i(r2) \phi_j(r2) ] ! - \grad_1 v1 x [ \int dr2 u12 v2 \phi_i(r2) \phi_j(r2) ] - ! = 0.5 v_1b(ipoint) * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) * r(:) - ! - 0.5 v_1b(ipoint) * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,:) - ! - v_1b_grad[:,ipoint] * v_ij_u_cst_mu_j1b(i,j,ipoint) + ! = 0.5 env_val(ipoint) * v_ij_erf_rk_cst_mu_env(i,j,ipoint) * r(:) + ! - 0.5 env_val(ipoint) * x_v_ij_erf_rk_cst_mu_env(i,j,ipoint,:) + ! - env_grad[:,ipoint] * v_ij_u_cst_mu_env(i,j,ipoint) ! ! END_DOC @@ -31,8 +40,6 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_test, (ao_num, ao_num, n_po print*, ' providing int2_grad1_u12_ao_test ...' 
call wall_time(time0) - PROVIDE j1b_type - if(read_tc_integ) then open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao_test', action="read") @@ -41,41 +48,33 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_test, (ao_num, ao_num, n_po else - if(j1b_type .eq. 3) then + if((j2e_type .eq. "Mu") .and. (env_type .eq. "Prod_Gauss")) then + do ipoint = 1, n_points_final_grid x = final_grid_points(1,ipoint) y = final_grid_points(2,ipoint) z = final_grid_points(3,ipoint) - tmp0 = 0.5d0 * v_1b(ipoint) - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) + tmp0 = 0.5d0 * env_val(ipoint) + tmp_x = env_grad(1,ipoint) + tmp_y = env_grad(2,ipoint) + tmp_z = env_grad(3,ipoint) do j = 1, ao_num do i = 1, ao_num - tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint) - tmp2 = v_ij_u_cst_mu_j1b_test(i,j,ipoint) - int2_grad1_u12_ao_test(i,j,ipoint,1) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint,1) - tmp2 * tmp_x - int2_grad1_u12_ao_test(i,j,ipoint,2) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint,2) - tmp2 * tmp_y - int2_grad1_u12_ao_test(i,j,ipoint,3) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b_test(i,j,ipoint,3) - tmp2 * tmp_z + tmp1 = tmp0 * v_ij_erf_rk_cst_mu_env_test(i,j,ipoint) + tmp2 = v_ij_u_cst_mu_env_test(i,j,ipoint) + int2_grad1_u12_ao_test(i,j,ipoint,1) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_env_test(i,j,ipoint,1) - tmp2 * tmp_x + int2_grad1_u12_ao_test(i,j,ipoint,2) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_env_test(i,j,ipoint,2) - tmp2 * tmp_y + int2_grad1_u12_ao_test(i,j,ipoint,3) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_env_test(i,j,ipoint,3) - tmp2 * tmp_z enddo enddo enddo + else - do ipoint = 1, n_points_final_grid - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - tmp1 = v_ij_erf_rk_cst_mu(i,j,ipoint) - int2_grad1_u12_ao_test(i,j,ipoint,1) = tmp1 * x - x_v_ij_erf_rk_cst_mu_tmp(i,j,ipoint,1) - int2_grad1_u12_ao_test(i,j,ipoint,2) = tmp1 * y - x_v_ij_erf_rk_cst_mu_tmp(i,j,ipoint,2) - int2_grad1_u12_ao_test(i,j,ipoint,3) = tmp1 * z - x_v_ij_erf_rk_cst_mu_tmp(i,j,ipoint,3) - enddo - enddo - enddo - int2_grad1_u12_ao_test *= 0.5d0 - endif + + print *, ' Error in int2_grad1_u12_ao_test: Unknown j2e_type = ', j2e_type + stop + + endif ! j2e_type endif @@ -191,7 +190,7 @@ BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao_test, (ao_num, ao_num, ao_ endif call wall_time(time1) - print*, ' Wall time for tc_grad_and_lapl_ao_test = ', time1 - time0 + print*, ' Wall time for tc_grad_and_lapl_ao_test (min) = ', (time1 - time0) / 60.d0 END_PROVIDER diff --git a/plugins/local/non_h_ints_mu/numerical_integ.irp.f b/plugins/local/non_h_ints_mu/numerical_integ.irp.f index f9457247..5436b857 100644 --- a/plugins/local/non_h_ints_mu/numerical_integ.irp.f +++ b/plugins/local/non_h_ints_mu/numerical_integ.irp.f @@ -1,11 +1,11 @@ ! --- -double precision function num_v_ij_u_cst_mu_j1b(i, j, ipoint) +double precision function num_v_ij_u_cst_mu_env(i, j, ipoint) BEGIN_DOC ! - ! \int dr2 u12 \phi_i(r2) \phi_j(r2) x v_1b(r2) + ! \int dr2 u12 \phi_i(r2) \phi_j(r2) x v_env(r2) ! 
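+ ! (brute-force quadrature over the r2 grid; intended as a numerical reference
+ !  for the analytical quantity v_ij_u_cst_mu_env)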
END_DOC @@ -17,31 +17,31 @@ double precision function num_v_ij_u_cst_mu_j1b(i, j, ipoint) double precision :: r1(3), r2(3) double precision, external :: ao_value - double precision, external :: j12_mu, j1b_nucl, j12_mu_gauss + double precision, external :: j12_mu, env_nucl, j12_mu_gauss r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - num_v_ij_u_cst_mu_j1b = 0.d0 + num_v_ij_u_cst_mu_env = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) r2(3) = final_grid_points(3,jpoint) - num_v_ij_u_cst_mu_j1b += ao_value(i, r2) * ao_value(j, r2) * j12_mu_gauss(r1, r2) * j1b_nucl(r2) * final_weight_at_r_vector(jpoint) + num_v_ij_u_cst_mu_env += ao_value(i, r2) * ao_value(j, r2) * j12_mu_gauss(r1, r2) * env_nucl(r2) * final_weight_at_r_vector(jpoint) enddo return -end function num_v_ij_u_cst_mu_j1b +end ! --- -double precision function num_int2_u2_j1b2(i, j, ipoint) +double precision function num_int2_u2_env2(i, j, ipoint) BEGIN_DOC ! - ! \int dr2 u12^2 \phi_i(r2) \phi_j(r2) x v_1b(r2)^2 + ! \int dr2 u12^2 \phi_i(r2) \phi_j(r2) x v_env(r2)^2 ! END_DOC @@ -54,14 +54,14 @@ double precision function num_int2_u2_j1b2(i, j, ipoint) double precision :: dx, dy, dz, r12, x2, tmp1, tmp2, tmp3, coef, expo double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl double precision, external :: j12_mu r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - num_int2_u2_j1b2 = 0.d0 + num_int2_u2_env2 = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) @@ -72,7 +72,7 @@ double precision function num_int2_u2_j1b2(i, j, ipoint) x2 = dx * dx + dy * dy + dz * dz r12 = dsqrt(x2) - tmp1 = j1b_nucl(r2) + tmp1 = env_nucl(r2) tmp2 = tmp1 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) !tmp3 = 0.d0 @@ -84,19 +84,19 @@ double precision function num_int2_u2_j1b2(i, j, ipoint) tmp3 = j12_mu(r1, r2) tmp3 = tmp3 * tmp3 - num_int2_u2_j1b2 += tmp2 * tmp3 + num_int2_u2_env2 += tmp2 * tmp3 enddo return -end function num_int2_u2_j1b2 +end ! --- -double precision function num_int2_grad1u2_grad2u2_j1b2(i, j, ipoint) +double precision function num_int2_grad1u2_grad2u2_env2(i, j, ipoint) BEGIN_DOC ! - ! \int dr2 \frac{-[erf(mu r12) -1]^2}{4} \phi_i(r2) \phi_j(r2) x v_1b(r2)^2 + ! \int dr2 \frac{-[erf(mu r12) -1]^2}{4} \phi_i(r2) \phi_j(r2) x v_env(r2)^2 ! 
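+ ! Note: assuming the standard "Mu" Jastrow j_mu(r12) = (r12/2) [1 - erf(mu r12)]
+ !       - exp(-(mu r12)^2) / (2 sqrt(pi) mu), one has d j_mu / d r12 = [1 - erf(mu r12)] / 2,
+ !       so the factor -[erf(mu r12) - 1]^2 / 4 is (grad_1 u12).(grad_2 u12) = -(grad_1 u12)^2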
END_DOC @@ -109,13 +109,13 @@ double precision function num_int2_grad1u2_grad2u2_j1b2(i, j, ipoint) double precision :: dx, dy, dz, r12, x2, tmp1, tmp2, tmp3, coef, expo double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - num_int2_grad1u2_grad2u2_j1b2 = 0.d0 + num_int2_grad1u2_grad2u2_env2 = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) @@ -126,7 +126,7 @@ double precision function num_int2_grad1u2_grad2u2_j1b2(i, j, ipoint) x2 = dx * dx + dy * dy + dz * dz r12 = dsqrt(x2) - tmp1 = j1b_nucl(r2) + tmp1 = env_nucl(r2) tmp2 = tmp1 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) !tmp3 = 0.d0 @@ -140,19 +140,19 @@ double precision function num_int2_grad1u2_grad2u2_j1b2(i, j, ipoint) tmp3 = -0.25d0 * tmp3 - num_int2_grad1u2_grad2u2_j1b2 += tmp2 * tmp3 + num_int2_grad1u2_grad2u2_env2 += tmp2 * tmp3 enddo return -end function num_int2_grad1u2_grad2u2_j1b2 +end ! --- -double precision function num_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint) +double precision function num_v_ij_erf_rk_cst_mu_env(i, j, ipoint) BEGIN_DOC ! - ! \int dr2 [erf(mu r12) -1]/r12 \phi_i(r2) \phi_j(r2) x v_1b(r2) + ! \int dr2 [erf(mu r12) -1]/r12 \phi_i(r2) \phi_j(r2) x v_env(r2) ! END_DOC @@ -165,13 +165,13 @@ double precision function num_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint) double precision :: dx, dy, dz, r12, tmp1, tmp2 double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - num_v_ij_erf_rk_cst_mu_j1b = 0.d0 + num_v_ij_erf_rk_cst_mu_env = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) @@ -183,21 +183,21 @@ double precision function num_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint) if(r12 .lt. 1d-10) cycle tmp1 = (derf(mu_erf * r12) - 1.d0) / r12 - tmp2 = tmp1 * ao_value(i, r2) * ao_value(j, r2) * j1b_nucl(r2) * final_weight_at_r_vector(jpoint) + tmp2 = tmp1 * ao_value(i, r2) * ao_value(j, r2) * env_nucl(r2) * final_weight_at_r_vector(jpoint) - num_v_ij_erf_rk_cst_mu_j1b += tmp2 + num_v_ij_erf_rk_cst_mu_env += tmp2 enddo return -end function num_v_ij_erf_rk_cst_mu_j1b +end ! --- -subroutine num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) +subroutine num_x_v_ij_erf_rk_cst_mu_env(i, j, ipoint, integ) BEGIN_DOC ! - ! \int dr2 [erf(mu r12) -1]/r12 \phi_i(r2) \phi_j(r2) x v_1b(r2) x r2 + ! \int dr2 [erf(mu r12) -1]/r12 \phi_i(r2) \phi_j(r2) x v_env(r2) x r2 ! END_DOC @@ -212,7 +212,7 @@ subroutine num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) double precision :: tmp_x, tmp_y, tmp_z double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) @@ -232,7 +232,7 @@ subroutine num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) if(r12 .lt. 
1d-10) cycle tmp1 = (derf(mu_erf * r12) - 1.d0) / r12 - tmp2 = tmp1 * ao_value(i, r2) * ao_value(j, r2) * j1b_nucl(r2) * final_weight_at_r_vector(jpoint) + tmp2 = tmp1 * ao_value(i, r2) * ao_value(j, r2) * env_nucl(r2) * final_weight_at_r_vector(jpoint) tmp_x += tmp2 * r2(1) tmp_y += tmp2 * r2(2) @@ -244,7 +244,7 @@ subroutine num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) integ(3) = tmp_z return -end subroutine num_x_v_ij_erf_rk_cst_mu_j1b +end ! --- @@ -252,7 +252,7 @@ subroutine num_int2_grad1_u12_ao(i, j, ipoint, integ) BEGIN_DOC ! - ! \int dr2 [-grad_1 u12] \phi_i(r2) \phi_j(r2) x v12_1b(r1, r2) + ! \int dr2 [-grad_1 u12] \phi_i(r2) \phi_j(r2) x v12_env(r1, r2) ! END_DOC @@ -292,78 +292,7 @@ subroutine num_int2_grad1_u12_ao(i, j, ipoint, integ) integ(3) = tmp_z return -end subroutine num_int2_grad1_u12_ao - -! --- - -double precision function num_gradu_squared_u_ij_mu(i, j, ipoint) - - BEGIN_DOC - ! - ! -0.50 x \int r2 \phi_i(2) \phi_j(2) x v2^2 - ! [ v1^2 ((grad_1 u12)^2 + (grad_2 u12^2)]) - ! + u12^2 (grad_1 v1)^2 - ! + 2 u12 v1 (grad_1 u12) . (grad_1 v1) - ! - END_DOC - - - implicit none - - integer, intent(in) :: i, j, ipoint - - integer :: jpoint - double precision :: r1(3), r2(3) - double precision :: tmp_x, tmp_y, tmp_z, r12 - double precision :: dx1_v1, dy1_v1, dz1_v1, grad_u12(3) - double precision :: tmp1, v1_tmp, v2_tmp, u12_tmp - double precision :: fst_term, scd_term, thd_term, tmp - - double precision, external :: ao_value - double precision, external :: j1b_nucl - double precision, external :: j12_mu - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num - - r1(1) = final_grid_points(1,ipoint) - r1(2) = final_grid_points(2,ipoint) - r1(3) = final_grid_points(3,ipoint) - - num_gradu_squared_u_ij_mu = 0.d0 - do jpoint = 1, n_points_final_grid - - r2(1) = final_grid_points(1,jpoint) - r2(2) = final_grid_points(2,jpoint) - r2(3) = final_grid_points(3,jpoint) - - tmp_x = r1(1) - r2(1) - tmp_y = r1(2) - r2(2) - tmp_z = r1(3) - r2(3) - r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) - - dx1_v1 = grad_x_j1b_nucl_num(r1) - dy1_v1 = grad_y_j1b_nucl_num(r1) - dz1_v1 = grad_z_j1b_nucl_num(r1) - - call grad1_j12_mu(r1, r2, grad_u12) - - tmp1 = 1.d0 - derf(mu_erf * r12) - v1_tmp = j1b_nucl(r1) - v2_tmp = j1b_nucl(r2) - u12_tmp = j12_mu(r1, r2) - - fst_term = 0.5d0 * tmp1 * tmp1 * v1_tmp * v1_tmp - scd_term = u12_tmp * u12_tmp * (dx1_v1*dx1_v1 + dy1_v1*dy1_v1 + dz1_v1*dz1_v1) - thd_term = 2.d0 * v1_tmp * u12_tmp * (dx1_v1*grad_u12(1) + dy1_v1*grad_u12(2) + dz1_v1*grad_u12(3)) - - tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * (fst_term + scd_term + thd_term) * v2_tmp * v2_tmp - - num_gradu_squared_u_ij_mu += tmp - enddo - - return -end function num_gradu_squared_u_ij_mu +end ! 
--- @@ -388,11 +317,11 @@ double precision function num_grad12_j12(i, j, ipoint) double precision :: fst_term, scd_term, thd_term, tmp double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl double precision, external :: j12_mu - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num + double precision, external :: grad_x_env_nucl_num + double precision, external :: grad_y_env_nucl_num + double precision, external :: grad_z_env_nucl_num r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) @@ -410,15 +339,15 @@ double precision function num_grad12_j12(i, j, ipoint) tmp_z = r1(3) - r2(3) r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) - dx1_v1 = grad_x_j1b_nucl_num(r1) - dy1_v1 = grad_y_j1b_nucl_num(r1) - dz1_v1 = grad_z_j1b_nucl_num(r1) + dx1_v1 = grad_x_env_nucl_num(r1) + dy1_v1 = grad_y_env_nucl_num(r1) + dz1_v1 = grad_z_env_nucl_num(r1) call grad1_j12_mu(r1, r2, grad_u12) tmp1 = 1.d0 - derf(mu_erf * r12) - v1_tmp = j1b_nucl(r1) - v2_tmp = j1b_nucl(r2) + v1_tmp = env_nucl(r1) + v2_tmp = env_nucl(r2) u12_tmp = j12_mu(r1, r2) fst_term = 0.5d0 * tmp1 * tmp1 * v1_tmp * v1_tmp @@ -429,11 +358,11 @@ double precision function num_grad12_j12(i, j, ipoint) enddo return -end function num_grad12_j12 +end ! --- -double precision function num_u12sq_j1bsq(i, j, ipoint) +double precision function num_u12sq_envsq(i, j, ipoint) BEGIN_DOC ! @@ -454,17 +383,17 @@ double precision function num_u12sq_j1bsq(i, j, ipoint) double precision :: fst_term, scd_term, thd_term, tmp double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl double precision, external :: j12_mu - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num + double precision, external :: grad_x_env_nucl_num + double precision, external :: grad_y_env_nucl_num + double precision, external :: grad_z_env_nucl_num r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - num_u12sq_j1bsq = 0.d0 + num_u12sq_envsq = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) @@ -476,30 +405,30 @@ double precision function num_u12sq_j1bsq(i, j, ipoint) tmp_z = r1(3) - r2(3) r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) - dx1_v1 = grad_x_j1b_nucl_num(r1) - dy1_v1 = grad_y_j1b_nucl_num(r1) - dz1_v1 = grad_z_j1b_nucl_num(r1) + dx1_v1 = grad_x_env_nucl_num(r1) + dy1_v1 = grad_y_env_nucl_num(r1) + dz1_v1 = grad_z_env_nucl_num(r1) call grad1_j12_mu(r1, r2, grad_u12) tmp1 = 1.d0 - derf(mu_erf * r12) - v1_tmp = j1b_nucl(r1) - v2_tmp = j1b_nucl(r2) + v1_tmp = env_nucl(r1) + v2_tmp = env_nucl(r2) u12_tmp = j12_mu(r1, r2) scd_term = u12_tmp * u12_tmp * (dx1_v1*dx1_v1 + dy1_v1*dy1_v1 + dz1_v1*dz1_v1) tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * scd_term * v2_tmp * v2_tmp - num_u12sq_j1bsq += tmp + num_u12sq_envsq += tmp enddo return -end function num_u12sq_j1bsq +end ! --- -double precision function num_u12_grad1_u12_j1b_grad1_j1b(i, j, ipoint) +double precision function num_u12_grad1_u12_env_grad1_env(i, j, ipoint) BEGIN_DOC ! 
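+ ! Numerical reference for the envelope cross term entering [grad_1 u]^2:
+ ! -(1/2) \int dr2 \phi_i(r2) \phi_j(r2) v(r2)^2 x 2 u12 v(r1) [grad_1 u12 . grad_1 v(r1)]
+ ! (accumulated as thd_term in the loop below)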
@@ -520,17 +449,17 @@ double precision function num_u12_grad1_u12_j1b_grad1_j1b(i, j, ipoint) double precision :: fst_term, scd_term, thd_term, tmp double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl double precision, external :: j12_mu - double precision, external :: grad_x_j1b_nucl_num - double precision, external :: grad_y_j1b_nucl_num - double precision, external :: grad_z_j1b_nucl_num + double precision, external :: grad_x_env_nucl_num + double precision, external :: grad_y_env_nucl_num + double precision, external :: grad_z_env_nucl_num r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) - num_u12_grad1_u12_j1b_grad1_j1b = 0.d0 + num_u12_grad1_u12_env_grad1_env = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) @@ -542,34 +471,34 @@ double precision function num_u12_grad1_u12_j1b_grad1_j1b(i, j, ipoint) tmp_z = r1(3) - r2(3) r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) - dx1_v1 = grad_x_j1b_nucl_num(r1) - dy1_v1 = grad_y_j1b_nucl_num(r1) - dz1_v1 = grad_z_j1b_nucl_num(r1) + dx1_v1 = grad_x_env_nucl_num(r1) + dy1_v1 = grad_y_env_nucl_num(r1) + dz1_v1 = grad_z_env_nucl_num(r1) call grad1_j12_mu(r1, r2, grad_u12) tmp1 = 1.d0 - derf(mu_erf * r12) - v1_tmp = j1b_nucl(r1) - v2_tmp = j1b_nucl(r2) + v1_tmp = env_nucl(r1) + v2_tmp = env_nucl(r2) u12_tmp = j12_mu(r1, r2) thd_term = 2.d0 * v1_tmp * u12_tmp * (dx1_v1*grad_u12(1) + dy1_v1*grad_u12(2) + dz1_v1*grad_u12(3)) tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * thd_term * v2_tmp * v2_tmp - num_u12_grad1_u12_j1b_grad1_j1b += tmp + num_u12_grad1_u12_env_grad1_env += tmp enddo return -end function num_u12_grad1_u12_j1b_grad1_j1b +end ! --- -subroutine num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) +subroutine num_int2_u_grad1u_total_env2(i, j, ipoint, integ) BEGIN_DOC ! - ! \int dr2 u12 (grad_1 u12) \phi_i(r2) \phi_j(r2) x v_1b(r2)^2 + ! \int dr2 u12 (grad_1 u12) \phi_i(r2) \phi_j(r2) x v_env(r2)^2 ! END_DOC @@ -584,7 +513,7 @@ subroutine num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) double precision :: tmp_x, tmp_y, tmp_z double precision, external :: ao_value - double precision, external :: j1b_nucl + double precision, external :: env_nucl double precision, external :: j12_mu r1(1) = final_grid_points(1,ipoint) @@ -604,7 +533,7 @@ subroutine num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) r12 = dsqrt( dx * dx + dy * dy + dz * dz ) if(r12 .lt. 1d-10) cycle - tmp0 = j1b_nucl(r2) + tmp0 = env_nucl(r2) tmp1 = 0.5d0 * j12_mu(r1, r2) * (1.d0 - derf(mu_erf * r12)) / r12 tmp2 = tmp0 * tmp0 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) @@ -618,6 +547,6 @@ subroutine num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) integ(3) = tmp_z return -end subroutine num_int2_u_grad1u_total_j1b2 +end ! --- diff --git a/plugins/local/non_h_ints_mu/print_j1ecoef_info.irp.f b/plugins/local/non_h_ints_mu/print_j1ecoef_info.irp.f new file mode 100644 index 00000000..feb2685a --- /dev/null +++ b/plugins/local/non_h_ints_mu/print_j1ecoef_info.irp.f @@ -0,0 +1,94 @@ + +! --- + +program print_j1ecoef_info + + implicit none + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + if(tc_integ_type .eq. "numeric") then + my_extra_grid_becke = .True. 
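+ ! the extra (r2) Becke grid is only needed when the TC integrals are evaluated
+ ! by a full numerical quadrature over r1 and r2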
+ PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + endif + + call print_j1ecoef() + +end + +! --- + +subroutine print_j1ecoef() + + implicit none + integer :: i, j, ij + integer :: ierr + logical :: exists + character(len=10) :: ni, nj + double precision, allocatable :: coef_fit2(:) + + PROVIDE ao_l_char_space + + allocate(coef_fit2(ao_num*ao_num)) + + if(mpi_master) then + call ezfio_has_jastrow_j1e_coef_ao2(exists) + endif + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + IRP_IF MPI + include 'mpif.h' + call MPI_BCAST(coef_fit2, ao_num*ao_num, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_coef_ao2 with MPI' + endif + IRP_ENDIF + if(exists) then + if(mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: j1e_coef_ao2 ] <<<<< ..' + call ezfio_get_jastrow_j1e_coef_ao2(coef_fit2) + IRP_IF MPI + call MPI_BCAST(coef_fit2, ao_num*ao_num, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1e_coef_ao2 with MPI' + endif + IRP_ENDIF + endif + else + + call get_j1e_coef_fit_ao2(ao_num*ao_num, coef_fit2) + call ezfio_set_jastrow_j1e_coef_ao2(coef_fit2) + + endif + + + do i = 1, ao_num + write(ni, '(I0)') ao_l(i)+1 + do j = 1, ao_num + write(nj, '(I0)') ao_l(j)+1 + ij = (i-1)*ao_num + j + print *, trim(adjustl(ni)) // trim(adjustl(ao_l_char_space(i))), " " & + , trim(adjustl(nj)) // trim(adjustl(ao_l_char_space(j))), " " & + , dabs(coef_fit2(ij)) + enddo +! print *, ' ' + enddo + + + deallocate(coef_fit2) + + return +end + +! --- + + diff --git a/plugins/local/non_h_ints_mu/tc_integ.irp.f b/plugins/local/non_h_ints_mu/tc_integ.irp.f new file mode 100644 index 00000000..775a9e4c --- /dev/null +++ b/plugins/local/non_h_ints_mu/tc_integ.irp.f @@ -0,0 +1,398 @@ + +BEGIN_PROVIDER [double precision, int2_grad1_u12_ao, (ao_num, ao_num, n_points_final_grid, 3)] + + BEGIN_DOC + ! + ! int2_grad1_u12_ao(i,j,ipoint,:) = \int dr2 [\grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) + ! + ! where r1 = r(ipoint) + ! + END_DOC + + implicit none + integer :: ipoint, i, j, m, jpoint + double precision :: time0, time1 + double precision :: x, y, z, r2 + double precision :: dx, dy, dz + double precision :: tmp_ct + double precision :: tmp0, tmp1, tmp2 + double precision :: tmp0_x, tmp0_y, tmp0_z + double precision :: tmp1_x, tmp1_y, tmp1_z + + PROVIDE j2e_type + PROVIDE j1e_type + + call wall_time(time0) + + print*, ' providing int2_grad1_u12_ao ...' + + if(read_tc_integ) then + + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + + else + + if(tc_integ_type .eq. "analytic") then + + write(*, '(A, A, A)') ' Error: The integration type ', trim(tc_integ_type), ' has not been implemented yet.' + stop + + elseif(tc_integ_type .eq. "numeric") then + + print *, ' Numerical integration over r1 and r2 will be performed' + + ! TODO combine 1shot & int2_grad1_u12_ao_num + + PROVIDE int2_grad1_u12_ao_num + int2_grad1_u12_ao = int2_grad1_u12_ao_num + + !PROVIDE int2_grad1_u12_ao_num_1shot + !int2_grad1_u12_ao = int2_grad1_u12_ao_num_1shot + + elseif(tc_integ_type .eq. 
"semi-analytic") then + + print*, ' Numerical integration over r1, with analytical integration over r2' + + ! --- + + if(j2e_type .eq. "None") then + + int2_grad1_u12_ao = 0.d0 + + elseif( (j2e_type .eq. "Mu") .and. & + ( (env_type .eq. "None") .or. (env_type .eq. "Prod_Gauss") .or. (env_type .eq. "Sum_Gauss") ) ) then + + PROVIDE int2_grad1_u2e_ao + int2_grad1_u12_ao = int2_grad1_u2e_ao + + else + + print *, ' Error in int2_grad1_u12_ao: Unknown Jastrow' + stop + + endif ! j2e_type + + ! --- + + if(j1e_type .ne. "None") then + + PROVIDE elec_num + PROVIDE ao_overlap + PROVIDE j1e_gradx j1e_grady j1e_gradz + + tmp_ct = 1.d0 / (dble(elec_num) - 1.d0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, tmp0_x, tmp0_y, tmp0_z) & + !$OMP SHARED (ao_num, n_points_final_grid, tmp_ct, ao_overlap, & + !$OMP j1e_gradx, j1e_grady, j1e_gradz, int2_grad1_u12_ao) + !$OMP DO + do ipoint = 1, n_points_final_grid + tmp0_x = tmp_ct * j1e_gradx(ipoint) + tmp0_y = tmp_ct * j1e_grady(ipoint) + tmp0_z = tmp_ct * j1e_gradz(ipoint) + do j = 1, ao_num + do i = 1, ao_num + int2_grad1_u12_ao(i,j,ipoint,1) = int2_grad1_u12_ao(i,j,ipoint,1) + tmp0_x * ao_overlap(i,j) + int2_grad1_u12_ao(i,j,ipoint,2) = int2_grad1_u12_ao(i,j,ipoint,2) + tmp0_y * ao_overlap(i,j) + int2_grad1_u12_ao(i,j,ipoint,3) = int2_grad1_u12_ao(i,j,ipoint,3) + tmp0_z * ao_overlap(i,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + endif ! j1e_type + + ! --- + + else + + write(*, '(A, A, A)') ' Error: The integration type ', trim(tc_integ_type), ' has not been implemented yet' + stop + + endif ! tc_integ_type + + endif ! read_tc_integ + + + if(write_tc_integ .and. mpi_master) then + + print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") + call ezfio_set_work_empty(.False.) + write(11) int2_grad1_u12_ao + close(11) + call ezfio_set_tc_keywords_io_tc_integ('Read') + endif + + call wall_time(time1) + print*, ' wall time for int2_grad1_u12_ao (min) =', (time1-time0)/60.d0 + call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, int2_grad1_u12_square_ao, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int2_grad1_u12_square_ao = -(1/2) x int dr2 chi_l(r2) chi_j(r2) [grad_1 u(r1,r2)]^2 + ! + END_DOC + + implicit none + integer :: ipoint, i, j, m, jpoint + double precision :: x, y, z, r2 + double precision :: dx, dy, dz, dr2 + double precision :: dx1, dy1, dz1, dx2, dy2, dz2, dr12 + double precision :: tmp_ct, tmp_ct1, tmp_ct2 + double precision :: tmp0, tmp1, tmp2 + double precision :: tmp3, tmp4, tmp5, tmp6 + double precision :: tmp0_x, tmp0_y, tmp0_z + double precision :: tmp1_x, tmp1_y, tmp1_z + double precision :: time0, time1 + + PROVIDE j2e_type + PROVIDE j1e_type + PROVIDE tc_integ_type + + call wall_time(time0) + + print*, ' providing int2_grad1_u12_square_ao ...' + + if(tc_integ_type .eq. "analytic") then + + write(*, '(A, A, A)') ' Error: The integration type ', trim(tc_integ_type), ' has not been implemented yet.' + stop + + elseif(tc_integ_type .eq. "numeric") then + + print *, ' Numerical integration over r1 and r2 will be performed' + + ! 
TODO combine 1shot & int2_grad1_u12_square_ao_num + + PROVIDE int2_grad1_u12_square_ao_num + int2_grad1_u12_square_ao = int2_grad1_u12_square_ao_num + + !PROVIDE int2_grad1_u12_square_ao_num_1shot + !int2_grad1_u12_square_ao = int2_grad1_u12_square_ao_num_1shot + + elseif(tc_integ_type .eq. "semi-analytic") then + + print*, ' Numerical integration over r1, with analytical integration over r2' + + ! --- + + if(j2e_type .eq. "None") then + + int2_grad1_u12_square_ao = 0.d0 + + elseif((j2e_type .eq. "Mu") .and. (env_type .eq. "None")) then + + PROVIDE int2_grad1u2_grad2u2 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, int2_grad1u2_grad2u2) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + int2_grad1_u12_square_ao(i,j,ipoint) = -0.5d0 * int2_grad1u2_grad2u2(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + FREE int2_grad1u2_grad2u2 + + elseif((j2e_type .eq. "Mu") .and. (env_type .eq. "Prod_Gauss")) then + + PROVIDE mu_erf + PROVIDE env_val env_grad + + if(use_ipp) then + + ! the term u12_grad1_u12_env_grad1_env is added directly for performance + PROVIDE u12sq_envsq grad12_j12 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, u12sq_envsq, grad12_j12) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + int2_grad1_u12_square_ao(i,j,ipoint) = u12sq_envsq(i,j,ipoint) + 0.5d0 * grad12_j12(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + FREE u12sq_envsq grad12_j12 + + else + + PROVIDE u12sq_envsq u12_grad1_u12_env_grad1_env grad12_j12 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, u12sq_envsq, grad12_j12, u12_grad1_u12_env_grad1_env) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + int2_grad1_u12_square_ao(i,j,ipoint) = u12sq_envsq(i,j,ipoint) + u12_grad1_u12_env_grad1_env(i,j,ipoint) + 0.5d0 * grad12_j12(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + FREE u12sq_envsq u12_grad1_u12_env_grad1_env grad12_j12 + + endif ! use_ipp + + elseif((j2e_type .eq. "Mu") .and. (env_type .eq. "Sum_Gauss")) then + + PROVIDE mu_erf + PROVIDE env_type env_val env_grad + + if(use_ipp) then + + ! 
do not free int2_u2_env2 here + PROVIDE int2_u2_env2 + PROVIDE int2_grad1u2_grad2u2_env2 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint, tmp0_x, tmp0_y, tmp0_z, tmp1, tmp2) & + !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, & + !$OMP env_val, env_grad, int2_u2_env2, int2_grad1u2_grad2u2_env2) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + tmp0_x = env_grad(1,ipoint) + tmp0_y = env_grad(2,ipoint) + tmp0_z = env_grad(3,ipoint) + tmp1 = -0.5d0 * (tmp0_x * tmp0_x + tmp0_y * tmp0_y + tmp0_z * tmp0_z) + tmp2 = 0.5d0 * env_val(ipoint) * env_val(ipoint) + do j = 1, ao_num + do i = 1, ao_num + int2_grad1_u12_square_ao(i,j,ipoint) = tmp1 * int2_u2_env2(i,j,ipoint) + tmp2 * int2_grad1u2_grad2u2_env2(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + FREE int2_grad1u2_grad2u2_env2 + + else + + PROVIDE u12sq_envsq u12_grad1_u12_env_grad1_env grad12_j12 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, u12sq_envsq, grad12_j12, u12_grad1_u12_env_grad1_env) + !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + int2_grad1_u12_square_ao(i,j,ipoint) = u12sq_envsq(i,j,ipoint) + u12_grad1_u12_env_grad1_env(i,j,ipoint) + 0.5d0 * grad12_j12(i,j,ipoint) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + FREE u12sq_envsq u12_grad1_u12_env_grad1_env grad12_j12 + + endif ! use_ipp + + else + + print *, ' Error in int2_grad1_u12_square_ao: Unknown Jastrow' + stop + + endif ! j2e_type + + ! --- + + if(j1e_type .ne. "None") then + + PROVIDE elec_num + PROVIDE ao_overlap + PROVIDE j1e_gradx j1e_grady j1e_gradz + PROVIDE int2_grad1_u2e_ao + + tmp_ct1 = -1.0d0 / (dble(elec_num) - 1.d0) + tmp_ct2 = -0.5d0 / ((dble(elec_num) - 1.d0) * (dble(elec_num) - 1.d0)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, dx, dy, dz, r2, & + !$OMP tmp0, tmp0_x, tmp0_y, tmp0_z) & + !$OMP SHARED (ao_num, n_points_final_grid, & + !$OMP tmp_ct1, tmp_ct2, ao_overlap, & + !$OMP j1e_gradx, j1e_grady, j1e_gradz, & + !$OMP int2_grad1_u2e_ao, int2_grad1_u12_square_ao) + !$OMP DO + do ipoint = 1, n_points_final_grid + + dx = j1e_gradx(ipoint) + dy = j1e_grady(ipoint) + dz = j1e_gradz(ipoint) + r2 = dx*dx + dy*dy + dz*dz + + tmp0 = tmp_ct2 * r2 + tmp0_x = tmp_ct1 * dx + tmp0_y = tmp_ct1 * dy + tmp0_z = tmp_ct1 * dz + + do j = 1, ao_num + do i = 1, ao_num + + int2_grad1_u12_square_ao(i,j,ipoint) = int2_grad1_u12_square_ao(i,j,ipoint) & + + tmp0 * ao_overlap(i,j) & + + tmp0_x * int2_grad1_u2e_ao(i,j,ipoint,1) & + + tmp0_y * int2_grad1_u2e_ao(i,j,ipoint,2) & + + tmp0_z * int2_grad1_u2e_ao(i,j,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + endif ! j1e_type + + ! --- + + else + + write(*, '(A, A, A)') ' Error: The integration type ', trim(tc_integ_type), ' has not been implemented yet' + stop + + endif ! tc_integ_type + + call wall_time(time1) + print*, ' wall time for int2_grad1_u12_square_ao (min) = ', (time1-time0) / 60.d0 + call print_memory_usage() + +END_PROVIDER + +! --- + diff --git a/plugins/local/non_h_ints_mu/tc_integ_an.irp.f b/plugins/local/non_h_ints_mu/tc_integ_an.irp.f deleted file mode 100644 index a6459761..00000000 --- a/plugins/local/non_h_ints_mu/tc_integ_an.irp.f +++ /dev/null @@ -1,248 +0,0 @@ - -BEGIN_PROVIDER [double precision, int2_grad1_u12_ao, (ao_num, ao_num, n_points_final_grid, 3)] - - BEGIN_DOC - ! - ! TODO - ! 
combine with int2_grad1_u12_square_ao to avoid repeated calculation ? - ! - ! int2_grad1_u12_ao(i,j,ipoint,:) = \int dr2 [-1 * \grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) - ! - ! where r1 = r(ipoint) - ! - ! if J(r1,r2) = u12 (j1b_type .eq. 1) - ! - ! int2_grad1_u12_ao(i,j,ipoint,:) = 0.5 x \int dr2 [(r1 - r2) (erf(mu * r12)-1)r_12] \phi_i(r2) \phi_j(r2) - ! = 0.5 * [ v_ij_erf_rk_cst_mu(i,j,ipoint) * r(:) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,:) ] - ! - ! if J(r1,r2) = u12 x v1 x v2 (j1b_type .eq. 3) - ! - ! int2_grad1_u12_ao(i,j,ipoint,:) = v1 x [ 0.5 x \int dr2 [(r1 - r2) (erf(mu * r12)-1)r_12] v2 \phi_i(r2) \phi_j(r2) ] - ! - \grad_1 v1 x [ \int dr2 u12 v2 \phi_i(r2) \phi_j(r2) ] - ! = 0.5 v_1b(ipoint) * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) * r(:) - ! - 0.5 v_1b(ipoint) * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,:) - ! - v_1b_grad[:,ipoint] * v_ij_u_cst_mu_j1b(i,j,ipoint) - ! - END_DOC - - implicit none - integer :: ipoint, i, j, m, jpoint - double precision :: time0, time1 - double precision :: x, y, z, w, tmp_x, tmp_y, tmp_z, tmp0, tmp1, tmp2 - - print*, ' providing int2_grad1_u12_ao ...' - call wall_time(time0) - - PROVIDE j1b_type - - if(read_tc_integ) then - - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") - read(11) int2_grad1_u12_ao - - else - - if(j1b_type .eq. 0) then - - PROVIDE v_ij_erf_rk_cst_mu x_v_ij_erf_rk_cst_mu - - int2_grad1_u12_ao = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, x, y, z, tmp1) & - !$OMP SHARED ( ao_num, n_points_final_grid, final_grid_points & - !$OMP , v_ij_erf_rk_cst_mu, x_v_ij_erf_rk_cst_mu, int2_grad1_u12_ao) - !$OMP DO SCHEDULE (static) - do ipoint = 1, n_points_final_grid - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - tmp1 = v_ij_erf_rk_cst_mu(i,j,ipoint) - int2_grad1_u12_ao(i,j,ipoint,1) = 0.5d0 * (tmp1 * x - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1)) - int2_grad1_u12_ao(i,j,ipoint,2) = 0.5d0 * (tmp1 * y - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2)) - int2_grad1_u12_ao(i,j,ipoint,3) = 0.5d0 * (tmp1 * z - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3)) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - elseif((j1b_type .eq. 3) .or. (j1b_type .eq. 
4)) then - - PROVIDE v_1b_grad - PROVIDE v_ij_erf_rk_cst_mu_j1b v_ij_u_cst_mu_j1b_an x_v_ij_erf_rk_cst_mu_j1b - - int2_grad1_u12_ao = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, x, y, z, tmp0, tmp1, tmp2, tmp_x, tmp_y, tmp_z) & - !$OMP SHARED ( ao_num, n_points_final_grid, final_grid_points, v_1b, v_1b_grad & - !$OMP , v_ij_erf_rk_cst_mu_j1b, v_ij_u_cst_mu_j1b_an, x_v_ij_erf_rk_cst_mu_j1b, int2_grad1_u12_ao) - !$OMP DO SCHEDULE (static) - do ipoint = 1, n_points_final_grid - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - tmp0 = 0.5d0 * v_1b(ipoint) - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) - tmp2 = v_ij_u_cst_mu_j1b_an(i,j,ipoint) - int2_grad1_u12_ao(i,j,ipoint,1) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) - tmp2 * tmp_x - int2_grad1_u12_ao(i,j,ipoint,2) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) - tmp2 * tmp_y - int2_grad1_u12_ao(i,j,ipoint,3) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) - tmp2 * tmp_z - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - FREE v_ij_erf_rk_cst_mu_j1b v_ij_u_cst_mu_j1b_an x_v_ij_erf_rk_cst_mu_j1b - - elseif(j1b_type .ge. 100) then - -! PROVIDE int2_grad1_u12_ao_num -! int2_grad1_u12_ao = int2_grad1_u12_ao_num - - PROVIDE int2_grad1_u12_ao_num_1shot - int2_grad1_u12_ao = int2_grad1_u12_ao_num_1shot - - else - - print *, ' j1b_type = ', j1b_type, 'not implemented yet' - stop - - endif - endif - - if(write_tc_integ.and.mpi_master) then - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") - call ezfio_set_work_empty(.False.) - write(11) int2_grad1_u12_ao - close(11) - call ezfio_set_tc_keywords_io_tc_integ('Read') - endif - - call wall_time(time1) - print*, ' wall time for int2_grad1_u12_ao =', time1-time0 - call print_memory_usage() - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, int2_grad1_u12_square_ao, (ao_num, ao_num, n_points_final_grid)] - - BEGIN_DOC - ! - ! int2_grad1_u12_square_ao = -(1/2) x int dr2 chi_l(r2) chi_j(r2) [grad_1 u(r1,r2)]^2 - ! - END_DOC - - implicit none - integer :: ipoint, i, j, m, jpoint - double precision :: time0, time1 - double precision :: x, y, z, w, tmp_x, tmp_y, tmp_z, tmp0, tmp1, tmp2 - - print*, ' providing int2_grad1_u12_square_ao ...' - call wall_time(time0) - - PROVIDE j1b_type - - if(j1b_type .eq. 0) then - - PROVIDE int2_grad1u2_grad2u2 - - int2_grad1_u12_square_ao = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, int2_grad1u2_grad2u2) - !$OMP DO SCHEDULE (static) - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - int2_grad1_u12_square_ao(i,j,ipoint) = int2_grad1u2_grad2u2(i,j,ipoint) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - elseif((j1b_type .eq. 3) .or. (j1b_type .eq. 4)) then - - if(use_ipp) then - - ! 
the term u12_grad1_u12_j1b_grad1_j1b is added directly for performance - PROVIDE u12sq_j1bsq grad12_j12 - - int2_grad1_u12_square_ao = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, u12sq_j1bsq, grad12_j12) - !$OMP DO SCHEDULE (static) - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - int2_grad1_u12_square_ao(i,j,ipoint) = u12sq_j1bsq(i,j,ipoint) + 0.5d0 * grad12_j12(i,j,ipoint) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - FREE u12sq_j1bsq grad12_j12 - - else - - PROVIDE u12sq_j1bsq u12_grad1_u12_j1b_grad1_j1b grad12_j12 - - int2_grad1_u12_square_ao = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - !$OMP SHARED (int2_grad1_u12_square_ao, ao_num, n_points_final_grid, u12sq_j1bsq, grad12_j12, u12_grad1_u12_j1b_grad1_j1b) - !$OMP DO SCHEDULE (static) - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - int2_grad1_u12_square_ao(i,j,ipoint) = u12sq_j1bsq(i,j,ipoint) + u12_grad1_u12_j1b_grad1_j1b(i,j,ipoint) + 0.5d0 * grad12_j12(i,j,ipoint) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - FREE u12sq_j1bsq u12_grad1_u12_j1b_grad1_j1b grad12_j12 - - endif - - elseif(j1b_type .ge. 100) then - - ! PROVIDE int2_grad1_u12_square_ao_num - ! int2_grad1_u12_square_ao = int2_grad1_u12_square_ao_num - - PROVIDE int2_grad1_u12_square_ao_num_1shot - int2_grad1_u12_square_ao = int2_grad1_u12_square_ao_num_1shot - - else - - print *, ' j1b_type = ', j1b_type, 'not implemented yet' - stop - - endif - - call wall_time(time1) - print*, ' wall time for int2_grad1_u12_square_ao =', time1-time0 - call print_memory_usage() - -END_PROVIDER - -! --- - diff --git a/plugins/local/non_h_ints_mu/tc_integ_num.irp.f b/plugins/local/non_h_ints_mu/tc_integ_num.irp.f index 5a088331..e5d75c3d 100644 --- a/plugins/local/non_h_ints_mu/tc_integ_num.irp.f +++ b/plugins/local/non_h_ints_mu/tc_integ_num.irp.f @@ -1,10 +1,12 @@ +! --- + BEGIN_PROVIDER [double precision, int2_grad1_u12_ao_num , (ao_num,ao_num,n_points_final_grid,3)] &BEGIN_PROVIDER [double precision, int2_grad1_u12_square_ao_num, (ao_num,ao_num,n_points_final_grid) ] BEGIN_DOC ! - ! int2_grad1_u12_ao_num(i,j,ipoint,:) = \int dr2 [-1 * \grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) + ! int2_grad1_u12_ao_num(i,j,ipoint,:) = \int dr2 [\grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) ! ! int2_grad1_u12_square_ao_num = -(1/2) x int dr2 chi_l(r2) chi_j(r2) [grad_1 u(r1,r2)]^2 ! @@ -43,7 +45,6 @@ !$OMP END DO !$OMP END PARALLEL - ! n_points_final_grid = n_blocks * n_pass + n_rest call total_memory(mem) mem = max(1.d0, qp_max_mem - mem) n_double = mem * 1.d8 @@ -71,10 +72,10 @@ !$OMP DO do i_blocks = 1, n_blocks ipoint = ii - 1 + i_blocks ! r1 - call get_grad1_u12_withsq_r1_seq(final_grid_points(1,ipoint), n_points_extra_final_grid, tmp_grad1_u12(1,i_blocks,1) & - , tmp_grad1_u12(1,i_blocks,2) & - , tmp_grad1_u12(1,i_blocks,3) & - , tmp_grad1_u12_squared(1,i_blocks)) + call get_grad1_u12_withsq_r1_seq(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_blocks,1) & + , tmp_grad1_u12(1,i_blocks,2) & + , tmp_grad1_u12(1,i_blocks,3) & + , tmp_grad1_u12_squared(1,i_blocks)) enddo !$OMP END DO !$OMP END PARALLEL @@ -107,10 +108,10 @@ !$OMP DO do i_rest = 1, n_rest ipoint = ii - 1 + i_rest ! 
r1 - call get_grad1_u12_withsq_r1_seq(final_grid_points(1,ipoint), n_points_extra_final_grid, tmp_grad1_u12(1,i_rest,1) & - , tmp_grad1_u12(1,i_rest,2) & - , tmp_grad1_u12(1,i_rest,3) & - , tmp_grad1_u12_squared(1,i_rest)) + call get_grad1_u12_withsq_r1_seq(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_rest,1) & + , tmp_grad1_u12(1,i_rest,2) & + , tmp_grad1_u12(1,i_rest,3) & + , tmp_grad1_u12_squared(1,i_rest)) enddo !$OMP END DO !$OMP END PARALLEL @@ -142,7 +143,7 @@ END_PROVIDER BEGIN_DOC ! - ! int2_grad1_u12_ao_num_1shot(i,j,ipoint,:) = \int dr2 [-1 * \grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) + ! int2_grad1_u12_ao_num_1shot(i,j,ipoint,:) = \int dr2 [\grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) ! ! int2_grad1_u12_square_ao_num_1shot = -(1/2) x int dr2 chi_l(r2) chi_j(r2) [grad_1 u(r1,r2)]^2 ! @@ -176,9 +177,7 @@ END_PROVIDER !$OMP END PARALLEL do m = 1, 3 - !call dgemm( "T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, -1.d0 & - ! this work also because of the symmetry in K(1,2) and sign compensation in L(1,2,3) - call dgemm( "T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, +1.d0 & + call dgemm( "T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & , tmp(1,1,1), n_points_extra_final_grid, grad1_u12_num(1,1,m), n_points_extra_final_grid & , 0.d0, int2_grad1_u12_ao_num_1shot(1,1,1,m), ao_num*ao_num) enddo diff --git a/plugins/local/non_h_ints_mu/test_non_h_ints.irp.f b/plugins/local/non_h_ints_mu/test_non_h_ints.irp.f index 84674fa0..464a1c1f 100644 --- a/plugins/local/non_h_ints_mu/test_non_h_ints.irp.f +++ b/plugins/local/non_h_ints_mu/test_non_h_ints.irp.f @@ -11,7 +11,7 @@ program test_non_h my_n_pt_a_grid = tc_grid1_a touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - if(j1b_type .ge. 100) then + if(tc_integ_type .eq. "numeric") then my_extra_grid_becke = .True. PROVIDE tc_grid2_a tc_grid2_r my_n_pt_r_extra_grid = tc_grid2_r @@ -19,111 +19,56 @@ program test_non_h touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid endif + PROVIDE j2e_type + PROVIDE j1e_type + PROVIDE env_type + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type - !call routine_grad_squared() !call routine_fit() !call test_ipp() - !call test_v_ij_u_cst_mu_j1b_an() + !call test_v_ij_u_cst_mu_env_an() - call test_int2_grad1_u12_square_ao() - call test_int2_grad1_u12_ao() + !call test_int2_grad1_u12_square_ao() + !call test_int2_grad1_u12_ao() + + !call test_j1e_grad() + + !call test_j1e_fit_ao() + + !call test_tc_grad_and_lapl_ao_new() + !call test_tc_grad_square_ao_new() + + !call test_fit_coef_A1() + !call test_fit_coef_inv() + + call test_fit_coef_testinvA() end ! --- -subroutine routine_lapl_grad - implicit none - integer :: i,j,k,l - double precision :: grad_lapl, get_ao_tc_sym_two_e_pot,new,accu,contrib - double precision :: ao_two_e_integral_erf,get_ao_two_e_integral,count_n,accu_relat -! !!!!!!!!!!!!!!!!!!!!! WARNING -! THIS ROUTINE MAKES SENSE ONLY IF HAND MODIFIED coef_gauss_eff_pot(1:n_max_fit_slat) = 0. to cancel (1-erf(mu*r12))^2 - accu = 0.d0 - accu_relat = 0.d0 - count_n = 0.d0 - do i = 1, ao_num - do j = 1, ao_num - do k = 1, ao_num - do l = 1, ao_num - grad_lapl = get_ao_tc_sym_two_e_pot(i,j,k,l,ao_tc_sym_two_e_pot_map) ! pure gaussian part : comes from Lapl - grad_lapl += ao_two_e_integral_erf(i, k, j, l) ! erf(mu r12)/r12 : comes from Lapl - grad_lapl += ao_non_hermit_term_chemist(k,i,l,j) ! \grad u(r12) . 
grad - new = tc_grad_and_lapl_ao(k,i,l,j) - new += get_ao_two_e_integral(i,j,k,l,ao_integrals_map) - contrib = dabs(new - grad_lapl) - if(dabs(grad_lapl).gt.1.d-12)then - count_n += 1.d0 - accu_relat += 2.0d0 * contrib/dabs(grad_lapl+new) - endif - if(contrib.gt.1.d-10)then - print*,i,j,k,l - print*,grad_lapl,new,contrib - print*,2.0d0*contrib/dabs(grad_lapl+new+1.d-12) - endif - accu += contrib - enddo - enddo - enddo - enddo - print*,'accu = ',accu/count_n - print*,'accu/rel = ',accu_relat/count_n - -end - -subroutine routine_grad_squared - implicit none - integer :: i,j,k,l - double precision :: grad_squared, get_ao_tc_sym_two_e_pot,new,accu,contrib - double precision :: count_n,accu_relat -! !!!!!!!!!!!!!!!!!!!!! WARNING -! THIS ROUTINE MAKES SENSE ONLY IF HAND MODIFIED coef_gauss_eff_pot(n_max_fit_slat:n_max_fit_slat+1) = 0. to cancel exp(-'mu*r12)^2) - accu = 0.d0 - accu_relat = 0.d0 - count_n = 0.d0 - do i = 1, ao_num - do j = 1, ao_num - do k = 1, ao_num - do l = 1, ao_num - grad_squared = get_ao_tc_sym_two_e_pot(i,j,k,l,ao_tc_sym_two_e_pot_map) ! pure gaussian part : comes from Lapl - new = tc_grad_square_ao(k,i,l,j) - contrib = dabs(new - grad_squared) - if(dabs(grad_squared).gt.1.d-12)then - count_n += 1.d0 - accu_relat += 2.0d0 * contrib/dabs(grad_squared+new) - endif - if(contrib.gt.1.d-10)then - print*,i,j,k,l - print*,grad_squared,new,contrib - print*,2.0d0*contrib/dabs(grad_squared+new+1.d-12) - endif - accu += contrib - enddo - enddo - enddo - enddo - print*,'accu = ',accu/count_n - print*,'accu/rel = ',accu_relat/count_n - -end - subroutine routine_fit - implicit none - integer :: i,nx - double precision :: dx,xmax,x,j_mu,j_mu_F_x_j,j_mu_fit_gauss - nx = 500 - xmax = 5.d0 - dx = xmax/dble(nx) - x = 0.d0 - print*,'coucou',mu_erf - do i = 1, nx - write(33,'(100(F16.10,X))') x,j_mu(x),j_mu_F_x_j(x),j_mu_fit_gauss(x) - x += dx - enddo + + implicit none + integer :: i,nx + double precision :: dx,xmax,x,j_mu,j_mu_F_x_j,j_mu_fit_gauss + + nx = 500 + xmax = 5.d0 + dx = xmax/dble(nx) + x = 0.d0 + print*,'coucou',mu_erf + do i = 1, nx + write(33,'(100(F16.10,X))') x,j_mu(x),j_mu_F_x_j(x),j_mu_fit_gauss(x) + x += dx + enddo end +! --- subroutine test_ipp() @@ -145,7 +90,7 @@ subroutine test_ipp() allocate(I1(ao_num,ao_num,ao_num,ao_num)) I1 = 0.d0 - PROVIDE u12_grad1_u12_j1b_grad1_j1b + PROVIDE u12_grad1_u12_env_grad1_env !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -163,7 +108,7 @@ subroutine test_ipp() !$OMP END PARALLEL call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , u12_grad1_u12_j1b_grad1_j1b(1,1,1), ao_num*ao_num, b_mat(1,1,1), n_points_final_grid & + , u12_grad1_u12_env_grad1_env(1,1,1), ao_num*ao_num, b_mat(1,1,1), n_points_final_grid & , 0.d0, I1, ao_num*ao_num) ! 
--- @@ -173,14 +118,14 @@ subroutine test_ipp() allocate(I2(ao_num,ao_num,ao_num,ao_num)) I2 = 0.d0 - PROVIDE int2_u2_j1b2 + PROVIDE int2_u2_env2 b_mat = 0.d0 !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & !$OMP SHARED (aos_in_r_array_transp, b_mat, ao_num, n_points_final_grid, final_weight_at_r_vector, & - !$OMP v_1b_square_grad, v_1b_square_lapl, aos_grad_in_r_array_transp_bis) + !$OMP env_square_grad, env_square_lapl, aos_grad_in_r_array_transp_bis) !$OMP DO SCHEDULE (static) do i = 1, ao_num do k = 1, ao_num @@ -191,10 +136,10 @@ subroutine test_ipp() ao_i_r = aos_in_r_array_transp(ipoint,i) ao_k_r = aos_in_r_array_transp(ipoint,k) - b_mat(ipoint,k,i) = weight1 * ( ao_k_r * ao_i_r * v_1b_square_lapl(ipoint) & - + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * v_1b_square_grad(ipoint,1) & - + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * v_1b_square_grad(ipoint,2) & - + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * v_1b_square_grad(ipoint,3) ) + b_mat(ipoint,k,i) = weight1 * ( ao_k_r * ao_i_r * env_square_lapl(ipoint) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * env_square_grad(ipoint,1) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * env_square_grad(ipoint,2) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * env_square_grad(ipoint,3) ) enddo enddo enddo @@ -202,7 +147,7 @@ subroutine test_ipp() !$OMP END PARALLEL call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , int2_u2_j1b2(1,1,1), ao_num*ao_num, b_mat(1,1,1), n_points_final_grid & + , int2_u2_env2(1,1,1), ao_num*ao_num, b_mat(1,1,1), n_points_final_grid & , 0.d0, I2, ao_num*ao_num) ! 
--- @@ -268,7 +213,7 @@ subroutine I_grade_gradu_naive1(i, j, k, l, int) double precision :: weight2_x, weight2_y, weight2_z double precision :: aor_i, aor_j, aor_k, aor_l double precision :: e1_val, e2_val, e1_der(3), u12_val, u12_der(3) - double precision, external :: j1b_nucl, j12_mu + double precision, external :: env_nucl, j12_mu int = 0.d0 @@ -281,8 +226,8 @@ subroutine I_grade_gradu_naive1(i, j, k, l, int) aor_i = aos_in_r_array_transp(ipoint,i) aor_k = aos_in_r_array_transp(ipoint,k) - e1_val = j1b_nucl(r1) - call grad1_j1b_nucl(r1, e1_der) + e1_val = env_nucl(r1) + call grad1_env_nucl(r1, e1_der) weight1_x = aor_i * aor_k * e1_val * final_weight_at_r_vector(ipoint) * e1_der(1) weight1_y = aor_i * aor_k * e1_val * final_weight_at_r_vector(ipoint) * e1_der(2) @@ -297,7 +242,7 @@ subroutine I_grade_gradu_naive1(i, j, k, l, int) aor_j = aos_in_r_array_extra_transp(jpoint,j) aor_l = aos_in_r_array_extra_transp(jpoint,l) - e2_val = j1b_nucl(r2) + e2_val = env_nucl(r2) u12_val = j12_mu(r1, r2) call grad1_j12_mu(r1, r2, u12_der) @@ -326,7 +271,7 @@ subroutine I_grade_gradu_naive2(i, j, k, l, int) double precision :: weight2_x, weight2_y, weight2_z double precision :: aor_i, aor_j, aor_k, aor_l double precision :: e1_square_der(3), e2_val, u12_square_der(3) - double precision, external :: j1b_nucl + double precision, external :: env_nucl int = 0.d0 @@ -339,7 +284,7 @@ subroutine I_grade_gradu_naive2(i, j, k, l, int) aor_i = aos_in_r_array_transp(ipoint,i) aor_k = aos_in_r_array_transp(ipoint,k) - call grad1_j1b_nucl_square_num(r1, e1_square_der) + call grad1_env_nucl_square_num(r1, e1_square_der) weight1_x = aor_i * aor_k * final_weight_at_r_vector(ipoint) * e1_square_der(1) weight1_y = aor_i * aor_k * final_weight_at_r_vector(ipoint) * e1_square_der(2) @@ -354,7 +299,7 @@ subroutine I_grade_gradu_naive2(i, j, k, l, int) aor_j = aos_in_r_array_extra_transp(jpoint,j) aor_l = aos_in_r_array_extra_transp(jpoint,l) - e2_val = j1b_nucl(r2) + e2_val = env_nucl(r2) call grad1_j12_mu_square_num(r1, r2, u12_square_der) weight2_x = aor_j * aor_l * e2_val * e2_val * final_weight_at_r_vector_extra(jpoint) * u12_square_der(1) @@ -380,7 +325,7 @@ subroutine I_grade_gradu_naive3(i, j, k, l, int) double precision :: weight1, weight2 double precision :: aor_j, aor_l double precision :: grad(3), e2_val, u12_val - double precision, external :: j1b_nucl, j12_mu + double precision, external :: env_nucl, j12_mu int = 0.d0 @@ -403,7 +348,7 @@ subroutine I_grade_gradu_naive3(i, j, k, l, int) aor_j = aos_in_r_array_extra_transp(jpoint,j) aor_l = aos_in_r_array_extra_transp(jpoint,l) - e2_val = j1b_nucl(r2) + e2_val = env_nucl(r2) u12_val = j12_mu(r1, r2) weight2 = aor_j * aor_l * e2_val * e2_val * u12_val * u12_val * final_weight_at_r_vector_extra(jpoint) @@ -427,7 +372,7 @@ subroutine I_grade_gradu_naive4(i, j, k, l, int) double precision :: weight1, weight2 double precision :: aor_j, aor_l, aor_k, aor_i double precision :: grad(3), e2_val, u12_val - double precision, external :: j1b_nucl, j12_mu + double precision, external :: env_nucl, j12_mu int = 0.d0 @@ -440,10 +385,10 @@ subroutine I_grade_gradu_naive4(i, j, k, l, int) aor_i = aos_in_r_array_transp(ipoint,i) aor_k = aos_in_r_array_transp(ipoint,k) - weight1 = final_weight_at_r_vector(ipoint) * ( aor_k * aor_i * v_1b_square_lapl(ipoint) & - + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,1) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * v_1b_square_grad(ipoint,1) & - + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,2) + aor_i * 
aos_grad_in_r_array_transp_bis(ipoint,k,2)) * v_1b_square_grad(ipoint,2) & - + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,3) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * v_1b_square_grad(ipoint,3) ) + weight1 = final_weight_at_r_vector(ipoint) * ( aor_k * aor_i * env_square_lapl(ipoint) & + + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,1) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * env_square_grad(ipoint,1) & + + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,2) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * env_square_grad(ipoint,2) & + + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,3) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * env_square_grad(ipoint,3) ) do jpoint = 1, n_points_extra_final_grid ! r2 @@ -454,7 +399,7 @@ subroutine I_grade_gradu_naive4(i, j, k, l, int) aor_j = aos_in_r_array_extra_transp(jpoint,j) aor_l = aos_in_r_array_extra_transp(jpoint,l) - e2_val = j1b_nucl(r2) + e2_val = env_nucl(r2) u12_val = j12_mu(r1, r2) weight2 = aor_j * aor_l * e2_val * e2_val * u12_val * u12_val * final_weight_at_r_vector_extra(jpoint) @@ -464,7 +409,7 @@ subroutine I_grade_gradu_naive4(i, j, k, l, int) enddo return -end subroutine I_grade_gradu_naive4 +end ! --- @@ -485,16 +430,16 @@ subroutine I_grade_gradu_seminaive(i, j, k, l, int) aor_i = aos_in_r_array_transp(ipoint,i) aor_k = aos_in_r_array_transp(ipoint,k) - weight1 = 0.25d0 * final_weight_at_r_vector(ipoint) * ( aor_k * aor_i * v_1b_square_lapl(ipoint) & - + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,1) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * v_1b_square_grad(ipoint,1) & - + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,2) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * v_1b_square_grad(ipoint,2) & - + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,3) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * v_1b_square_grad(ipoint,3) ) + weight1 = 0.25d0 * final_weight_at_r_vector(ipoint) * ( aor_k * aor_i * env_square_lapl(ipoint) & + + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,1) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * env_square_grad(ipoint,1) & + + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,2) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * env_square_grad(ipoint,2) & + + (aor_k * aos_grad_in_r_array_transp_bis(ipoint,i,3) + aor_i * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * env_square_grad(ipoint,3) ) - int = int + weight1 * int2_u2_j1b2(j,l,ipoint) + int = int + weight1 * int2_u2_env2(j,l,ipoint) enddo return -end subroutine I_grade_gradu_seminaive +end ! --- @@ -508,7 +453,7 @@ subroutine aos_ik_grad1_esquare(i, k, r1, val) double precision :: der(3), aos_array(ao_num), aos_grad_array(3,ao_num) call give_all_aos_and_grad_at_r(r1, aos_array, aos_grad_array) - call grad1_j1b_nucl_square_num(r1, der) + call grad1_env_nucl_square_num(r1, der) tmp = aos_array(i) * aos_array(k) val(1) = tmp * der(1) @@ -559,14 +504,14 @@ end subroutine grad1_aos_ik_grad1_esquare ! 
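! The I_grade_gradu_naive1..4 and I_grade_gradu_seminaive routines above provide
! reference values by brute-force double quadrature: the outer loop runs over the r1
! grid (final_weight_at_r_vector), the inner one over the extra r2 grid
! (final_weight_at_r_vector_extra), with env_nucl and j12_mu evaluated pointwise; they
! differ in which derivative routines are used (grad1_env_nucl / grad1_j12_mu vs. the
! _square_num variants) and in how much of the r2 integral is precomputed
! (int2_u2_env2 in the seminaive version).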
--- -subroutine test_v_ij_u_cst_mu_j1b_an() +subroutine test_v_ij_u_cst_mu_env_an() implicit none integer :: i, j, ipoint double precision :: I_old, I_new double precision :: norm, accu, thr, diff - PROVIDE v_ij_u_cst_mu_j1b_an_old v_ij_u_cst_mu_j1b_an + PROVIDE v_ij_u_cst_mu_env_an_old v_ij_u_cst_mu_env_an thr = 1d-12 norm = 0.d0 @@ -575,8 +520,8 @@ subroutine test_v_ij_u_cst_mu_j1b_an() do i = 1, ao_num do j = 1, ao_num - I_old = v_ij_u_cst_mu_j1b_an_old(j,i,ipoint) - I_new = v_ij_u_cst_mu_j1b_an (j,i,ipoint) + I_old = v_ij_u_cst_mu_env_an_old(j,i,ipoint) + I_new = v_ij_u_cst_mu_env_an (j,i,ipoint) diff = dabs(I_new-I_old) if(diff .gt. thr) then @@ -595,7 +540,7 @@ subroutine test_v_ij_u_cst_mu_j1b_an() print*, ' accuracy(%) = ', 100.d0 * accu / norm return -end subroutine test_v_ij_u_cst_mu_j1b_an +end ! --- @@ -637,7 +582,7 @@ subroutine test_int2_grad1_u12_square_ao() print*, ' accuracy(%) = ', 100.d0 * accu / norm return -end subroutine test_int2_grad1_u12_square_ao +end ! --- @@ -681,7 +626,904 @@ subroutine test_int2_grad1_u12_ao() print*, ' accuracy(%) = ', 100.d0 * accu / norm return -end subroutine test_int2_grad1_u12_ao +end + +! --- + +subroutine test_j1e_grad() + + implicit none + integer :: i, j, ipoint + double precision :: g + double precision :: x_loops, x_dgemm, diff, thr, accu, norm + double precision, allocatable :: pa(:,:), Pb(:,:), Pt(:,:) + double precision, allocatable :: x(:), y(:), z(:) + + PROVIDE int2_grad1_u2e_ao + PROVIDE mo_coef + + allocate(Pa(ao_num,ao_num), Pb(ao_num,ao_num), Pt(ao_num,ao_num)) + + call dgemm( 'N', 'T', ao_num, ao_num, elec_alpha_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pa, size(Pa, 1)) + + if(elec_alpha_num .eq. elec_beta_num) then + Pb = Pa + else + call dgemm( 'N', 'T', ao_num, ao_num, elec_beta_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pb, size(Pb, 1)) + endif + Pt = Pa + Pa + + + g = 0.5d0 * (dble(elec_num) - 1.d0) / dble(elec_num) + + allocate(x(n_points_final_grid), y(n_points_final_grid), z(n_points_final_grid)) + + do ipoint = 1, n_points_final_grid + x(ipoint) = 0.d0 + y(ipoint) = 0.d0 + z(ipoint) = 0.d0 + do i = 1, ao_num + do j = 1, ao_num + x(ipoint) = x(ipoint) + g * Pt(i,j) * int2_grad1_u2e_ao(i,j,ipoint,1) + y(ipoint) = y(ipoint) + g * Pt(i,j) * int2_grad1_u2e_ao(i,j,ipoint,2) + z(ipoint) = z(ipoint) + g * Pt(i,j) * int2_grad1_u2e_ao(i,j,ipoint,3) + enddo + enddo + enddo + + deallocate(Pa, Pb, Pt) + + ! --- + + thr = 1d-10 + norm = 0.d0 + accu = 0.d0 + do ipoint = 1, n_points_final_grid + + x_loops = x (ipoint) + x_dgemm = j1e_gradx(ipoint) + diff = dabs(x_loops - x_dgemm) + if(diff .gt. thr) then + print *, ' problem in j1e_gradx on:', ipoint + print *, ' loops :', x_loops + print *, ' dgemm :', x_dgemm + stop + endif + accu += diff + norm += dabs(x_loops) + + x_loops = y (ipoint) + x_dgemm = j1e_grady(ipoint) + diff = dabs(x_loops - x_dgemm) + if(diff .gt. thr) then + print *, ' problem in j1e_grady on:', ipoint + print *, ' loops :', x_loops + print *, ' dgemm :', x_dgemm + stop + endif + accu += diff + norm += dabs(x_loops) + + x_loops = z (ipoint) + x_dgemm = j1e_gradz(ipoint) + diff = dabs(x_loops - x_dgemm) + if(diff .gt. thr) then + print *, ' problem in j1e_gradz on:', ipoint + print *, ' loops :', x_loops + print *, ' dgemm :', x_dgemm + stop + endif + accu += diff + norm += dabs(x_loops) + + enddo + + deallocate(x, y, z) + + print*, ' accuracy(%) = ', 100.d0 * accu / norm + + return +end + +! 
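+! Summary of the check above: the AO density is built as Pa = C_occ(alpha) C_occ(alpha)^T
+! with one dgemm on mo_coef, the total density Pt is contracted in explicit loops with
+! int2_grad1_u2e_ao and scaled by g = (N_elec - 1) / (2 N_elec), and the result is
+! compared point by point against the j1e_gradx/y/z providers.
+! NB: Pt is built as Pa + Pa rather than Pa + Pb, which is equivalent only when
+! elec_alpha_num = elec_beta_num.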
--- + +subroutine test_j1e_fit_ao() + + implicit none + integer :: i, j, ipoint + double precision :: g, c + double precision :: x_loops, x_dgemm, diff, thr, accu, norm + double precision, allocatable :: pa(:,:), Pb(:,:), Pt(:,:) + double precision, allocatable :: x(:), y(:), z(:) + double precision, allocatable :: x_fit(:), y_fit(:), z_fit(:), coef_fit(:) + + PROVIDE mo_coef + PROVIDE int2_grad1_u2e_ao + + ! --- + + allocate(Pa(ao_num,ao_num), Pb(ao_num,ao_num), Pt(ao_num,ao_num)) + + call dgemm( 'N', 'T', ao_num, ao_num, elec_alpha_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pa, size(Pa, 1)) + + if(elec_alpha_num .eq. elec_beta_num) then + Pb = Pa + else + call dgemm( 'N', 'T', ao_num, ao_num, elec_beta_num, 1.d0 & + , mo_coef, size(mo_coef, 1), mo_coef, size(mo_coef, 1) & + , 0.d0, Pb, size(Pb, 1)) + endif + Pt = Pa + Pa + + allocate(x(n_points_final_grid), y(n_points_final_grid), z(n_points_final_grid)) + + g = -0.5d0 * (dble(elec_num) - 1.d0) / dble(elec_num) + + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,1), ao_num*ao_num, Pt, 1, 0.d0, x, 1) + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,2), ao_num*ao_num, Pt, 1, 0.d0, y, 1) + call dgemv("T", ao_num*ao_num, n_points_final_grid, g, int2_grad1_u2e_ao(1,1,1,3), ao_num*ao_num, Pt, 1, 0.d0, z, 1) + + FREE int2_grad1_u2e_ao + + deallocate(Pa, Pb, Pt) + + ! --- + + allocate(x_fit(n_points_final_grid), y_fit(n_points_final_grid), z_fit(n_points_final_grid)) + allocate(coef_fit(ao_num)) + + call get_j1e_coef_fit_ao(ao_num, coef_fit) + !print *, ' coef fit in AO:' + !print*, coef_fit + +! !$OMP PARALLEL & +! !$OMP DEFAULT (NONE) & +! !$OMP PRIVATE (i, ipoint, c) & +! !$OMP SHARED (n_points_final_grid, ao_num, & +! !$OMP aos_grad_in_r_array, coef_fit, x_fit, y_fit, z_fit) +! !$OMP DO SCHEDULE (static) + do ipoint = 1, n_points_final_grid + x_fit(ipoint) = 0.d0 + y_fit(ipoint) = 0.d0 + z_fit(ipoint) = 0.d0 + do i = 1, ao_num + c = coef_fit(i) + x_fit(ipoint) = x_fit(ipoint) + c * aos_grad_in_r_array(i,ipoint,1) + y_fit(ipoint) = y_fit(ipoint) + c * aos_grad_in_r_array(i,ipoint,2) + z_fit(ipoint) = z_fit(ipoint) + c * aos_grad_in_r_array(i,ipoint,3) + enddo + enddo +! !$OMP END DO +! !$OMP END PARALLEL + + deallocate(coef_fit) + + ! --- + + thr = 1d-10 + norm = 0.d0 + accu = 0.d0 + do ipoint = 1, n_points_final_grid + + x_loops = x (ipoint) + x_dgemm = x_fit(ipoint) + diff = dabs(x_loops - x_dgemm) + !if(diff .gt. thr) then + ! print *, ' problem in j1e_gradx on:', ipoint + ! print *, ' loops :', x_loops + ! print *, ' dgemm :', x_dgemm + ! stop + !endif + accu += diff + norm += dabs(x_loops) + + x_loops = y (ipoint) + x_dgemm = y_fit(ipoint) + diff = dabs(x_loops - x_dgemm) + !if(diff .gt. thr) then + ! print *, ' problem in j1e_grady on:', ipoint + ! print *, ' loops :', x_loops + ! print *, ' dgemm :', x_dgemm + ! stop + !endif + accu += diff + norm += dabs(x_loops) + + x_loops = z (ipoint) + x_dgemm = z_fit(ipoint) + diff = dabs(x_loops - x_dgemm) + !if(diff .gt. thr) then + ! print *, ' problem in j1e_gradz on:', ipoint + ! print *, ' loops :', x_loops + ! print *, ' dgemm :', x_dgemm + ! stop + !endif + accu += diff + norm += dabs(x_loops) + enddo + + deallocate(x, y, z) + deallocate(x_fit, y_fit, z_fit) + + print*, ' fit accuracy (%) = ', 100.d0 * accu / norm + +end + +! 
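+! Summary: the reference gradient is obtained with one dgemv per Cartesian component,
+! x(ipoint) = g * sum_ij Pt(i,j) * int2_grad1_u2e_ao(i,j,ipoint,m), by viewing the
+! integral array as an (ao_num*ao_num) x n_points_final_grid matrix and Pt as a flat
+! vector; the fitted gradient is then rebuilt from the get_j1e_coef_fit_ao coefficients
+! times aos_grad_in_r_array, and only the aggregate relative deviation is printed
+! (the per-point checks are left commented out).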
--- + +subroutine test_tc_grad_and_lapl_ao_new() + + implicit none + integer :: i, j, k, l + double precision :: i_old, i_new, diff, thr, accu, norm + double precision, allocatable :: tc_grad_and_lapl_ao_old(:,:,:,:) + + PROVIDE tc_grad_and_lapl_ao_new + + thr = 1d-10 + norm = 0.d0 + accu = 0.d0 + + allocate(tc_grad_and_lapl_ao_old(ao_num,ao_num,ao_num,ao_num)) + + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/tc_grad_and_lapl_ao_old', action="read") + read(11) tc_grad_and_lapl_ao_old + close(11) + + do i = 1, ao_num + do j = 1, ao_num + do k = 1, ao_num + do l = 1, ao_num + + i_old = tc_grad_and_lapl_ao_old(l,k,j,i) + i_new = tc_grad_and_lapl_ao_new(l,k,j,i) + diff = dabs(i_old - i_new) + if(diff .gt. thr) then + print *, ' problem in tc_grad_and_lapl_ao_new on:', l, k, j, i + print *, ' old :', i_old + print *, ' new :', i_new + stop + endif + accu += diff + norm += dabs(i_old) + enddo + enddo + enddo + enddo + + deallocate(tc_grad_and_lapl_ao_old) + + print*, ' accuracy (%) = ', 100.d0 * accu / norm + +end + +! --- + +subroutine test_tc_grad_square_ao_new() + + implicit none + integer :: i, j, k, l + double precision :: i_old, i_new, diff, thr, accu, norm + double precision, allocatable :: tc_grad_square_ao_old(:,:,:,:) + + PROVIDE tc_grad_square_ao_new + + thr = 1d-10 + norm = 0.d0 + accu = 0.d0 + + allocate(tc_grad_square_ao_old(ao_num,ao_num,ao_num,ao_num)) + + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/tc_grad_square_ao_old', action="read") + read(11) tc_grad_square_ao_old + close(11) + + do i = 1, ao_num + do j = 1, ao_num + do k = 1, ao_num + do l = 1, ao_num + + i_old = tc_grad_square_ao_old(l,k,j,i) + i_new = tc_grad_square_ao_new(l,k,j,i) + diff = dabs(i_old - i_new) + if(diff .gt. thr) then + print *, ' problem in tc_grad_and_lapl_ao_new on:', l, k, j, i + print *, ' old :', i_old + print *, ' new :', i_new + stop + endif + accu += diff + norm += dabs(i_old) + enddo + enddo + enddo + enddo + + deallocate(tc_grad_square_ao_old) + + print*, ' accuracy (%) = ', 100.d0 * accu / norm + +end + +! --- + +BEGIN_PROVIDER [double precision, tc_grad_square_ao_new, (ao_num, ao_num, ao_num, ao_num)] + + implicit none + integer :: i, j, k, l, m, ipoint + double precision :: weight1, ao_k_r, ao_i_r + double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq + double precision :: time0, time1 + double precision, allocatable :: b_mat(:,:,:,:), c_mat(:,:,:) + double precision, external :: get_ao_two_e_integral + + PROVIDe tc_integ_type + PROVIDE env_type + PROVIDE j2e_type + PROVIDE j1e_type + + call wall_time(time0) + + print *, ' providing tc_grad_square_ao_new ...' + + PROVIDE int2_grad1_u12_square_ao + + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_grad1_u12_square_ao(1,1,1), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 0.d0, tc_grad_square_ao_new, ao_num*ao_num) + + FREE int2_grad1_u12_square_ao + + if( (tc_integ_type .eq. "semi-analytic") .and. & + (j2e_type .eq. 
"Mu") .and. & + ((env_type .eq. "Prod_Gauss") .or. (env_type .eq. "Sum_Gauss")) .and. & + use_ipp ) then + + ! an additional term is added here directly instead of + ! being added in int2_grad1_u12_square_ao for performance + + PROVIDE int2_u2_env2 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector, & + !$OMP env_square_grad, env_square_lapl, aos_grad_in_r_array_transp_bis) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = 0.25d0 * final_weight_at_r_vector(ipoint) + + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + c_mat(ipoint,k,i) = weight1 * ( ao_k_r * ao_i_r * env_square_lapl(ipoint) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * env_square_grad(ipoint,1) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * env_square_grad(ipoint,2) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * env_square_grad(ipoint,3) ) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_u2_env2(1,1,1), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 1.d0, tc_grad_square_ao_new, ao_num*ao_num) + + FREE int2_u2_env2 + endif ! use_ipp + + deallocate(c_mat) + + call sum_A_At(tc_grad_square_ao_new(1,1,1,1), ao_num*ao_num) + + call wall_time(time1) + print*, ' Wall time for tc_grad_square_ao_new (min) = ', (time1 - time0) / 60.d0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao_new, (ao_num, ao_num, ao_num, ao_num)] + + implicit none + integer :: i, j, k, l, m, ipoint + double precision :: weight1, ao_k_r, ao_i_r + double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq + double precision :: time0, time1 + double precision, allocatable :: b_mat(:,:,:,:), c_mat(:,:,:) + double precision, external :: get_ao_two_e_integral + + PROVIDe tc_integ_type + PROVIDE env_type + PROVIDE j2e_type + PROVIDE j1e_type + + call wall_time(time0) + + print *, ' providing tc_grad_square_ao_new ...' 
+ + + PROVIDE int2_grad1_u12_ao + + allocate(b_mat(n_points_final_grid,ao_num,ao_num,3)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, b_mat, & + !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + b_mat(ipoint,k,i,1) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) + b_mat(ipoint,k,i,2) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) + b_mat(ipoint,k,i,3) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + tc_grad_and_lapl_ao_new = 0.d0 + do m = 1, 3 + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 & + , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, b_mat(1,1,1,m), n_points_final_grid & + , 1.d0, tc_grad_and_lapl_ao_new, ao_num*ao_num) + enddo + deallocate(b_mat) + + FREE int2_grad1_u12_ao + FREE int2_grad1_u2e_ao + + call sum_A_At(tc_grad_and_lapl_ao_new(1,1,1,1), ao_num*ao_num) + + call wall_time(time1) + print*, ' Wall time for tc_grad_and_lapl_ao_new (min) = ', (time1 - time0) / 60.d0 + +END_PROVIDER + +! --- + +subroutine test_fit_coef_A1() + + implicit none + integer :: i, j, k, l, ij, kl, ipoint + double precision :: t1, t2 + double precision :: accu, norm, diff + double precision, allocatable :: A1(:,:) + double precision, allocatable :: A2(:,:,:,:), tmp(:,:,:) + + ! --- + + allocate(A1(ao_num*ao_num,ao_num*ao_num)) + + call wall_time(t1) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, ij, kl, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, & + !$OMP final_weight_at_r_vector, aos_in_r_array_transp, A1) + !$OMP DO COLLAPSE(2) + do k = 1, ao_num + do l = 1, ao_num + kl = (k-1)*ao_num + l + + do i = 1, ao_num + do j = 1, ao_num + ij = (i-1)*ao_num + j + + A1(ij,kl) = 0.d0 + do ipoint = 1, n_points_final_grid + A1(ij,kl) += final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,j) & + * aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,l) + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(t2) + print*, ' WALL TIME FOR A1 (min) =', (t2-t1)/60.d0 + + ! 
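+  ! Second construction of the same matrix, without the explicit quadruple loop:
+  ! with tmp(i,j,ipoint) = dsqrt(w(ipoint)) * phi_i(ipoint) * phi_j(ipoint),
+  ! the dgemm below gives A2(ij,kl) = sum_ipoint tmp(ij,ipoint) * tmp(kl,ipoint),
+  ! which is exactly A1(ij,kl); the element-wise comparison follows.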
--- + + call wall_time(t1) + + allocate(tmp(ao_num,ao_num,n_points_final_grid)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, final_weight_at_r_vector, aos_in_r_array_transp, tmp) + !$OMP DO COLLAPSE(2) + do j = 1, ao_num + do i = 1, ao_num + do ipoint = 1, n_points_final_grid + tmp(i,j,ipoint) = dsqrt(final_weight_at_r_vector(ipoint)) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + allocate(A2(ao_num,ao_num,ao_num,ao_num)) + + call dgemm( "N", "T", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , tmp(1,1,1), ao_num*ao_num, tmp(1,1,1), ao_num*ao_num & + , 0.d0, A2(1,1,1,1), ao_num*ao_num) + deallocate(tmp) + + call wall_time(t2) + print*, ' WALL TIME FOR A2 (min) =', (t2-t1)/60.d0 + + ! --- + + accu = 0.d0 + norm = 0.d0 + do k = 1, ao_num + do l = 1, ao_num + kl = (k-1)*ao_num + l + + do i = 1, ao_num + do j = 1, ao_num + ij = (i-1)*ao_num + j + + diff = dabs(A2(j,i,l,k) - A1(ij,kl)) + if(diff .gt. 1d-10) then + print *, ' problem in A2 on:', i, i, l, k + print *, ' A1 :', A1(ij,kl) + print *, ' A2 :', A2(j,i,l,k) + stop + endif + + accu += diff + norm += dabs(A1(ij,kl)) + enddo + enddo + enddo + enddo + + deallocate(A1, A2) + + print*, ' accuracy (%) = ', 100.d0 * accu / norm + + return +end + +! --- + +subroutine test_fit_coef_inv() + + implicit none + integer :: i, j, k, l, ij, kl, ipoint + integer :: n_svd, info, lwork, mn, m, n + double precision :: t1, t2 + double precision :: accu, norm, diff + double precision :: cutoff_svd, D1_inv + double precision, allocatable :: A1(:,:), A1_inv(:,:), A1_tmp(:,:) + double precision, allocatable :: A2(:,:,:,:), tmp(:,:,:), A2_inv(:,:,:,:) + double precision, allocatable :: U(:,:), D(:), Vt(:,:), work(:), A2_tmp(:,:,:,:) + + cutoff_svd = 5d-8 + + ! --- + + call wall_time(t1) + + allocate(A1(ao_num*ao_num,ao_num*ao_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, ij, kl, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, & + !$OMP final_weight_at_r_vector, aos_in_r_array_transp, A1) + !$OMP DO COLLAPSE(2) + do k = 1, ao_num + do l = 1, ao_num + kl = (k-1)*ao_num + l + + do i = 1, ao_num + do j = 1, ao_num + ij = (i-1)*ao_num + j + + A1(ij,kl) = 0.d0 + do ipoint = 1, n_points_final_grid + A1(ij,kl) += final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,j) & + * aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,l) + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(t2) + print*, ' WALL TIME FOR A1 (min) =', (t2-t1)/60.d0 + + allocate(A1_inv(ao_num*ao_num,ao_num*ao_num)) + call get_pseudo_inverse(A1, ao_num*ao_num, ao_num*ao_num, ao_num*ao_num, A1_inv, ao_num*ao_num, cutoff_svd) + + call wall_time(t1) + print*, ' WALL TIME FOR A1_inv (min) =', (t1-t2)/60.d0 + + ! 
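+  ! The same matrix and its pseudo-inverse are now recomputed independently:
+  ! A2 = tmp^T tmp with the grid index leading in tmp(ipoint,i,j), and A2_inv from an
+  ! explicit dgesvd (workspace query with lwork = -1, then the actual factorization).
+  ! Singular values with D(i)/D(1) > cutoff_svd are inverted, the others zeroed, and
+  ! A2_inv = U D^+ Vt; since A is a Gram matrix (symmetric positive semi-definite for
+  ! positive quadrature weights) this coincides with the Moore-Penrose pseudo-inverse
+  ! returned by get_pseudo_inverse.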
--- + + call wall_time(t1) + + allocate(tmp(n_points_final_grid,ao_num,ao_num)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, final_weight_at_r_vector, aos_in_r_array_transp, tmp) + !$OMP DO COLLAPSE(2) + do j = 1, ao_num + do i = 1, ao_num + do ipoint = 1, n_points_final_grid + tmp(ipoint,i,j) = dsqrt(final_weight_at_r_vector(ipoint)) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + allocate(A2(ao_num,ao_num,ao_num,ao_num)) + + call dgemm( "T", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , tmp(1,1,1), n_points_final_grid, tmp(1,1,1), n_points_final_grid & + , 0.d0, A2(1,1,1,1), ao_num*ao_num) + + deallocate(tmp) + + call wall_time(t2) + print*, ' WALL TIME FOR A2 (min) =', (t2-t1)/60.d0 + + allocate(A1_tmp(ao_num*ao_num,ao_num*ao_num)) + A1_tmp = A1 + allocate(A2_tmp(ao_num,ao_num,ao_num,ao_num)) + A2_tmp = A2 + + allocate(A2_inv(ao_num,ao_num,ao_num,ao_num)) + + allocate(D(ao_num*ao_num), U(ao_num*ao_num,ao_num*ao_num), Vt(ao_num*ao_num,ao_num*ao_num)) + + allocate(work(1)) + lwork = -1 + + call dgesvd( 'S', 'A', ao_num*ao_num, ao_num*ao_num, A1_tmp(1,1), ao_num*ao_num & + !call dgesvd( 'S', 'A', ao_num*ao_num, ao_num*ao_num, A2_tmp(1,1,1,1), ao_num*ao_num & + , D(1), U(1,1), ao_num*ao_num, Vt(1,1), ao_num*ao_num, work, lwork, info) + if(info /= 0) then + print *, info, ': SVD failed' + stop + endif + + LWORK = max(5*ao_num*ao_num, int(WORK(1))) + deallocate(work) + allocate(work(lwork)) + + call dgesvd( 'S', 'A', ao_num*ao_num, ao_num*ao_num, A1_tmp(1,1), ao_num*ao_num & + !call dgesvd( 'S', 'A', ao_num*ao_num, ao_num*ao_num, A2_tmp(1,1,1,1), ao_num*ao_num & + , D(1), U(1,1), ao_num*ao_num, Vt(1,1), ao_num*ao_num, work, lwork, info) + if(info /= 0) then + print *, info, ':: SVD failed' + stop 1 + endif + + deallocate(A2_tmp) + deallocate(work) + + n_svd = 0 + D1_inv = 1.d0 / D(1) + do ij = 1, ao_num*ao_num + if(D(ij)*D1_inv > cutoff_svd) then + D(ij) = 1.d0 / D(ij) + n_svd = n_svd + 1 + else + D(ij) = 0.d0 + endif + enddo + print*, ' n_svd = ', n_svd + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ij, kl) & + !$OMP SHARED (ao_num, n_svd, D, Vt) + !$OMP DO + do kl = 1, ao_num*ao_num + do ij = 1, n_svd + Vt(ij,kl) = Vt(ij,kl) * D(ij) + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_svd, 1.d0 & + , U(1,1), ao_num*ao_num, Vt(1,1), ao_num*ao_num & + , 0.d0, A2_inv(1,1,1,1), ao_num*ao_num) + + deallocate(D, U, Vt) + + call wall_time(t1) + print*, ' WALL TIME FOR A2_inv (min) =', (t1-t2)/60.d0 + + ! --- + + accu = 0.d0 + norm = 0.d0 + do k = 1, ao_num + do l = 1, ao_num + kl = (k-1)*ao_num + l + + do i = 1, ao_num + do j = 1, ao_num + ij = (i-1)*ao_num + j + + diff = dabs(A2(j,i,l,k) - A1(ij,kl)) + if(diff .gt. 1d-10) then + print *, ' problem in A2 on:', i, i, l, k + print *, ' A1 :', A1(ij,kl) + print *, ' A2 :', A2(j,i,l,k) + stop + endif + + accu += diff + norm += dabs(A1(ij,kl)) + enddo + enddo + enddo + enddo + + print*, ' accuracy on A (%) = ', 100.d0 * accu / norm + + accu = 0.d0 + norm = 0.d0 + do k = 1, ao_num + do l = 1, ao_num + kl = (k-1)*ao_num + l + + do i = 1, ao_num + do j = 1, ao_num + ij = (i-1)*ao_num + j + + diff = dabs(A2_inv(j,i,l,k) - A1_inv(ij,kl)) + if(diff .gt. 
cutoff_svd) then + print *, ' problem in A2_inv on:', i, i, l, k + print *, ' A1_inv :', A1_inv(ij,kl) + print *, ' A2_inv :', A2_inv(j,i,l,k) + stop + endif + + accu += diff + norm += dabs(A1_inv(ij,kl)) + enddo + enddo + enddo + enddo + + print*, ' accuracy on A_inv (%) = ', 100.d0 * accu / norm + + deallocate(A1_inv, A2_inv) + deallocate(A1, A2) + + return +end + +! --- + +subroutine test_fit_coef_testinvA() + + implicit none + integer :: i, j, k, l, m, n, ij, kl, mn, ipoint + double precision :: t1, t2 + double precision :: accu, norm, diff + double precision :: cutoff_svd + double precision, allocatable :: A1(:,:), A1_inv(:,:) + + cutoff_svd = 1d-17 + + ! --- + + call wall_time(t1) + + allocate(A1(ao_num*ao_num,ao_num*ao_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, ij, kl, ipoint) & + !$OMP SHARED (n_points_final_grid, ao_num, & + !$OMP final_weight_at_r_vector, aos_in_r_array_transp, A1) + !$OMP DO COLLAPSE(2) + do k = 1, ao_num + do l = 1, ao_num + kl = (k-1)*ao_num + l + + do i = 1, ao_num + do j = 1, ao_num + ij = (i-1)*ao_num + j + + A1(ij,kl) = 0.d0 + do ipoint = 1, n_points_final_grid + A1(ij,kl) += final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,j) & + * aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,l) + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(t2) + print*, ' WALL TIME FOR A1 (min) =', (t2-t1)/60.d0 + + allocate(A1_inv(ao_num*ao_num,ao_num*ao_num)) + call get_pseudo_inverse(A1, ao_num*ao_num, ao_num*ao_num, ao_num*ao_num, A1_inv, ao_num*ao_num, cutoff_svd) + + call wall_time(t1) + print*, ' WALL TIME FOR A1_inv (min) =', (t1-t2)/60.d0 + + ! --- + + print*, ' check inv' + + do kl = 1, ao_num*ao_num + do ij = 1, ao_num*ao_num + + diff = 0.d0 + do mn = 1, ao_num*ao_num + diff += A1(kl,mn) * A1_inv(mn,ij) + enddo + + if(kl .eq. ij) then + accu += dabs(diff - 1.d0) + else + accu += dabs(diff - 0.d0) + endif + enddo + enddo + + print*, ' accuracy (%) = ', accu * 100.d0 + + deallocate(A1, A1_inv) + + return +end ! --- diff --git a/plugins/local/non_h_ints_mu/total_tc_int.irp.f b/plugins/local/non_h_ints_mu/total_tc_int.irp.f index 9c19e0ac..9d3cf565 100644 --- a/plugins/local/non_h_ints_mu/total_tc_int.irp.f +++ b/plugins/local/non_h_ints_mu/total_tc_int.irp.f @@ -1,190 +1,226 @@ ! --- -BEGIN_PROVIDER [double precision, ao_vartc_int_chemist, (ao_num, ao_num, ao_num, ao_num)] +BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_num)] + + BEGIN_DOC + ! + ! CHEMIST NOTATION IS USED + ! + ! ao_two_e_tc_tot(k,i,l,j) = (ki|V^TC(r_12)|lj) + ! = where V^TC(r_12) is the total TC operator + ! = tc_grad_and_lapl_ao(k,i,l,j) + tc_grad_square_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) + ! AND IF(var_tc): + ! + ! ao_two_e_tot(k,i,l,j) = (ki|V^TC(r_12) + [(V^TC)(r_12)]^\dagger|lj) / 2.0 + ! = tc_grad_square_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) + ! + ! + ! where: + ! + ! tc_grad_and_lapl_ao(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) . \grad_1 | ij > + ! = -1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 (-1) \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! + ! tc_grad_square_ao(k,i,l,j) = -1/2 + ! + ! ao_two_e_coul(k,i,l,j) = < l k | 1/r12 | j i > = ( k i | 1/r12 | l j ) + ! 
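+  ! In practice, the provider below assembles these pieces on the real-space grid:
+  ! the |\grad_1 u|^2 term comes from int2_grad1_u12_square_ao contracted with the
+  ! weighted AO pair densities, the \grad_1 u . \grad_1 term (skipped if var_tc) from
+  ! int2_grad1_u12_ao contracted with the antisymmetrized AO gradient products, the
+  ! grid contributions are symmetrized with sum_A_At, and the Coulomb integrals are
+  ! finally added from ao_integrals_map. The full tensor can also be read from or
+  ! written to the EZFIO work/ao_two_e_tc_tot file according to read_tc_integ and
+  ! write_tc_integ.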
+ END_DOC implicit none - integer :: i, j, k, l - double precision :: wall1, wall0 + integer :: i, j, k, l, m, ipoint + double precision :: weight1, ao_k_r, ao_i_r + double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq + double precision :: time0, time1 + double precision, allocatable :: b_mat(:,:,:,:), c_mat(:,:,:) + double precision, external :: get_ao_two_e_integral - print *, ' providing ao_vartc_int_chemist ...' - call wall_time(wall0) - - if(test_cycle_tc) then + PROVIDe tc_integ_type + PROVIDE env_type + PROVIDE j2e_type + PROVIDE j1e_type - PROVIDE j1b_type - if(j1b_type .ne. 3) then - print*, ' TC integrals with cycle can not be used for j1b_type =', j1b_type - stop - endif + call wall_time(time0) - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ao_vartc_int_chemist(k,i,l,j) = tc_grad_square_ao_test(k,i,l,j) + ao_two_e_coul(k,i,l,j) - enddo - enddo - enddo - enddo + print *, ' providing ao_two_e_tc_tot ...' + print*, ' j2e_type: ', j2e_type + print*, ' j1e_type: ', j1e_type + print*, ' env_type: ', env_type + + if(read_tc_integ) then + + print*, ' Reading ao_two_e_tc_tot from ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' + + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="read") + read(11) ao_two_e_tc_tot + close(11) else + PROVIDE tc_integ_type + print*, ' approach for integrals: ', tc_integ_type + + ! --- + + PROVIDE int2_grad1_u12_square_ao + + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_grad1_u12_square_ao(1,1,1), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 0.d0, ao_two_e_tc_tot, ao_num*ao_num) + + FREE int2_grad1_u12_square_ao + + if( (tc_integ_type .eq. "semi-analytic") .and. & + (j2e_type .eq. "Mu") .and. & + ((env_type .eq. "Prod_Gauss") .or. (env_type .eq. "Sum_Gauss")) .and. & + use_ipp ) then + + ! an additional term is added here directly instead of + ! 
being added in int2_grad1_u12_square_ao for performance + + PROVIDE int2_u2_env2 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector, & + !$OMP env_square_grad, env_square_lapl, aos_grad_in_r_array_transp_bis) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = 0.25d0 * final_weight_at_r_vector(ipoint) + + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + c_mat(ipoint,k,i) = weight1 * ( ao_k_r * ao_i_r * env_square_lapl(ipoint) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) * env_square_grad(ipoint,1) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) * env_square_grad(ipoint,2) & + + (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) + ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) * env_square_grad(ipoint,3) ) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_u2_env2(1,1,1), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 1.d0, ao_two_e_tc_tot(1,1,1,1), ao_num*ao_num) + + FREE int2_u2_env2 + endif ! use_ipp + + deallocate(c_mat) + + ! --- + + if(.not. var_tc) then + + PROVIDE int2_grad1_u12_ao + + allocate(b_mat(n_points_final_grid,ao_num,ao_num,3)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, b_mat, & + !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + b_mat(ipoint,k,i,1) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,1) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1)) + b_mat(ipoint,k,i,2) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,2) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2)) + b_mat(ipoint,k,i,3) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,3) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + do m = 1, 3 + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 & + , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, b_mat(1,1,1,m), n_points_final_grid & + , 1.d0, ao_two_e_tc_tot(1,1,1,1), ao_num*ao_num) + enddo + deallocate(b_mat) + + FREE int2_grad1_u12_ao + + if(tc_integ_type .eq. "semi-analytic") then + FREE int2_grad1_u2e_ao + endif + + endif ! var_tc + + ! --- + + call sum_A_At(ao_two_e_tc_tot(1,1,1,1), ao_num*ao_num) + + PROVIDE ao_integrals_map + + !$OMP PARALLEL DEFAULT(NONE) & + !$OMP SHARED(ao_num, ao_two_e_tc_tot, ao_integrals_map) & + !$OMP PRIVATE(i, j, k, l) + !$OMP DO do j = 1, ao_num do l = 1, ao_num do i = 1, ao_num do k = 1, ao_num - ao_vartc_int_chemist(k,i,l,j) = tc_grad_square_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) + ! 
< 1:i, 2:j | 1:k, 2:l > + ao_two_e_tc_tot(k,i,l,j) = ao_two_e_tc_tot(k,i,l,j) + get_ao_two_e_integral(i, j, k, l, ao_integrals_map) enddo enddo enddo enddo + !$OMP END DO + !$OMP END PARALLEL - endif - - call wall_time(wall1) - print *, ' wall time for ao_vartc_int_chemist ', wall1 - wall0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, ao_tc_int_chemist, (ao_num, ao_num, ao_num, ao_num)] - - implicit none - integer :: i, j, k, l - double precision :: wall1, wall0 - - PROVIDE j1b_type - - print *, ' providing ao_tc_int_chemist ...' - call wall_time(wall0) - - if(test_cycle_tc) then - - if(j1b_type .ne. 3) then - print*, ' TC integrals with cycle can not be used for j1b_type =', j1b_type - stop + if(tc_integ_type .eq. "numeric") then + FREE int2_grad1_u12_ao_num int2_grad1_u12_square_ao_num endif - ao_tc_int_chemist = ao_tc_int_chemist_test + endif ! read_tc_integ - else - - PROVIDE tc_grad_square_ao tc_grad_and_lapl_ao ao_two_e_coul - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ao_tc_int_chemist(k,i,l,j) = tc_grad_square_ao(k,i,l,j) + tc_grad_and_lapl_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) -! ao_tc_int_chemist(k,i,l,j) = ao_two_e_coul(k,i,l,j) - enddo - enddo - enddo - enddo + if(write_tc_integ .and. mpi_master) then + print*, ' Saving ao_two_e_tc_tot in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write") + call ezfio_set_work_empty(.False.) + write(11) ao_two_e_tc_tot + close(11) + call ezfio_set_tc_keywords_io_tc_integ('Read') endif - FREE tc_grad_square_ao tc_grad_and_lapl_ao ao_two_e_coul - - if(j1b_type .ge. 100) then - FREE int2_grad1_u12_ao_num int2_grad1_u12_square_ao_num - endif - - - call wall_time(wall1) - print *, ' wall time for ao_tc_int_chemist ', wall1 - wall0 + call wall_time(time1) + print*, ' Wall time for ao_two_e_tc_tot (min) = ', (time1 - time0) / 60.d0 call print_memory_usage() END_PROVIDER ! --- -BEGIN_PROVIDER [double precision, ao_tc_int_chemist_no_cycle, (ao_num, ao_num, ao_num, ao_num)] - - implicit none - integer :: i, j, k, l - double precision :: wall1, wall0 - - print *, ' providing ao_tc_int_chemist_no_cycle ...' - call wall_time(wall0) - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ao_tc_int_chemist_no_cycle(k,i,l,j) = tc_grad_square_ao(k,i,l,j) + tc_grad_and_lapl_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) - !ao_tc_int_chemist(k,i,l,j) = ao_two_e_coul(k,i,l,j) - enddo - enddo - enddo - enddo - - call wall_time(wall1) - print *, ' wall time for ao_tc_int_chemist_no_cycle ', wall1 - wall0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, ao_tc_int_chemist_test, (ao_num, ao_num, ao_num, ao_num)] - - implicit none - integer :: i, j, k, l - double precision :: wall1, wall0 - - print *, ' providing ao_tc_int_chemist_test ...' - call wall_time(wall0) - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ao_tc_int_chemist_test(k,i,l,j) = tc_grad_square_ao_test(k,i,l,j) + tc_grad_and_lapl_ao_test(k,i,l,j) + ao_two_e_coul(k,i,l,j) -! ao_tc_int_chemist_test(k,i,l,j) = ao_two_e_coul(k,i,l,j) - enddo - enddo - enddo - enddo - - call wall_time(wall1) - print *, ' wall time for ao_tc_int_chemist_test ', wall1 - wall0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, ao_two_e_coul, (ao_num, ao_num, ao_num, ao_num) ] - - BEGIN_DOC - ! - ! ao_two_e_coul(k,i,l,j) = ( k i | 1/r12 | l j ) = < l k | 1/r12 | j i > - ! 
- END_DOC - - integer :: i, j, k, l - double precision, external :: get_ao_two_e_integral - - PROVIDE ao_integrals_map - - !$OMP PARALLEL DEFAULT(NONE) & - !$OMP SHARED(ao_num, ao_two_e_coul, ao_integrals_map) & - !$OMP PRIVATE(i, j, k, l) - !$OMP DO - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ! < 1:k, 2:l | 1:i, 2:j > - ao_two_e_coul(k,i,l,j) = get_ao_two_e_integral(i, j, k, l, ao_integrals_map) - enddo - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - -END_PROVIDER - -! --- - diff --git a/plugins/local/non_hermit_dav/biorthog.irp.f b/plugins/local/non_hermit_dav/biorthog.irp.f index 13917c5a..2229e17d 100644 --- a/plugins/local/non_hermit_dav/biorthog.irp.f +++ b/plugins/local/non_hermit_dav/biorthog.irp.f @@ -142,7 +142,7 @@ subroutine non_hrmt_diag_split_degen(n, A, leigvec, reigvec, n_real_eigv, eigval enddo enddo -end subroutine non_hrmt_diag_split_degen +end ! --- @@ -248,7 +248,7 @@ subroutine non_hrmt_real_diag_new(n, A, leigvec, reigvec, n_real_eigv, eigval) print*,'Your matrix intrinsically contains complex eigenvalues' endif -end subroutine non_hrmt_real_diag_new +end ! --- @@ -275,10 +275,11 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei double precision :: thr, thr_cut, thr_diag, thr_norm double precision :: accu_d, accu_nd - integer, allocatable :: list_good(:), iorder(:) + integer, allocatable :: list_good(:), iorder(:), deg_num(:) double precision, allocatable :: WR(:), WI(:), VL(:,:), VR(:,:) double precision, allocatable :: S(:,:) double precision, allocatable :: phi_1_tilde(:),phi_2_tilde(:),chi_1_tilde(:),chi_2_tilde(:) + allocate(phi_1_tilde(n),phi_2_tilde(n),chi_1_tilde(n),chi_2_tilde(n)) @@ -305,11 +306,11 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei - print *, ' ' - print *, ' eigenvalues' + !print *, ' ' + !print *, ' eigenvalues' i = 1 do while(i .le. n) - write(*, '(I3,X,1000(F16.10,X))')i, WR(i), WI(i) + !write(*, '(I3,X,1000(F16.10,X))')i, WR(i), WI(i) if(.false.)then if(WI(i).ne.0.d0)then print*,'*****************' @@ -386,7 +387,7 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei thr_diag = 1d-06 thr_norm = 1d+10 - call check_EIGVEC(n, n, A, WR, VL, VR, thr_diag, thr_norm, .false.) + !call check_EIGVEC(n, n, A, WR, VL, VR, thr_diag, thr_norm, .false.) ! ! ------------------------------------------------------------------------------------- @@ -400,7 +401,7 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei !thr = 100d0 thr = Im_thresh_tcscf do i = 1, n - print*, 'Re(i) + Im(i)', WR(i), WI(i) + !print*, 'Re(i) + Im(i)', WR(i), WI(i) if(dabs(WI(i)) .lt. thr) then n_good += 1 else @@ -479,15 +480,16 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei return ! accu_nd is modified after adding the normalization - !elseif( (accu_nd .lt. thr_nd) .and. (dabs(accu_d-dble(n_real_eigv))/dble(n_real_eigv) .gt. thr_d) ) then + elseif( (accu_nd .lt. thr_nd) .and. (dabs(accu_d-dble(n_real_eigv))/dble(n_real_eigv) .gt. thr_d) ) then - ! print *, ' lapack vectors are not normalized but bi-orthogonalized' - ! call check_biorthog_binormalize(n, n_real_eigv, leigvec, reigvec, thr_d, thr_nd, .true.) + print *, ' lapack vectors are not normalized but bi-orthogonalized' + call check_biorthog_binormalize(n, n_real_eigv, leigvec, reigvec, thr_d, thr_nd, .true.) - ! call check_EIGVEC(n, n, A, eigval, leigvec, reigvec, thr_diag, thr_norm, .true.) 
+ call check_biorthog(n, n_real_eigv, leigvec, reigvec, accu_d, accu_nd, S, thr_d, thr_nd, .true.) + call check_EIGVEC(n, n, A, eigval, leigvec, reigvec, thr_diag, thr_norm, .true.) - ! deallocate(S) - ! return + deallocate(S) + return else @@ -495,18 +497,10 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei ! --- -! call impose_orthog_degen_eigvec(n, eigval, reigvec) -! call impose_orthog_degen_eigvec(n, eigval, leigvec) - - call reorder_degen_eigvec(n, eigval, leigvec, reigvec) - call impose_biorthog_degen_eigvec(n, eigval, leigvec, reigvec) - - - !call impose_orthog_biorthog_degen_eigvec(n, thr_d, thr_nd, eigval, leigvec, reigvec) - - !call impose_unique_biorthog_degen_eigvec(n, eigval, mo_coef, ao_overlap, leigvec, reigvec) - - ! --- + allocate(deg_num(n)) + call reorder_degen_eigvec(n, deg_num, eigval, leigvec, reigvec) + call impose_biorthog_degen_eigvec(n, deg_num, eigval, leigvec, reigvec) + deallocate(deg_num) call check_biorthog(n, n_real_eigv, leigvec, reigvec, accu_d, accu_nd, S, thr_d, thr_nd, .false.) if( (accu_nd .lt. thr_nd) .and. (dabs(accu_d-dble(n_real_eigv)) .gt. thr_d) ) then @@ -514,12 +508,7 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei endif call check_biorthog(n, n_real_eigv, leigvec, reigvec, accu_d, accu_nd, S, thr_d, thr_nd, .true.) - !call impose_biorthog_qr(n, n_real_eigv, thr_d, thr_nd, leigvec, reigvec) - !call impose_biorthog_lu(n, n_real_eigv, thr_d, thr_nd, leigvec, reigvec) - - ! --- - - call check_EIGVEC(n, n, A, eigval, leigvec, reigvec, thr_diag, thr_norm, .true.) + !call check_EIGVEC(n, n, A, eigval, leigvec, reigvec, thr_diag, thr_norm, .true.) deallocate(S) @@ -530,7 +519,7 @@ subroutine non_hrmt_bieig(n, A, thr_d, thr_nd, leigvec, reigvec, n_real_eigv, ei return -end subroutine non_hrmt_bieig +end ! --- @@ -703,7 +692,7 @@ subroutine non_hrmt_bieig_random_diag(n, A, leigvec, reigvec, n_real_eigv, eigva return -end subroutine non_hrmt_bieig_random_diag +end ! --- @@ -812,7 +801,7 @@ subroutine non_hrmt_real_im(n, A, leigvec, reigvec, n_real_eigv, eigval) deallocate( S ) -end subroutine non_hrmt_real_im +end ! --- @@ -917,7 +906,7 @@ subroutine non_hrmt_generalized_real_im(n, A, B, leigvec, reigvec, n_real_eigv, deallocate( S ) -end subroutine non_hrmt_generalized_real_im +end ! --- @@ -1053,7 +1042,7 @@ subroutine non_hrmt_bieig_fullvect(n, A, leigvec, reigvec, n_real_eigv, eigval) return -end subroutine non_hrmt_bieig_fullvect +end ! --- diff --git a/plugins/local/non_hermit_dav/lapack_diag_non_hermit.irp.f b/plugins/local/non_hermit_dav/lapack_diag_non_hermit.irp.f index 836bf707..cb38347e 100644 --- a/plugins/local/non_hermit_dav/lapack_diag_non_hermit.irp.f +++ b/plugins/local/non_hermit_dav/lapack_diag_non_hermit.irp.f @@ -54,7 +54,7 @@ subroutine lapack_diag_non_sym(n, A, WR, WI, VL, VR) deallocate(Atmp, WORK) -end subroutine lapack_diag_non_sym +end subroutine non_sym_diag_inv_right(n,A,leigvec,reigvec,n_real_eigv,eigval) @@ -269,7 +269,7 @@ subroutine lapack_diag_non_sym_new(n, A, WR, WI, VL, VR) deallocate( Atmp ) deallocate( WORK, SCALE_array, RCONDE, RCONDV, IWORK ) -end subroutine lapack_diag_non_sym_new +end ! --- @@ -323,7 +323,7 @@ subroutine lapack_diag_non_sym_right(n, A, WR, WI, VR) ! write(*, '(1000(F16.10,X))') VR(:,i) ! enddo -end subroutine lapack_diag_non_sym_right +end ! 
--- @@ -437,7 +437,7 @@ subroutine non_hrmt_real_diag(n, A, leigvec, reigvec, n_real_eigv, eigval) print*, ' Notice that if you are interested in ground state it is not a problem :)' endif -end subroutine non_hrmt_real_diag +end ! --- @@ -495,7 +495,7 @@ subroutine lapack_diag_general_non_sym(n, A, B, WR, WI, VL, VR) deallocate( WORK, Atmp ) -end subroutine lapack_diag_general_non_sym +end ! --- @@ -570,7 +570,7 @@ subroutine non_hrmt_general_real_diag(n, A, B, reigvec, leigvec, n_real_eigv, ei enddo enddo -end subroutine non_hrmt_general_real_diag +end ! --- @@ -727,7 +727,7 @@ subroutine impose_biorthog_qr(m, n, thr_d, thr_nd, Vl, Vr) deallocate(tmp) return -end subroutine impose_biorthog_qr +end ! --- @@ -890,7 +890,7 @@ subroutine impose_biorthog_lu(m, n, Vl, Vr, S) !stop return -end subroutine impose_biorthog_lu +end ! --- @@ -996,7 +996,7 @@ subroutine check_EIGVEC(n, m, A, eigval, leigvec, reigvec, thr_diag, thr_norm, s deallocate( Mtmp ) -end subroutine check_EIGVEC +end ! --- @@ -1066,7 +1066,7 @@ subroutine check_degen(n, m, eigval, leigvec, reigvec) stop endif -end subroutine check_degen +end ! --- @@ -1169,7 +1169,7 @@ subroutine impose_weighted_orthog_svd(n, m, W, C) ! --- -end subroutine impose_weighted_orthog_svd +end ! --- @@ -1266,7 +1266,7 @@ subroutine impose_orthog_svd(n, m, C) ! --- -end subroutine impose_orthog_svd +end ! --- @@ -1365,7 +1365,7 @@ subroutine impose_orthog_svd_overlap(n, m, C, overlap) !enddo deallocate(S) -end subroutine impose_orthog_svd_overlap +end ! --- @@ -1442,7 +1442,7 @@ subroutine impose_orthog_GramSchmidt(n, m, C) ! --- -end subroutine impose_orthog_GramSchmidt +end ! --- @@ -1484,7 +1484,7 @@ subroutine impose_orthog_ones(n, deg_num, C) endif enddo -end subroutine impose_orthog_ones +end ! --- @@ -1577,7 +1577,7 @@ subroutine impose_orthog_degen_eigvec(n, e0, C0) endif enddo -end subroutine impose_orthog_degen_eigvec +end ! --- @@ -1661,7 +1661,7 @@ subroutine get_halfinv_svd(n, S) deallocate(S0, Stmp, Stmp2) -end subroutine get_halfinv_svd +end ! --- @@ -1776,7 +1776,7 @@ subroutine check_biorthog_binormalize(n, m, Vl, Vr, thr_d, thr_nd, stop_ifnot) stop endif -end subroutine check_biorthog_binormalize +end ! --- @@ -1840,7 +1840,7 @@ subroutine check_weighted_biorthog(n, m, W, Vl, Vr, thr_d, thr_nd, accu_d, accu_ stop endif -end subroutine check_weighted_biorthog +end ! --- @@ -1865,10 +1865,11 @@ subroutine check_biorthog(n, m, Vl, Vr, accu_d, accu_nd, S, thr_d, thr_nd, stop_ , Vl, size(Vl, 1), Vr, size(Vr, 1) & , 0.d0, S, size(S, 1) ) - print *, ' overlap matrix:' - do i = 1, m - write(*,'(1000(F16.10,X))') S(i,:) - enddo + ! print S s'il y a besoin + !print *, ' overlap matrix:' + !do i = 1, m + ! write(*,'(1000(F16.10,X))') S(i,:) + !enddo accu_d = 0.d0 accu_nd = 0.d0 @@ -1876,15 +1877,22 @@ subroutine check_biorthog(n, m, Vl, Vr, accu_d, accu_nd, S, thr_d, thr_nd, stop_ do j = 1, m if(i==j) then accu_d = accu_d + dabs(S(i,i)) + !print*, i, S(i,i) else accu_nd = accu_nd + S(j,i) * S(j,i) endif enddo enddo + !accu_nd = dsqrt(accu_nd) / dble(m*m) accu_nd = dsqrt(accu_nd) / dble(m) - print *, ' accu_nd = ', accu_nd - print *, ' accu_d = ', dabs(accu_d-dble(m))/dble(m) + if((accu_nd .gt. thr_nd) .or. dabs(accu_d-dble(m))/dble(m) .gt. thr_d) then + print *, ' non bi-orthogonal vectors !' + print *, ' accu_nd = ', accu_nd + print *, ' accu_d = ', dabs(accu_d-dble(m))/dble(m) + else + print *, ' vectors are bi-orthogonals' + endif ! 
--- @@ -1899,7 +1907,7 @@ subroutine check_biorthog(n, m, Vl, Vr, accu_d, accu_nd, S, thr_d, thr_nd, stop_ stop endif -end subroutine check_biorthog +end ! --- @@ -1941,27 +1949,25 @@ subroutine check_orthog(n, m, V, accu_d, accu_nd, S) !print*, ' diag acc: ', accu_d !print*, ' nondiag acc: ', accu_nd -end subroutine check_orthog +end ! --- -subroutine reorder_degen_eigvec(n, e0, L0, R0) + +subroutine reorder_degen_eigvec(n, deg_num, e0, L0, R0) implicit none integer, intent(in) :: n - double precision, intent(in) :: e0(n) - double precision, intent(inout) :: L0(n,n), R0(n,n) + double precision, intent(inout) :: e0(n), L0(n,n), R0(n,n) + integer, intent(out) :: deg_num(n) logical :: complex_root - integer :: i, j, k, m + integer :: i, j, k, m, ii, j_tmp double precision :: ei, ej, de, de_thr double precision :: accu_d, accu_nd - integer, allocatable :: deg_num(:) + double precision :: e0_tmp, L0_tmp(n), R0_tmp(n) double precision, allocatable :: L(:,:), R(:,:), S(:,:), S_inv_half(:,:) - ! --- - - allocate( deg_num(n) ) do i = 1, n deg_num(i) = 1 enddo @@ -1972,74 +1978,104 @@ subroutine reorder_degen_eigvec(n, e0, L0, R0) ei = e0(i) ! already considered in degen vectors - if(deg_num(i).eq.0) cycle + if(deg_num(i) .eq. 0) cycle + ii = 0 do j = i+1, n ej = e0(j) de = dabs(ei - ej) if(de .lt. de_thr) then - deg_num(i) = deg_num(i) + 1 - deg_num(j) = 0 - endif + ii = ii + 1 + + j_tmp = i + ii + deg_num(j_tmp) = 0 + + e0_tmp = e0(j_tmp) + e0(j_tmp) = e0(j) + e0(j) = e0_tmp + + L0_tmp(1:n) = L0(1:n,j_tmp) + L0(1:n,j_tmp) = L0(1:n,j) + L0(1:n,j) = L0_tmp(1:n) + + R0_tmp(1:n) = R0(1:n,j_tmp) + R0(1:n,j_tmp) = R0(1:n,j) + R0(1:n,j) = R0_tmp(1:n) + endif enddo + + deg_num(i) = ii + 1 enddo + ii = 0 do i = 1, n if(deg_num(i) .gt. 1) then - print *, ' degen on', i, deg_num(i), e0(i) + !print *, ' degen on', i, deg_num(i), e0(i) + ii = ii + 1 endif enddo + if(ii .eq. 0) then + print*, ' WARNING: bi-orthogonality is lost but there is no degeneracies' + print*, ' rotations may change energy' + stop + endif + + print *, ii, ' type of degeneracies' + ! --- - do i = 1, n - m = deg_num(i) +! do i = 1, n +! m = deg_num(i) +! +! if(m .gt. 1) then +! +! allocate(L(n,m)) +! allocate(R(n,m),S(m,m)) +! +! do j = 1, m +! L(1:n,j) = L0(1:n,i+j-1) +! R(1:n,j) = R0(1:n,i+j-1) +! enddo +! +! !call dgemm( 'T', 'N', m, m, n, 1.d0 & +! ! , L, size(L, 1), R, size(R, 1) & +! ! , 0.d0, S, size(S, 1) ) +! !print*, 'Overlap matrix ' +! !accu_nd = 0.d0 +! !do j = 1, m +! ! write(*,'(100(F16.10,X))') S(1:m,j) +! ! do k = 1, m +! ! if(j==k) cycle +! ! accu_nd += dabs(S(j,k)) +! ! enddo +! !enddo +! !print*,'accu_nd = ',accu_nd +!! if(accu_nd .gt.1.d-10) then +!! stop +!! endif +! +! do j = 1, m +! L0(1:n,i+j-1) = L(1:n,j) +! R0(1:n,i+j-1) = R(1:n,j) +! enddo +! +! deallocate(L, R, S) +! +! endif +! enddo +! +end - if(m .gt. 1) then - - allocate(L(n,m)) - allocate(R(n,m),S(m,m)) +! --- - do j = 1, m - L(1:n,j) = L0(1:n,i+j-1) - R(1:n,j) = R0(1:n,i+j-1) - enddo - - call dgemm( 'T', 'N', m, m, n, 1.d0 & - , L, size(L, 1), R, size(R, 1) & - , 0.d0, S, size(S, 1) ) - print*,'Overlap matrix ' - accu_nd = 0.D0 - do j = 1, m - write(*,'(100(F16.10,X))')S(1:m,j) - do k = 1, m - if(j==k)cycle - accu_nd += dabs(S(j,k)) - enddo - enddo - print*,'accu_nd = ',accu_nd -! if(accu_nd .gt.1.d-10)then -! stop -! 
endif - do j = 1, m - L0(1:n,i+j-1) = L(1:n,j) - R0(1:n,i+j-1) = R(1:n,j) - enddo - - deallocate(L, R,S) - - endif - enddo - -end subroutine reorder_degen_eigvec - -subroutine impose_biorthog_degen_eigvec(n, e0, L0, R0) +subroutine impose_biorthog_degen_eigvec(n, deg_num, e0, L0, R0) implicit none - integer, intent(in) :: n + integer, intent(in) :: n, deg_num(n) double precision, intent(in) :: e0(n) double precision, intent(inout) :: L0(n,n), R0(n,n) @@ -2047,41 +2083,13 @@ subroutine impose_biorthog_degen_eigvec(n, e0, L0, R0) integer :: i, j, k, m double precision :: ei, ej, de, de_thr double precision :: accu_d, accu_nd - integer, allocatable :: deg_num(:) double precision, allocatable :: L(:,:), R(:,:), S(:,:), S_inv_half(:,:) - ! --- - - allocate( deg_num(n) ) - do i = 1, n - deg_num(i) = 1 - enddo - - de_thr = thr_degen_tc - - do i = 1, n-1 - ei = e0(i) - - ! already considered in degen vectors - if(deg_num(i).eq.0) cycle - - do j = i+1, n - ej = e0(j) - de = dabs(ei - ej) - - if(de .lt. de_thr) then - deg_num(i) = deg_num(i) + 1 - deg_num(j) = 0 - endif - - enddo - enddo - - do i = 1, n - if(deg_num(i) .gt. 1) then - print *, ' degen on', i, deg_num(i), e0(i) - endif - enddo + !do i = 1, n + ! if(deg_num(i) .gt. 1) then + ! print *, ' degen on', i, deg_num(i), e0(i) + ! endif + !enddo ! --- @@ -2090,8 +2098,7 @@ subroutine impose_biorthog_degen_eigvec(n, e0, L0, R0) if(m .gt. 1) then - allocate(L(n,m)) - allocate(R(n,m)) + allocate(L(n,m), R(n,m), S(m,m)) do j = 1, m L(1:n,j) = L0(1:n,i+j-1) @@ -2100,8 +2107,53 @@ subroutine impose_biorthog_degen_eigvec(n, e0, L0, R0) ! --- -! call impose_orthog_svd(n, m, L) - call impose_orthog_svd(n, m, R) + !print*, 'Overlap matrix before' + call dgemm( 'T', 'N', m, m, n, 1.d0 & + , L, size(L, 1), R, size(R, 1) & + , 0.d0, S, size(S, 1) ) + + accu_nd = 0.d0 + do j = 1, m + !write(*,'(100(F16.10,X))') S(1:m,j) + do k = 1, m + if(j==k) cycle + accu_nd += dabs(S(j,k)) + enddo + enddo + + if(accu_nd .lt. 1d-12) then + deallocate(S, L, R) + cycle + endif + + !print*, ' accu_nd before = ', accu_nd + + call impose_biorthog_svd(n, m, L, R) + + !print*, 'Overlap matrix after' + call dgemm( 'T', 'N', m, m, n, 1.d0 & + , L, size(L, 1), R, size(R, 1) & + , 0.d0, S, size(S, 1) ) + accu_nd = 0.d0 + do j = 1, m + !write(*,'(100(F16.10,X))') S(1:m,j) + do k = 1, m + if(j==k) cycle + accu_nd += dabs(S(j,k)) + enddo + enddo + !print*,' accu_nd after = ', accu_nd + if(accu_nd .gt. 1d-12) then + print*, ' your strategy for degenerates orbitals failed !' + print*, m, 'deg on', i + stop + endif + + deallocate(S) + + ! --- + + !call impose_orthog_svd(n, m, L) !call impose_orthog_GramSchmidt(n, m, L) !call impose_orthog_GramSchmidt(n, m, R) @@ -2120,8 +2172,7 @@ subroutine impose_biorthog_degen_eigvec(n, e0, L0, R0) !call bi_ortho_s_inv_half(m, L, R, S_inv_half) !deallocate(S, S_inv_half) - call impose_biorthog_svd(n, m, L, R) -! call impose_biorthog_inverse(n, m, L, R) + !call impose_biorthog_inverse(n, m, L, R) !call impose_biorthog_qr(n, m, thr_d, thr_nd, L, R) @@ -2136,9 +2187,8 @@ subroutine impose_biorthog_degen_eigvec(n, e0, L0, R0) endif enddo -! call impose_biorthog_inverse(n, n, L0, R0) -end subroutine impose_biorthog_degen_eigvec +end ! --- @@ -2232,7 +2282,7 @@ subroutine impose_orthog_biorthog_degen_eigvec(n, thr_d, thr_nd, e0, L0, R0) endif enddo -end subroutine impose_orthog_biorthog_degen_eigvec +end ! 
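! Note on the degenerate blocks handled above: within each block the overlap
! S = L^T R is decomposed as S = U \Sigma V^T and the vectors are rotated as
! L <-- L U \Sigma^{-1/2}, R <-- R V \Sigma^{-1/2} (impose_biorthog_svd), so that
! L^T R = \Sigma^{-1/2} U^T (U \Sigma V^T) V \Sigma^{-1/2} = 1 within the block.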
--- @@ -2370,7 +2420,7 @@ subroutine impose_unique_biorthog_degen_eigvec(n, thr_d, thr_nd, e0, C0, W0, L0, endif enddo -end subroutine impose_unique_biorthog_degen_eigvec +end ! --- @@ -2453,7 +2503,7 @@ subroutine max_overlap_qr(m, n, S0, V) ! --- return -end subroutine max_overlap_qr +end ! --- @@ -2488,7 +2538,7 @@ subroutine max_overlap_invprod(n, m, S, V) deallocate(tmp, invS) return -end subroutine max_overlap_invprod +end ! --- @@ -2504,18 +2554,16 @@ subroutine impose_biorthog_svd(n, m, L, R) double precision, allocatable :: S(:,:), tmp(:,:) double precision, allocatable :: U(:,:), V(:,:), Vt(:,:), D(:) - ! --- - allocate(S(m,m)) call dgemm( 'T', 'N', m, m, n, 1.d0 & , L, size(L, 1), R, size(R, 1) & , 0.d0, S, size(S, 1) ) - print *, ' overlap bef SVD: ' - do i = 1, m - write(*, '(1000(F16.10,X))') S(i,:) - enddo + !print *, ' overlap bef SVD: ' + !do i = 1, m + ! write(*, '(1000(F16.10,X))') S(i,:) + !enddo ! --- @@ -2552,51 +2600,32 @@ subroutine impose_biorthog_svd(n, m, L, R) ! --- - allocate(tmp(n,m)) + ! R <-- R x V x D^{-0.5} + ! L <-- L x U x D^{-0.5} - ! tmp <-- R x V - call dgemm( 'N', 'N', n, m, m, 1.d0 & - , R, size(R, 1), V, size(V, 1) & - , 0.d0, tmp, size(tmp, 1) ) - deallocate(V) - ! R <-- tmp x sigma^-0.5 - do j = 1, m - do i = 1, n - R(i,j) = tmp(i,j) * D(j) - enddo - enddo - - ! tmp <-- L x U - call dgemm( 'N', 'N', n, m, m, 1.d0 & - , L, size(L, 1), U, size(U, 1) & - , 0.d0, tmp, size(tmp, 1) ) - deallocate(U) - ! L <-- tmp x sigma^-0.5 - do j = 1, m - do i = 1, n - L(i,j) = tmp(i,j) * D(j) - enddo - enddo - - deallocate(D, tmp) - - ! --- - - allocate(S(m,m)) - call dgemm( 'T', 'N', m, m, n, 1.d0 & - , L, size(L, 1), R, size(R, 1) & - , 0.d0, S, size(S, 1) ) - - print *, ' overlap aft SVD: ' do i = 1, m - write(*, '(1000(F16.10,X))') S(i,:) + do j = 1, m + V(j,i) = V(j,i) * D(i) + U(j,i) = U(j,i) * D(i) + enddo enddo - deallocate(S) + allocate(tmp(n,m)) + tmp(:,:) = R(:,:) + call dgemm( 'N', 'N', n, m, m, 1.d0 & + , tmp, size(tmp, 1), V, size(V, 1) & + , 0.d0, R, size(R, 1)) - ! --- + tmp(:,:) = L(:,:) + call dgemm( 'N', 'N', n, m, m, 1.d0 & + , tmp, size(tmp, 1), U, size(U, 1) & + , 0.d0, L, size(L, 1)) -end subroutine impose_biorthog_svd + deallocate(tmp, U, V, D) + +end + +! --- subroutine impose_biorthog_inverse(n, m, L, R) @@ -2639,8 +2668,7 @@ subroutine impose_biorthog_inverse(n, m, L, R) deallocate(S,Lt) -end subroutine impose_biorthog_svd - +end ! --- @@ -2802,7 +2830,7 @@ subroutine impose_weighted_biorthog_qr(m, n, thr_d, thr_nd, Vl, W, Vr) call check_weighted_biorthog_binormalize(m, n, Vl, W, Vr, thr_d, thr_nd, .false.) return -end subroutine impose_weighted_biorthog_qr +end ! --- @@ -2919,7 +2947,7 @@ subroutine check_weighted_biorthog_binormalize(n, m, Vl, W, Vr, thr_d, thr_nd, s stop endif -end subroutine check_weighted_biorthog_binormalize +end ! --- @@ -3037,7 +3065,7 @@ subroutine impose_weighted_biorthog_svd(n, m, overlap, L, R) deallocate(S) return -end subroutine impose_weighted_biorthog_svd +end ! 
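! Reasoning behind the rewritten impose_biorthog_svd above: with S = L^T R and the SVD
! S = U sigma V^T, the updates L <- L U sigma^{-1/2} and R <- R V sigma^{-1/2} give
! L^T R = sigma^{-1/2} U^T (U sigma V^T) V sigma^{-1/2} = 1, i.e. exact bi-orthonormality,
! provided no singular value vanishes. A compact self-contained sketch of that recipe
! (assumed helper name; plain LAPACK dgemm/dgesvd only):

subroutine biorthog_svd_sketch(n, m, L, R)
  implicit none
  integer,          intent(in)    :: n, m
  double precision, intent(inout) :: L(n,m), R(n,m)
  integer                         :: i, info, lwork
  double precision, allocatable   :: S(:,:), U(:,:), Vt(:,:), D(:), work(:), tmp(:,:)

  allocate(S(m,m), U(m,m), Vt(m,m), D(m), tmp(n,m))
  call dgemm('T', 'N', m, m, n, 1.d0, L, n, R, n, 0.d0, S, m)      ! S = L^T R

  lwork = 5*m
  allocate(work(lwork))
  call dgesvd('A', 'A', m, m, S, m, D, U, m, Vt, m, work, lwork, info)
  if(info .ne. 0) stop 'dgesvd failed in biorthog_svd_sketch'

  do i = 1, m                        ! assumes sigma_i > 0 for all i
    D(i)    = 1.d0 / dsqrt(D(i))
    U(:,i)  = U(:,i)  * D(i)         ! U   <- U sigma^{-1/2}
    Vt(i,:) = Vt(i,:) * D(i)         ! V^T <- sigma^{-1/2} V^T
  enddo

  tmp(:,:) = L(:,:)
  call dgemm('N', 'N', n, m, m, 1.d0, tmp, n, U,  m, 0.d0, L, n)   ! L <- L U sigma^{-1/2}
  tmp(:,:) = R(:,:)
  call dgemm('N', 'T', n, m, m, 1.d0, tmp, n, Vt, m, 0.d0, R, n)   ! R <- R V sigma^{-1/2}

  deallocate(S, U, Vt, D, work, tmp)
end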
--- diff --git a/plugins/local/tc_bi_ortho/compute_deltamu_right.irp.f b/plugins/local/tc_bi_ortho/compute_deltamu_right.irp.f index ab9dc093..1142658d 100644 --- a/plugins/local/tc_bi_ortho/compute_deltamu_right.irp.f +++ b/plugins/local/tc_bi_ortho/compute_deltamu_right.irp.f @@ -24,10 +24,6 @@ subroutine delta_right() integer :: k double precision, allocatable :: delta(:,:) - print *, j1b_type - print *, j1b_pen - print *, mu_erf - allocate( delta(N_det,N_states) ) delta = 0.d0 @@ -48,7 +44,7 @@ subroutine delta_right() deallocate(delta) return -end subroutine delta_right +end ! --- diff --git a/plugins/local/tc_bi_ortho/print_tc_energy.irp.f b/plugins/local/tc_bi_ortho/print_tc_energy.irp.f index 7bca72a1..ef38cbcc 100644 --- a/plugins/local/tc_bi_ortho/print_tc_energy.irp.f +++ b/plugins/local/tc_bi_ortho/print_tc_energy.irp.f @@ -17,8 +17,14 @@ program print_tc_energy read_wf = .True. touch read_wf - PROVIDE j1b_type - print*, 'j1b_type = ', j1b_type + + PROVIDE j2e_type + PROVIDE j1e_type + PROVIDE env_type + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type call write_tc_energy() diff --git a/plugins/local/tc_bi_ortho/save_tc_bi_ortho_nat.irp.f b/plugins/local/tc_bi_ortho/save_tc_bi_ortho_nat.irp.f index ffcd9b22..6b3acce6 100644 --- a/plugins/local/tc_bi_ortho/save_tc_bi_ortho_nat.irp.f +++ b/plugins/local/tc_bi_ortho/save_tc_bi_ortho_nat.irp.f @@ -17,7 +17,7 @@ program tc_natorb_bi_ortho my_n_pt_a_grid = tc_grid1_a touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - if(j1b_type .ge. 100) then + if(tc_integ_type .eq. "numeric") then my_extra_grid_becke = .True. PROVIDE tc_grid2_a tc_grid2_r my_n_pt_r_extra_grid = tc_grid2_r diff --git a/plugins/local/tc_bi_ortho/slater_tc_slow.irp.f b/plugins/local/tc_bi_ortho/slater_tc_slow.irp.f index b1751069..caf7d665 100644 --- a/plugins/local/tc_bi_ortho/slater_tc_slow.irp.f +++ b/plugins/local/tc_bi_ortho/slater_tc_slow.irp.f @@ -27,7 +27,7 @@ subroutine htilde_mu_mat_bi_ortho_tot_slow(key_j, key_i, Nint, htot) call htilde_mu_mat_bi_ortho_slow(key_j, key_i, Nint, hmono, htwoe, hthree, htot) endif -end subroutine htilde_mu_mat_bi_ortho_tot_slow +end ! -- @@ -260,7 +260,6 @@ subroutine single_htilde_mu_mat_bi_ortho_slow(Nint, key_j, key_i, hmono, htwoe, ! ! PROVIDE core_bitmask core_fock_operator mo_integrals_erf_map -! PROVIDE j1b_gauss other_spin(1) = 2 other_spin(2) = 1 @@ -295,15 +294,6 @@ subroutine single_htilde_mu_mat_bi_ortho_slow(Nint, key_j, key_i, hmono, htwoe, hmono = mo_bi_ortho_tc_one_e(p1,h1) * phase -! if(j1b_gauss .eq. 1) then -! print*,'j1b not implemented for bi ortho TC' -! print*,'stopping ....' -! stop -! !hmono += ( mo_j1b_gauss_hermI (h1,p1) & -! ! + mo_j1b_gauss_hermII (h1,p1) & -! ! + mo_j1b_gauss_nonherm(h1,p1) ) * phase -! endif - ! if(core_tc_op)then ! print*,'core_tc_op not already taken into account for bi ortho' ! print*,'stopping ...' diff --git a/plugins/local/tc_bi_ortho/tc_bi_ortho.irp.f b/plugins/local/tc_bi_ortho/tc_bi_ortho.irp.f index e27672a2..64982ab6 100644 --- a/plugins/local/tc_bi_ortho/tc_bi_ortho.irp.f +++ b/plugins/local/tc_bi_ortho/tc_bi_ortho.irp.f @@ -13,7 +13,7 @@ program tc_bi_ortho my_n_pt_a_grid = tc_grid1_a touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - if(j1b_type .ge. 100) then + if(tc_integ_type .eq. "numeric") then my_extra_grid_becke = .True. 
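    ! The second ("extra") Becke grid and the tc_grid2_r / tc_grid2_a keywords are only
    ! set up on this branch, i.e. when tc_integ_type = "numeric" and the TC integrals are
    ! evaluated by quadrature (see the [ analytic | numeric | semi-analytic ] choices
    ! documented for tc_integ_type in tc_keywords/EZFIO.cfg).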
PROVIDE tc_grid2_a tc_grid2_r my_n_pt_r_extra_grid = tc_grid2_r diff --git a/plugins/local/tc_bi_ortho/tc_som.irp.f b/plugins/local/tc_bi_ortho/tc_som.irp.f index 427508d2..1d11c81b 100644 --- a/plugins/local/tc_bi_ortho/tc_som.irp.f +++ b/plugins/local/tc_bi_ortho/tc_som.irp.f @@ -17,12 +17,6 @@ program tc_som my_n_pt_a_grid = tc_grid1_a touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - PROVIDE mu_erf - print *, ' mu = ', mu_erf - PROVIDE j1b_type - print *, ' j1b_type = ', j1b_type - print *, j1b_pen - read_wf = .true. touch read_wf diff --git a/plugins/local/tc_keywords/EZFIO.cfg b/plugins/local/tc_keywords/EZFIO.cfg index ac2cfda2..93ff790f 100644 --- a/plugins/local/tc_keywords/EZFIO.cfg +++ b/plugins/local/tc_keywords/EZFIO.cfg @@ -130,30 +130,6 @@ doc: if +1: only positive is selected, -1: only negative is selected, :0 both po interface: ezfio,provider,ocaml default: 0 -[j1b_pen] -type: double precision -doc: exponents of the 1-body Jastrow -interface: ezfio -size: (nuclei.nucl_num) - -[j1b_pen_coef] -type: double precision -doc: coefficients of the 1-body Jastrow -interface: ezfio -size: (nuclei.nucl_num) - -[j1b_coeff] -type: double precision -doc: coeff of the 1-body Jastrow -interface: ezfio -size: (nuclei.nucl_num) - -[j1b_type] -type: integer -doc: type of 1-body Jastrow -interface: ezfio, provider, ocaml -default: 0 - [mu_r_ct] type: double precision doc: a parameter used to define mu(r) @@ -184,12 +160,6 @@ doc: If |true|, maximize the overlap between orthogonalized left- and right eige interface: ezfio,provider,ocaml default: False -[ng_fit_jast] -type: integer -doc: nb of Gaussians used to fit Jastrow fcts -interface: ezfio,provider,ocaml -default: 20 - [max_dim_diis_tcscf] type: integer doc: Maximum size of the DIIS extrapolation procedure @@ -282,7 +252,7 @@ default: True [tc_grid1_a] type: integer -doc: size of angular grid over r1 +doc: size of angular grid over r1: [ 6 | 14 | 26 | 38 | 50 | 74 | 86 | 110 | 146 | 170 | 194 | 230 | 266 | 302 | 350 | 434 | 590 | 770 | 974 | 1202 | 1454 | 1730 | 2030 | 2354 | 2702 | 3074 | 3470 | 3890 | 4334 | 4802 | 5294 | 5810 ] interface: ezfio,provider,ocaml default: 50 @@ -294,13 +264,19 @@ default: 30 [tc_grid2_a] type: integer -doc: size of angular grid over r2 +doc: size of angular grid over r2: [ 6 | 14 | 26 | 38 | 50 | 74 | 86 | 110 | 146 | 170 | 194 | 230 | 266 | 302 | 350 | 434 | 590 | 770 | 974 | 1202 | 1454 | 1730 | 2030 | 2354 | 2702 | 3074 | 3470 | 3890 | 4334 | 4802 | 5294 | 5810 ] interface: ezfio,provider,ocaml -default: 194 +default: 266 [tc_grid2_r] type: integer doc: size of radial grid over r2 interface: ezfio,provider,ocaml -default: 50 +default: 70 + +[tc_integ_type] +type: character*(32) +doc: approach used to evaluate TC integrals [ analytic | numeric | semi-analytic ] +interface: ezfio,ocaml,provider +default: semi-analytic diff --git a/plugins/local/tc_keywords/j1b_pen.irp.f b/plugins/local/tc_keywords/j1b_pen.irp.f deleted file mode 100644 index d509fc7e..00000000 --- a/plugins/local/tc_keywords/j1b_pen.irp.f +++ /dev/null @@ -1,155 +0,0 @@ - -! --- - - BEGIN_PROVIDER [ double precision, j1b_pen , (nucl_num) ] -&BEGIN_PROVIDER [ double precision, j1b_pen_coef, (nucl_num) ] - - BEGIN_DOC - ! parameters of the 1-body Jastrow - END_DOC - - implicit none - logical :: exists - integer :: i - integer :: ierr - - PROVIDE ezfio_filename - - ! 
--- - - if (mpi_master) then - call ezfio_has_tc_keywords_j1b_pen(exists) - endif - - IRP_IF MPI_DEBUG - print *, irp_here, mpi_rank - call MPI_BARRIER(MPI_COMM_WORLD, ierr) - IRP_ENDIF - - IRP_IF MPI - include 'mpif.h' - call MPI_BCAST(j1b_pen, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_pen with MPI' - endif - IRP_ENDIF - - if (exists) then - if (mpi_master) then - write(6,'(A)') '.. >>>>> [ IO READ: j1b_pen ] <<<<< ..' - call ezfio_get_tc_keywords_j1b_pen(j1b_pen) - IRP_IF MPI - call MPI_BCAST(j1b_pen, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_pen with MPI' - endif - IRP_ENDIF - endif - else - do i = 1, nucl_num - j1b_pen(i) = 1d5 - enddo - endif - - ! --- - - if (mpi_master) then - call ezfio_has_tc_keywords_j1b_pen_coef(exists) - endif - - IRP_IF MPI_DEBUG - print *, irp_here, mpi_rank - call MPI_BARRIER(MPI_COMM_WORLD, ierr) - IRP_ENDIF - - IRP_IF MPI - call MPI_BCAST(j1b_pen_coef, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_pen_coef with MPI' - endif - IRP_ENDIF - - if (exists) then - if (mpi_master) then - write(6,'(A)') '.. >>>>> [ IO READ: j1b_pen_coef ] <<<<< ..' - call ezfio_get_tc_keywords_j1b_pen_coef(j1b_pen_coef) - IRP_IF MPI - call MPI_BCAST(j1b_pen_coef, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_pen_coef with MPI' - endif - IRP_ENDIF - endif - else - do i = 1, nucl_num - j1b_pen_coef(i) = 1d0 - enddo - endif - - ! --- - - print *, ' parameters for nuclei jastrow' - print *, ' i, Z, j1b_pen, j1b_pen_coef' - do i = 1, nucl_num - write(*,'(I4, 2x, 3(E15.7, 2X))') i, nucl_charge(i), j1b_pen(i), j1b_pen_coef(i) - enddo - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, j1b_coeff, (nucl_num) ] - - BEGIN_DOC - ! coefficients of the 1-body Jastrow - END_DOC - - implicit none - logical :: exists - - PROVIDE ezfio_filename - - if (mpi_master) then - call ezfio_has_tc_keywords_j1b_coeff(exists) - endif - - IRP_IF MPI_DEBUG - print *, irp_here, mpi_rank - call MPI_BARRIER(MPI_COMM_WORLD, ierr) - IRP_ENDIF - - IRP_IF MPI - include 'mpif.h' - integer :: ierr - call MPI_BCAST(j1b_coeff, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_coeff with MPI' - endif - IRP_ENDIF - - if (exists) then - - if (mpi_master) then - write(6,'(A)') '.. >>>>> [ IO READ: j1b_coeff ] <<<<< ..' - call ezfio_get_tc_keywords_j1b_coeff(j1b_coeff) - IRP_IF MPI - call MPI_BCAST(j1b_coeff, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) - if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_coeff with MPI' - endif - IRP_ENDIF - endif - - else - - integer :: i - do i = 1, nucl_num - j1b_coeff(i) = 0d5 - enddo - - endif - -END_PROVIDER - -! 
--- - diff --git a/plugins/local/tc_scf/fock_vartc.irp.f b/plugins/local/tc_scf/fock_vartc.irp.f index 03899b07..2b4a57e5 100644 --- a/plugins/local/tc_scf/fock_vartc.irp.f +++ b/plugins/local/tc_scf/fock_vartc.irp.f @@ -13,9 +13,9 @@ two_e_vartc_integral_alpha = 0.d0 two_e_vartc_integral_beta = 0.d0 - !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (i, j, k, l, density_a, density_b, density, tmp_a, tmp_b, I_coul, I_kjli) & - !$OMP SHARED (ao_num, TCSCF_density_matrix_ao_alpha, TCSCF_density_matrix_ao_beta, ao_two_e_vartc_tot, & + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, density_a, density_b, density, tmp_a, tmp_b, I_coul, I_kjli) & + !$OMP SHARED (ao_num, TCSCF_density_matrix_ao_alpha, TCSCF_density_matrix_ao_beta, ao_two_e_tc_tot, & !$OMP two_e_vartc_integral_alpha, two_e_vartc_integral_beta) allocate(tmp_a(ao_num,ao_num), tmp_b(ao_num,ao_num)) @@ -31,8 +31,8 @@ do i = 1, ao_num do k = 1, ao_num - I_coul = density * ao_two_e_vartc_tot(k,i,l,j) - I_kjli = ao_two_e_vartc_tot(k,j,l,i) + I_coul = density * ao_two_e_tc_tot(k,i,l,j) + I_kjli = ao_two_e_tc_tot(k,j,l,i) tmp_a(k,i) += I_coul - density_a * I_kjli tmp_b(k,i) += I_coul - density_b * I_kjli diff --git a/plugins/local/tc_scf/print_tcscf_energy.irp.f b/plugins/local/tc_scf/print_tcscf_energy.irp.f index 05b8df23..6f9afd9a 100644 --- a/plugins/local/tc_scf/print_tcscf_energy.irp.f +++ b/plugins/local/tc_scf/print_tcscf_energy.irp.f @@ -24,11 +24,15 @@ subroutine main() implicit none double precision :: etc_tot, etc_1e, etc_2e, etc_3e - PROVIDE mu_erf - PROVIDE j1b_type + PROVIDE j2e_type mu_erf + PROVIDE j1e_type j1e_coef j1e_expo + PROVIDE env_type env_coef env_expo + + print*, ' j2e_type = ', j2e_type + print*, ' j1e_type = ', j1e_type + print*, ' env_type = ', env_type print*, ' mu_erf = ', mu_erf - print*, ' j1b_type = ', j1b_type etc_tot = TC_HF_energy etc_1e = TC_HF_one_e_energy diff --git a/plugins/local/tc_scf/tc_scf.irp.f b/plugins/local/tc_scf/tc_scf.irp.f index 22f66484..d8c5ab66 100644 --- a/plugins/local/tc_scf/tc_scf.irp.f +++ b/plugins/local/tc_scf/tc_scf.irp.f @@ -7,11 +7,20 @@ program tc_scf END_DOC implicit none + integer :: i + logical :: good_angles + + PROVIDE j1e_type + PROVIDE j2e_type + PROVIDE tcscf_algorithm + PROVIDE var_tc + + print *, ' TC-SCF with:' + print *, ' j1e_type = ', j1e_type + print *, ' j2e_type = ', j2e_type write(json_unit,json_array_open_fmt) 'tc-scf' - print *, ' starting ...' - my_grid_becke = .True. PROVIDE tc_grid1_a tc_grid1_r my_n_pt_r_grid = tc_grid1_r @@ -22,13 +31,7 @@ program tc_scf call write_int(6, my_n_pt_a_grid, 'angular external grid over') - PROVIDE mu_erf - print *, ' mu = ', mu_erf - PROVIDE j1b_type - print *, ' j1b_type = ', j1b_type - print *, j1b_pen - - if(j1b_type .ge. 100) then + if(tc_integ_type .eq. "numeric") then my_extra_grid_becke = .True. PROVIDE tc_grid2_a tc_grid2_r my_n_pt_r_extra_grid = tc_grid2_r @@ -42,8 +45,6 @@ program tc_scf !call create_guess() !call orthonormalize_mos() - PROVIDE tcscf_algorithm - PROVIDE var_tc if(var_tc) then @@ -69,7 +70,16 @@ program tc_scf stop endif - call minimize_tc_orb_angles() + PROVIDE Fock_matrix_tc_diag_mo_tot + print*, ' Eigenvalues:' + do i = 1, mo_num + print*, i, Fock_matrix_tc_diag_mo_tot(i) + enddo + + ! TODO + ! 
rotate angles in separate code only if necessary + !call minimize_tc_orb_angles() + call print_energy_and_mos(good_angles) endif diff --git a/plugins/local/tc_scf/test_int.irp.f b/plugins/local/tc_scf/test_int.irp.f index 4aa67d04..e135fcd8 100644 --- a/plugins/local/tc_scf/test_int.irp.f +++ b/plugins/local/tc_scf/test_int.irp.f @@ -1,7 +1,7 @@ program test_ints BEGIN_DOC -! TODO : Put the documentation of the program here + ! TODO : Put the documentation of the program here END_DOC implicit none @@ -20,41 +20,31 @@ program test_ints touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid !! OK -! call routine_int2_u_grad1u_j1b2 +! call routine_int2_u_grad1u_env2 ! OK -! call routine_v_ij_erf_rk_cst_mu_j1b +! call routine_v_ij_erf_rk_cst_mu_env ! OK -! call routine_x_v_ij_erf_rk_cst_mu_j1b +! call routine_x_v_ij_erf_rk_cst_mu_env ! OK -! call routine_int2_u2_j1b2 +! call routine_int2_u2_env2 ! OK -! call routine_int2_u_grad1u_x_j1b2 +! call routine_int2_u_grad1u_x_env2 ! OK -! call routine_int2_grad1u2_grad2u2_j1b2 -! call routine_int2_u_grad1u_j1b2 -! call test_total_grad_lapl -! call test_total_grad_square +! call routine_int2_grad1u2_grad2u2_env2 +! call routine_int2_u_grad1u_env2 ! call test_int2_grad1_u12_ao_test -! call routine_v_ij_u_cst_mu_j1b_test -! call test_ao_tc_int_chemist +! call routine_v_ij_u_cst_mu_env_test ! call test_grid_points_ao -! call test_tc_scf !call test_int_gauss !call test_fock_3e_uhf_ao() !call test_fock_3e_uhf_mo() - !call test_tc_grad_and_lapl_ao() - !call test_tc_grad_square_ao() - !call test_two_e_tc_non_hermit_integral() -! call test_tc_grad_square_ao_test() - !!PROVIDE TC_HF_energy VARTC_HF_energy !!print *, ' TC_HF_energy = ', TC_HF_energy !!print *, ' VARTC_HF_energy = ', VARTC_HF_energy -! call test_old_ints call test_fock_3e_uhf_mo_cs() call test_fock_3e_uhf_mo_a() @@ -64,47 +54,21 @@ end ! --- -subroutine test_tc_scf - implicit none - integer :: i -! provide int2_u_grad1u_x_j1b2_test - provide x_v_ij_erf_rk_cst_mu_j1b_test -! do i = 1, ng_fit_jast -! print*,expo_gauss_1_erf_x_2(i),coef_gauss_1_erf_x_2(i) -! enddo -! provide tc_grad_square_ao_test -! provide tc_grad_and_lapl_ao_test -! provide int2_u_grad1u_x_j1b2_test -! provide x_v_ij_erf_rk_cst_mu_j1b_test -! print*,'TC_HF_energy = ',TC_HF_energy -! print*,'grad_non_hermit = ',grad_non_hermit -end - -subroutine test_ao_tc_int_chemist - implicit none - provide ao_tc_int_chemist -! provide ao_tc_int_chemist_test -! provide tc_grad_square_ao_test -! provide tc_grad_and_lapl_ao_test -end - -! --- - -subroutine routine_test_j1b +subroutine routine_test_env implicit none integer :: i,icount,j icount = 0 - do i = 1, List_all_comb_b3_size - if(dabs(List_all_comb_b3_coef(i)).gt.1.d-10)then + do i = 1, List_env1s_square_size + if(dabs(List_env1s_square_coef(i)).gt.1.d-10)then print*,'' - print*,List_all_comb_b3_expo(i),List_all_comb_b3_coef(i) - print*,List_all_comb_b3_cent(1:3,i) + print*,List_env1s_square_expo(i),List_env1s_square_coef(i) + print*,List_env1s_square_cent(1:3,i) print*,'' icount += 1 endif enddo - print*,'List_all_comb_b3_coef,icount = ',List_all_comb_b3_size,icount + print*,'List_env1s_square_coef,icount = ',List_env1s_square_size,icount do i = 1, ao_num do j = 1, ao_num do icount = 1, List_comb_thr_b3_size(j,i) @@ -116,11 +80,11 @@ subroutine routine_test_j1b ! 
enddo enddo enddo - print*,'max_List_comb_thr_b3_size = ',max_List_comb_thr_b3_size,List_all_comb_b3_size + print*,'max_List_comb_thr_b3_size = ',max_List_comb_thr_b3_size,List_env1s_square_size end -subroutine routine_int2_u_grad1u_j1b2 +subroutine routine_int2_u_grad1u_env2 implicit none integer :: i,j,ipoint,k,l double precision :: weight,accu_relat, accu_abs, contrib @@ -136,8 +100,8 @@ subroutine routine_int2_u_grad1u_j1b2 do l = 1, ao_num do i = 1, ao_num do j = 1, ao_num - array(j,i,l,k) += int2_u_grad1u_j1b2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += int2_u_grad1u_j1b2(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += int2_u_grad1u_env2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += int2_u_grad1u_env2(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -160,7 +124,7 @@ subroutine routine_int2_u_grad1u_j1b2 enddo print*,'******' print*,'******' - print*,'routine_int2_u_grad1u_j1b2' + print*,'routine_int2_u_grad1u_env2' print*,'accu_abs = ',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 @@ -168,7 +132,7 @@ subroutine routine_int2_u_grad1u_j1b2 end -subroutine routine_v_ij_erf_rk_cst_mu_j1b +subroutine routine_v_ij_erf_rk_cst_mu_env implicit none integer :: i,j,ipoint,k,l double precision :: weight,accu_relat, accu_abs, contrib @@ -183,8 +147,8 @@ subroutine routine_v_ij_erf_rk_cst_mu_j1b do l = 1, ao_num do i = 1, ao_num do j = 1, ao_num - array(j,i,l,k) += v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += v_ij_erf_rk_cst_mu_env_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += v_ij_erf_rk_cst_mu_env(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -207,7 +171,7 @@ subroutine routine_v_ij_erf_rk_cst_mu_j1b enddo print*,'******' print*,'******' - print*,'routine_v_ij_erf_rk_cst_mu_j1b' + print*,'routine_v_ij_erf_rk_cst_mu_env' print*,'accu_abs = ',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 @@ -216,7 +180,7 @@ subroutine routine_v_ij_erf_rk_cst_mu_j1b end -subroutine routine_x_v_ij_erf_rk_cst_mu_j1b +subroutine routine_x_v_ij_erf_rk_cst_mu_env implicit none integer :: i,j,ipoint,k,l,m double precision :: weight,accu_relat, accu_abs, contrib @@ -232,8 +196,8 @@ subroutine routine_x_v_ij_erf_rk_cst_mu_j1b do i = 1, ao_num do j = 1, ao_num do m = 1, 3 - array(j,i,l,k) += x_v_ij_erf_rk_cst_mu_j1b_test(j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += x_v_ij_erf_rk_cst_mu_j1b (j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += x_v_ij_erf_rk_cst_mu_env_test(j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += x_v_ij_erf_rk_cst_mu_env (j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -258,7 +222,7 @@ subroutine routine_x_v_ij_erf_rk_cst_mu_j1b print*,'******' print*,'******' - print*,'routine_x_v_ij_erf_rk_cst_mu_j1b' + print*,'routine_x_v_ij_erf_rk_cst_mu_env' print*,'accu_abs = 
',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 @@ -268,7 +232,7 @@ end -subroutine routine_v_ij_u_cst_mu_j1b_test +subroutine routine_v_ij_u_cst_mu_env_test implicit none integer :: i,j,ipoint,k,l double precision :: weight,accu_relat, accu_abs, contrib @@ -283,8 +247,8 @@ subroutine routine_v_ij_u_cst_mu_j1b_test do l = 1, ao_num do i = 1, ao_num do j = 1, ao_num - array(j,i,l,k) += v_ij_u_cst_mu_j1b_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += v_ij_u_cst_mu_j1b_fit (j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += v_ij_u_cst_mu_env_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += v_ij_u_cst_mu_env_fit (j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -307,15 +271,13 @@ subroutine routine_v_ij_u_cst_mu_j1b_test enddo print*,'******' print*,'******' - print*,'routine_v_ij_u_cst_mu_j1b_test' + print*,'routine_v_ij_u_cst_mu_env_test' print*,'accu_abs = ',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 - - end -subroutine routine_int2_grad1u2_grad2u2_j1b2 +subroutine routine_int2_grad1u2_grad2u2_env2 implicit none integer :: i,j,ipoint,k,l integer :: ii , jj @@ -341,17 +303,17 @@ subroutine routine_int2_grad1u2_grad2u2_j1b2 do l = 1, ao_num do i = 1, ao_num do j = 1, ao_num - array(j,i,l,k) += int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight -! !array(j,i,l,k) += int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight -! array_ref(j,i,l,k) += int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += int2_grad1u2_grad2u2_env2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight +! !array(j,i,l,k) += int2_grad1u2_grad2u2_env2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight +! array_ref(j,i,l,k) += int2_grad1u2_grad2u2_env2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight ! !array(j,i,l,k) += ints(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight -! array_ref(j,i,l,k) += int2_grad1u2_grad2u2_j1b2(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight +! array_ref(j,i,l,k) += int2_grad1u2_grad2u2_env2(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight array_ref(j,i,l,k) += ints(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight -! if(dabs(int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint)).gt.1.d-6)then -! if(dabs(int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) - int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint)).gt.1.d-6)then +! if(dabs(int2_grad1u2_grad2u2_env2_test(j,i,ipoint)).gt.1.d-6)then +! if(dabs(int2_grad1u2_grad2u2_env2_test(j,i,ipoint) - int2_grad1u2_grad2u2_env2_test(j,i,ipoint)).gt.1.d-6)then ! print*,j,i,ipoint -! print*,int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) , int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint), dabs(int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint) - int2_grad1u2_grad2u2_j1b2_test(j,i,ipoint)) -! print*,int2_grad1u2_grad2u2_j1b2_test(i,j,ipoint) , int2_grad1u2_grad2u2_j1b2_test(i,j,ipoint), dabs(int2_grad1u2_grad2u2_j1b2_test(i,j,ipoint) - int2_grad1u2_grad2u2_j1b2_test(i,j,ipoint)) +! 
print*,int2_grad1u2_grad2u2_env2_test(j,i,ipoint) , int2_grad1u2_grad2u2_env2_test(j,i,ipoint), dabs(int2_grad1u2_grad2u2_env2_test(j,i,ipoint) - int2_grad1u2_grad2u2_env2_test(j,i,ipoint)) +! print*,int2_grad1u2_grad2u2_env2_test(i,j,ipoint) , int2_grad1u2_grad2u2_env2_test(i,j,ipoint), dabs(int2_grad1u2_grad2u2_env2_test(i,j,ipoint) - int2_grad1u2_grad2u2_env2_test(i,j,ipoint)) ! stop ! endif ! endif @@ -394,7 +356,7 @@ subroutine routine_int2_grad1u2_grad2u2_j1b2 end -subroutine routine_int2_u2_j1b2 +subroutine routine_int2_u2_env2 implicit none integer :: i,j,ipoint,k,l double precision :: weight,accu_relat, accu_abs, contrib @@ -410,8 +372,8 @@ subroutine routine_int2_u2_j1b2 do l = 1, ao_num do i = 1, ao_num do j = 1, ao_num - array(j,i,l,k) += int2_u2_j1b2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += int2_u2_j1b2(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += int2_u2_env2_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += int2_u2_env2(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -434,7 +396,7 @@ subroutine routine_int2_u2_j1b2 enddo print*,'******' print*,'******' - print*,'routine_int2_u2_j1b2' + print*,'routine_int2_u2_env2' print*,'accu_abs = ',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 @@ -443,7 +405,7 @@ subroutine routine_int2_u2_j1b2 end -subroutine routine_int2_u_grad1u_x_j1b2 +subroutine routine_int2_u_grad1u_x_env2 implicit none integer :: i,j,ipoint,k,l,m double precision :: weight,accu_relat, accu_abs, contrib @@ -460,8 +422,8 @@ subroutine routine_int2_u_grad1u_x_j1b2 do i = 1, ao_num do j = 1, ao_num do m = 1, 3 - array(j,i,l,k) += int2_u_grad1u_x_j1b2_test(j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += int2_u_grad1u_x_j1b2 (j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += int2_u_grad1u_x_env2_test(j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += int2_u_grad1u_x_env2 (j,i,ipoint,m) * aos_grad_in_r_array_transp(m,k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -485,7 +447,7 @@ subroutine routine_int2_u_grad1u_x_j1b2 enddo print*,'******' print*,'******' - print*,'routine_int2_u_grad1u_x_j1b2' + print*,'routine_int2_u_grad1u_x_env2' print*,'accu_abs = ',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 @@ -493,7 +455,7 @@ subroutine routine_int2_u_grad1u_x_j1b2 end -subroutine routine_v_ij_u_cst_mu_j1b +subroutine routine_v_ij_u_cst_mu_env implicit none integer :: i,j,ipoint,k,l double precision :: weight,accu_relat, accu_abs, contrib @@ -509,8 +471,8 @@ subroutine routine_v_ij_u_cst_mu_j1b do l = 1, ao_num do i = 1, ao_num do j = 1, ao_num - array(j,i,l,k) += v_ij_u_cst_mu_j1b_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight - array_ref(j,i,l,k) += v_ij_u_cst_mu_j1b_fit (j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array(j,i,l,k) += v_ij_u_cst_mu_env_test(j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight + array_ref(j,i,l,k) += v_ij_u_cst_mu_env_fit (j,i,ipoint) * aos_in_r_array(k,ipoint) * aos_in_r_array(l,ipoint) * weight enddo enddo enddo @@ -533,7 +495,7 @@ subroutine routine_v_ij_u_cst_mu_j1b enddo 
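      ! Convention used by all of these routine_* checks: accu_abs accumulates
      ! |test - reference| over the ao_num**4 contracted elements, accu_relat adds the
      ! same difference divided by |reference| whenever the reference is non-negligible,
      ! and both are printed just below normalized by dble(ao_num)**4, i.e. as mean
      ! absolute and mean relative deviations per element.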
print*,'******' print*,'******' - print*,'routine_v_ij_u_cst_mu_j1b' + print*,'routine_v_ij_u_cst_mu_env' print*,'accu_abs = ',accu_abs/dble(ao_num)**4 print*,'accu_relat = ',accu_relat/dble(ao_num)**4 @@ -674,66 +636,10 @@ subroutine test_fock_3e_uhf_mo() ! --- -end subroutine test_fock_3e_uhf_mo +end ! --- -subroutine test_total_grad_lapl - implicit none - integer :: i,j,ipoint,k,l - double precision :: weight,accu_relat, accu_abs, contrib - accu_relat = 0.d0 - accu_abs = 0.d0 - do k = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do j = 1, ao_num - contrib = dabs(tc_grad_and_lapl_ao_test(j,i,l,k) - tc_grad_and_lapl_ao(j,i,l,k)) - accu_abs += contrib - if(dabs(tc_grad_and_lapl_ao(j,i,l,k)).gt.1.d-10)then - accu_relat += contrib/dabs(tc_grad_and_lapl_ao(j,i,l,k)) - endif - enddo - enddo - enddo - enddo - print*,'******' - print*,'******' - print*,' test_total_grad_lapl' - print*,'accu_abs = ',accu_abs/dble(ao_num)**4 - print*,'accu_relat = ',accu_relat/dble(ao_num)**4 - - -end - -subroutine test_total_grad_square - implicit none - integer :: i,j,ipoint,k,l - double precision :: weight,accu_relat, accu_abs, contrib - accu_relat = 0.d0 - accu_abs = 0.d0 - do k = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do j = 1, ao_num - contrib = dabs(tc_grad_square_ao_test(j,i,l,k) - tc_grad_square_ao(j,i,l,k)) - accu_abs += contrib - if(dabs(tc_grad_square_ao(j,i,l,k)).gt.1.d-10)then - accu_relat += contrib/dabs(tc_grad_square_ao(j,i,l,k)) - endif - enddo - enddo - enddo - enddo - print*,'******' - print*,'******' - print*,'test_total_grad_square' - print*,'accu_abs = ',accu_abs/dble(ao_num)**4 - print*,'accu_relat = ',accu_relat/dble(ao_num)**4 - - -end - subroutine test_grid_points_ao implicit none integer :: i,j,ipoint,icount,icount_good, icount_bad,icount_full @@ -748,26 +654,26 @@ subroutine test_grid_points_ao icount_bad = 0 icount_full = 0 do ipoint = 1, n_points_final_grid -! if(dabs(int2_u_grad1u_x_j1b2_test(j,i,ipoint,1)) & -! + dabs(int2_u_grad1u_x_j1b2_test(j,i,ipoint,2)) & -! + dabs(int2_u_grad1u_x_j1b2_test(j,i,ipoint,3)) ) -! if(dabs(int2_u2_j1b2_test(j,i,ipoint)).gt.thr)then +! if(dabs(int2_u_grad1u_x_env2_test(j,i,ipoint,1)) & +! + dabs(int2_u_grad1u_x_env2_test(j,i,ipoint,2)) & +! + dabs(int2_u_grad1u_x_env2_test(j,i,ipoint,3)) ) +! if(dabs(int2_u2_env2_test(j,i,ipoint)).gt.thr)then ! icount += 1 ! endif - if(dabs(v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint)).gt.thr*0.1d0)then + if(dabs(v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint)).gt.thr*0.1d0)then icount_full += 1 endif - if(dabs(v_ij_u_cst_mu_j1b_test(j,i,ipoint)).gt.thr)then + if(dabs(v_ij_u_cst_mu_env_test(j,i,ipoint)).gt.thr)then icount += 1 - if(dabs(v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint)).gt.thr*0.1d0)then + if(dabs(v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint)).gt.thr*0.1d0)then icount_good += 1 else print*,j,i,ipoint - print*,dabs(v_ij_u_cst_mu_j1b_test(j,i,ipoint)),dabs(v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint)),dabs(v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint))/dabs(v_ij_u_cst_mu_j1b_test(j,i,ipoint)) + print*,dabs(v_ij_u_cst_mu_env_test(j,i,ipoint)), dabs(v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint)),dabs(v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint))/dabs(v_ij_u_cst_mu_env_test(j,i,ipoint)) icount_bad += 1 endif endif -! if(dabs(v_ij_u_cst_mu_j1b_ng_1_test(j,i,ipoint)).gt.thr)then +! if(dabs(v_ij_u_cst_mu_env_ng_1_test(j,i,ipoint)).gt.thr)then ! endif enddo print*,'' @@ -822,90 +728,6 @@ end ! 
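! Counters in test_grid_points_ao above, per (j,i) AO pair: icount_full counts grid
! points where the single-Gaussian fit v_ij_u_cst_mu_env_ng_1_test exceeds 0.1*thr,
! icount counts points where the full v_ij_u_cst_mu_env_test exceeds thr, icount_good
! those where both are significant, and icount_bad the points kept by the full
! integrand but missed by the ng=1 fit (those are printed together with their ratio).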
--- -subroutine test_tc_grad_and_lapl_ao() - - implicit none - integer :: i, j, k, l - double precision :: diff_tot, diff, thr_ih, norm - - thr_ih = 1d-10 - - PROVIDE tc_grad_and_lapl_ao tc_grad_and_lapl_ao_loop - - norm = 0.d0 - diff_tot = 0.d0 - do i = 1, ao_num - do j = 1, ao_num - do k = 1, ao_num - do l = 1, ao_num - - diff = dabs(tc_grad_and_lapl_ao_loop(l,k,j,i) - tc_grad_and_lapl_ao(l,k,j,i)) - if(diff .gt. thr_ih) then - print *, ' difference on ', l, k, j, i - print *, ' loops : ', tc_grad_and_lapl_ao_loop(l,k,j,i) - print *, ' lapack: ', tc_grad_and_lapl_ao (l,k,j,i) - !stop - endif - - norm += dabs(tc_grad_and_lapl_ao_loop(l,k,j,i)) - diff_tot += diff - enddo - enddo - enddo - enddo - - print *, ' diff tot = ', diff_tot / norm - print *, ' norm = ', norm - print *, ' ' - - return - -end - -! --- - -subroutine test_tc_grad_square_ao() - - implicit none - integer :: i, j, k, l - double precision :: diff_tot, diff, thr_ih, norm - - thr_ih = 1d-10 - - PROVIDE tc_grad_square_ao tc_grad_square_ao_loop - - norm = 0.d0 - diff_tot = 0.d0 - do i = 1, ao_num - do j = 1, ao_num - do k = 1, ao_num - do l = 1, ao_num - - diff = dabs(tc_grad_square_ao_loop(l,k,j,i) - tc_grad_square_ao(l,k,j,i)) - if(diff .gt. thr_ih) then - print *, ' difference on ', l, k, j, i - print *, ' loops : ', tc_grad_square_ao_loop(l,k,j,i) - print *, ' lapack: ', tc_grad_square_ao (l,k,j,i) - !stop - endif - - norm += dabs(tc_grad_square_ao_loop(l,k,j,i)) - diff_tot += diff - enddo - enddo - enddo - enddo - - print *, ' diff tot = ', diff_tot / norm - print *, ' norm = ', norm - print *, ' ' - - return - -end - -! --- - subroutine test_two_e_tc_non_hermit_integral() implicit none @@ -973,88 +795,6 @@ end ! --- -subroutine test_tc_grad_square_ao_test() - - implicit none - integer :: i, j, k, l - double precision :: diff_tot, diff, thr_ih, norm - - print*, ' test_tc_grad_square_ao_test ' - - thr_ih = 1d-7 - - PROVIDE tc_grad_square_ao_test tc_grad_square_ao_test_ref - - norm = 0.d0 - diff_tot = 0.d0 - do i = 1, ao_num - do j = 1, ao_num - do k = 1, ao_num - do l = 1, ao_num - - - diff = dabs(tc_grad_square_ao_test(l,k,j,i) - tc_grad_square_ao_test_ref(l,k,j,i)) - if(diff .gt. thr_ih) then - print *, ' difference on ', l, k, j, i - print *, ' new : ', tc_grad_square_ao_test (l,k,j,i) - print *, ' ref : ', tc_grad_square_ao_test_ref(l,k,j,i) - !stop - endif - - norm += dabs(tc_grad_square_ao_test_ref(l,k,j,i)) - diff_tot += diff - enddo - enddo - enddo - enddo - - print *, ' diff tot = ', diff_tot / norm - print *, ' norm = ', norm - print *, ' ' - - return -end - -! --- - - - -subroutine test_old_ints - implicit none - integer :: i,j,k,l - double precision :: old, new, contrib, get_ao_tc_sym_two_e_pot - double precision :: integral_sym , integral_nsym,accu - PROVIDE ao_tc_sym_two_e_pot_in_map - accu = 0.d0 - do j = 1, ao_num - do l= 1, ao_num - do i = 1, ao_num - do k = 1, ao_num -! integral_sym = get_ao_tc_sym_two_e_pot(i, j, k, l, ao_tc_sym_two_e_pot_map) - ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis -! integral_nsym = ao_non_hermit_term_chemist(k,i,l,j) -! old = integral_sym + integral_nsym -! old = tc_grad_square_ao(k,i,l,j) + tc_grad_and_lapl_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) - new = ao_tc_int_chemist_test(k,i,l,j) - old = ao_tc_int_chemist_no_cycle(k,i,l,j) - contrib = dabs(old - new) - if(contrib.gt.1.d-6)then - print*,'problem !!' 
- print*,i,j,k,l - print*,old, new, contrib - endif - accu += contrib - enddo - enddo - enddo - enddo - print*,'******' - print*,'******' - print*,'in test_old_ints' - print*,'accu = ',accu/dble(ao_num**4) - -end - subroutine test_int2_grad1_u12_ao_test implicit none integer :: i,j,ipoint,m,k,l @@ -1146,7 +886,7 @@ subroutine test_fock_3e_uhf_mo_cs() print *, ' diff tot (%) = ', 100.d0 * diff_tot / norm return -end subroutine test_fock_3e_uhf_mo_cs +end ! --- @@ -1185,7 +925,7 @@ subroutine test_fock_3e_uhf_mo_a() print *, ' diff tot (%) = ', 100.d0 * diff_tot / norm return -end subroutine test_fock_3e_uhf_mo_a +end ! --- @@ -1224,7 +964,7 @@ subroutine test_fock_3e_uhf_mo_b() print *, ' diff tot (%) = ', 100.d0 * diff_tot / norm return -end subroutine test_fock_3e_uhf_mo_b +end ! --- diff --git a/src/ao_one_e_ints/spread_dipole_ao.irp.f b/src/ao_one_e_ints/spread_dipole_ao.irp.f index c52d0548..86469a3f 100644 --- a/src/ao_one_e_ints/spread_dipole_ao.irp.f +++ b/src/ao_one_e_ints/spread_dipole_ao.irp.f @@ -224,7 +224,7 @@ subroutine overlap_bourrin_spread(A_center,B_center,alpha,beta,power_A,power_B,overlap_x,lower_exp_val,dx,nx) BEGIN_DOC ! Computes the following integral : -! int [-infty ; +infty] of [(x-A_center)^(power_A) * (x-B_center)^power_B * exp(-alpha(x-A_center)^2) * exp(-beta(x-B_center)^2) * x ] +! int [-infty ; +infty] of [(x-A_center)^(power_A) * (x-B_center)^power_B * exp(-alpha(x-A_center)^2) * exp(-beta(x-B_center)^2) * x^2 ] ! needed for the dipole and those things END_DOC implicit none diff --git a/src/becke_numerical_grid/grid_becke_vector.irp.f b/src/becke_numerical_grid/grid_becke_vector.irp.f index 0386f3c6..473096d0 100644 --- a/src/becke_numerical_grid/grid_becke_vector.irp.f +++ b/src/becke_numerical_grid/grid_becke_vector.irp.f @@ -55,7 +55,7 @@ END_PROVIDER do j = 1, nucl_num do i = 1, n_points_radial_grid -1 do k = 1, n_points_integration_angular - if(dabs(final_weight_at_r(k,i,j)) < thresh_grid)then + if(dabs(final_weight_at_r(k,i,j)) < thresh_grid) then cycle endif i_count += 1 @@ -67,6 +67,13 @@ END_PROVIDER index_final_points(2,i_count) = i index_final_points(3,i_count) = j index_final_points_reverse(k,i,j) = i_count + + if(final_weight_at_r_vector(i_count) .lt. 0.d0) then + print *, ' !!! WARNING !!!' + print *, ' negative weight !!!!' + print *, i_count, final_weight_at_r_vector(i_count) + stop + endif enddo enddo enddo diff --git a/src/casscf_cipsi/README.rst b/src/casscf_cipsi/README.rst index f84cde75..75c99de2 100644 --- a/src/casscf_cipsi/README.rst +++ b/src/casscf_cipsi/README.rst @@ -4,13 +4,15 @@ casscf |CASSCF| program with the CIPSI algorithm. 
-Example of inputs ------------------ + +Example of inputs for GROUND STATE calculations +----------------------------------------------- +NOTICE :: FOR EXCITED STATES CALCULATIONS SEE THE FILE "example_casscf_multistate.sh" a) Small active space : standard CASSCF --------------------------------------- Let's do O2 (triplet) in aug-cc-pvdz with the following geometry (xyz format, Bohr units) -3 +2 O 0.0000000000 0.0000000000 -1.1408000000 O 0.0000000000 0.0000000000 1.1408000000 @@ -45,3 +47,4 @@ qp set casscf_cipsi small_active_space False qp run casscf | tee ${EZFIO_FILE}.casscf_large.out # you should find around -149.9046 + diff --git a/src/casscf_cipsi/casscf.irp.f b/src/casscf_cipsi/casscf.irp.f index addca236..d0a26d36 100644 --- a/src/casscf_cipsi/casscf.irp.f +++ b/src/casscf_cipsi/casscf.irp.f @@ -54,14 +54,24 @@ subroutine run call write_time(6) call write_int(6,iteration,'CAS-SCF iteration = ') - call write_double(6,energy,'CAS-SCF energy = ') + call write_double(6,energy,'State-average CAS-SCF energy = ') ! if(n_states == 1)then ! call ezfio_get_casscf_cipsi_energy_pt2(E_PT2) ! call ezfio_get_casscf_cipsi_energy(PT2) + double precision :: delta_E_istate, e_av + e_av = 0.d0 do istate=1,N_states - call write_double(6,E_PT2(istate),'E + PT2 energy = ') - call write_double(6,PT2(istate),' PT2 = ') + e_av += state_average_weight(istate) * Ev(istate) + if(istate.gt.1)then + delta_E_istate = E_PT2(istate) - E_PT2(1) + write(*,'(A6,I2,A18,F16.10)')'state ',istate,' Delta E+PT2 = ',delta_E_istate + endif + write(*,'(A6,I2,A18,F16.10)')'state ',istate,' E + PT2 energy = ',E_PT2(istate) + write(*,'(A6,I2,A18,F16.10)')'state ',istate,' PT2 energy = ',PT2(istate) +! call write_double(6,E_PT2(istate),'E + PT2 energy = ') +! call write_double(6,PT2(istate),' PT2 = ') enddo + call write_double(6,e_av,'State-average CAS-SCF energy bis = ') call write_double(6,pt2_max,' PT2_MAX = ') ! endif @@ -99,8 +109,8 @@ subroutine run mo_coef = NewOrbs mo_occ = occnum - call save_mos if(.not.converged)then + call save_mos iteration += 1 if(norm_grad_vec2.gt.0.01d0)then N_det = N_states diff --git a/src/casscf_cipsi/example_casscf_multistate.sh b/src/casscf_cipsi/example_casscf_multistate.sh new file mode 100755 index 00000000..716c211a --- /dev/null +++ b/src/casscf_cipsi/example_casscf_multistate.sh @@ -0,0 +1,66 @@ +# This is an example for MULTI STATE CALCULATION STATE AVERAGE CASSCF +# We will compute 3 states on the O2 molecule +# The Ground state and 2 degenerate excited states +# Please follow carefully the tuto :) + +##### PREPARING THE EZFIO +# Set the path to your QP2 directory +QP_ROOT=my_fancy_path +source ${QP_ROOT}/quantum_package.rc +# Create the EZFIO folder +qp create_ezfio -b aug-cc-pvdz O2.xyz -m 3 -a -o O2_avdz_multi_state +# Start with ROHF orbitals +qp run scf # ROHF energy : -149.619992871398 +# Freeze the 1s orbitals of the two oxygen +qp set_frozen_core + +##### PREPARING THE ORBITALS WITH NATURAL ORBITALS OF A CIS +# Tell that you want 3 states in your WF +qp set determinants n_states 3 +# Run a CIS wave function to start your calculation +qp run cis | tee ${EZFIO_FILE}.cis_3_states.out # -149.6652601409258 -149.4714726176746 -149.4686165431939 +# Save the STATE AVERAGE natural orbitals for having a balanced description +# This will also order the orbitals according to their occupation number +# Which makes the active space selection easyer ! 
+qp run save_natorb | tee ${EZFIO_FILE}.natorb_3states.out + +##### PREPARING A CIS GUESS WITHIN THE ACTIVE SPACE +# Set an active space which has the most of important excitations +# and that maintains symmetry : the ACTIVE ORBITALS are from """6 to 13""" + +# YOU FIRST FREEZE THE VIRTUALS THAT ARE NOT IN THE ACTIVE SPACE +# !!!!! WE SET TO "-D" for DELETED !!!! +qp set_mo_class -c "[1-5]" -a "[6-13]" -d "[14-46]" +# You create a guess of CIS type WITHIN THE ACTIVE SPACE +qp run cis | tee ${EZFIO_FILE}.cis_3_states_active_space.out # -149.6515472533511 -149.4622878024821 -149.4622878024817 +# You tell to read the WFT stored (i.e. the guess we just created) +qp set determinants read_wf True + +##### DOING THE CASSCF +### SETTING PROPERLY THE ACTIVE SPACE FOR CASSCF +# You set the active space WITH THE VIRTUAL ORBITALS !!! +# !!!!! NOW WE SET TO "-v" for VIRTUALS !!!!! +qp set_mo_class -c "[1-5]" -a "[6-13]" -v "[14-46]" + +# You tell that it is a small actice space so the CIPSI can take all Slater determinants +qp set casscf_cipsi small_active_space True +# You specify the output file +output=${EZFIO_FILE}.casscf_3states.out +# You run the CASSCF calculation +qp run casscf | tee ${output} # -149.7175867510 -149.5059010227 -149.5059010226 + +# Some grep in order to get some numbers useful to check convergence +# State average energy +grep "State-average CAS-SCF energy =" $output | cut -d "=" -f 2 > data_e_average +# Delta E anticipated for State-average energy, only usefull to check convergence +grep "Predicted energy improvement =" $output | cut -d "=" -f 2 > data_improve +# Ground state energy +grep "state 1 E + PT2 energy" $output | cut -d "=" -f 2 > data_1 +# First excited state energy +grep "state 2 E + PT2 energy" $output | cut -d "=" -f 2 > data_2 +# First excitation energy +grep "state 2 Delta E+PT2" $output | cut -d "=" -f 2 > data_delta_E2 +# Second excited state energy +grep "state 3 E + PT2 energy" $output | cut -d "=" -f 2 > data_3 +# Second excitation energy +grep "state 3 Delta E+PT2" $output | cut -d "=" -f 2 > data_delta_E3 diff --git a/src/casscf_cipsi/neworbs.irp.f b/src/casscf_cipsi/neworbs.irp.f index a7cebbb2..ca2deebb 100644 --- a/src/casscf_cipsi/neworbs.irp.f +++ b/src/casscf_cipsi/neworbs.irp.f @@ -226,27 +226,28 @@ BEGIN_PROVIDER [real*8, Umat, (mo_num,mo_num) ] end do ! Form the exponential + call exp_matrix_taylor(Tmat,mo_num,Umat,converged) - Tpotmat(:,:)=0.D0 - Umat(:,:) =0.D0 - do i=1,mo_num - Tpotmat(i,i)=1.D0 - Umat(i,i) =1.d0 - end do - iter=0 - converged=.false. - do while (.not.converged) - iter+=1 - f = 1.d0 / dble(iter) - Tpotmat2(:,:) = Tpotmat(:,:) * f - call dgemm('N','N', mo_num,mo_num,mo_num,1.d0, & - Tpotmat2, size(Tpotmat2,1), & - Tmat, size(Tmat,1), 0.d0, & - Tpotmat, size(Tpotmat,1)) - Umat(:,:) = Umat(:,:) + Tpotmat(:,:) - - converged = ( sum(abs(Tpotmat(:,:))) < 1.d-6).or.(iter>30) - end do +! Tpotmat(:,:)=0.D0 +! Umat(:,:) =0.D0 +! do i=1,mo_num +! Tpotmat(i,i)=1.D0 +! Umat(i,i) =1.d0 +! end do +! iter=0 +! converged=.false. +! do while (.not.converged) +! iter+=1 +! f = 1.d0 / dble(iter) +! Tpotmat2(:,:) = Tpotmat(:,:) * f +! call dgemm('N','N', mo_num,mo_num,mo_num,1.d0, & +! Tpotmat2, size(Tpotmat2,1), & +! Tmat, size(Tmat,1), 0.d0, & +! Tpotmat, size(Tpotmat,1)) +! Umat(:,:) = Umat(:,:) + Tpotmat(:,:) +! +! converged = ( sum(abs(Tpotmat(:,:))) < 1.d-6).or.(iter>30) +! 
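! exp_matrix_taylor is defined elsewhere in the tree; judging from the call site and the
! commented-out block kept above, it builds the orbital-rotation matrix U = exp(T) from a
! truncated Taylor series. A self-contained sketch of that idea (the argument order
! matches the call above, everything else is illustrative):

subroutine exp_matrix_taylor_sketch(T, n, U, converged)
  implicit none
  integer,          intent(in)  :: n
  double precision, intent(in)  :: T(n,n)
  double precision, intent(out) :: U(n,n)
  logical,          intent(out) :: converged
  integer                       :: i, iter
  double precision              :: f
  double precision, allocatable :: term(:,:), tmp(:,:)

  allocate(term(n,n), tmp(n,n))
  U(:,:)    = 0.d0
  term(:,:) = 0.d0
  do i = 1, n
    U(i,i)    = 1.d0               ! k = 0 term of sum_k T^k / k!
    term(i,i) = 1.d0
  enddo

  converged = .false.
  iter = 0
  do while (.not. converged)
    iter = iter + 1
    f = 1.d0 / dble(iter)
    tmp(:,:) = term(:,:) * f
    call dgemm('N', 'N', n, n, n, 1.d0, tmp, n, T, n, 0.d0, term, n)  ! term = T^iter/iter!
    U(:,:) = U(:,:) + term(:,:)
    ! same stopping rule as the old inline loop: small residual term or hard cap
    converged = (sum(abs(term(:,:))) < 1.d-6) .or. (iter > 30)
  enddo
  deallocate(term, tmp)
end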
end do END_PROVIDER diff --git a/src/dav_general_mat/dav_ext_rout_nonsym_B1space.irp.f b/src/dav_general_mat/dav_ext_rout_nonsym_B1space.irp.f index 4b7b9cc9..d89aaadb 100644 --- a/src/dav_general_mat/dav_ext_rout_nonsym_B1space.irp.f +++ b/src/dav_general_mat/dav_ext_rout_nonsym_B1space.irp.f @@ -346,7 +346,7 @@ subroutine davidson_general_ext_rout_nonsym_b1space(u_in, H_jj, energies, sze, N endif if(i_omax(l) .ne. l) then - print *, ' !!! WARNONG !!!' + print *, ' !!! WARNING !!!' print *, ' index of state', l, i_omax(l) endif enddo diff --git a/src/davidson/u0_wee_u0.irp.f b/src/davidson/u0_wee_u0.irp.f index 0c543aca..bd3525e1 100644 --- a/src/davidson/u0_wee_u0.irp.f +++ b/src/davidson/u0_wee_u0.irp.f @@ -492,3 +492,25 @@ subroutine u_0_H_u_0_two_e(e_0,u_0,n,keys_tmp,Nint,N_st,sze) deallocate (s_0, v_0) end +BEGIN_PROVIDER [double precision, psi_energy_two_e_trans, (N_states, N_states)] + implicit none + BEGIN_DOC +! psi_energy_two_e_trans(istate,jstate) = + END_dOC + integer :: i,j,istate,jstate + double precision :: hij, coef_i, coef_j + psi_energy_two_e_trans = 0.d0 + do i = 1, N_det + do j = 1, N_det + call i_H_j_two_e(psi_det(1,1,i),psi_det(1,1,j),N_int,hij) + do istate = 1, N_states + coef_i = psi_coef(i,istate) + do jstate = 1, N_states + coef_j = psi_coef(j,jstate) + psi_energy_two_e_trans(jstate,istate) += coef_i * coef_j * hij + enddo + enddo + enddo + enddo + +END_PROVIDER diff --git a/src/determinants/generate_cas_space.irp.f b/src/determinants/generate_cas_space.irp.f new file mode 100644 index 00000000..47a2ca30 --- /dev/null +++ b/src/determinants/generate_cas_space.irp.f @@ -0,0 +1,87 @@ +subroutine generate_cas_space + use bitmasks + implicit none + BEGIN_DOC +! Generates the CAS space + END_DOC + integer :: i, sze, ncore, n_alpha_act, n_beta_act + integer(bit_kind) :: o(N_int) + integer(bit_kind) :: u + integer :: mo_list(elec_alpha_num) + + integer :: k,n,m + integer(bit_kind) :: t, t1, t2 + + call list_to_bitstring(o, list_core_inact, n_core_inact_orb, N_int) + + ! 
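! The alpha and beta strings of the CAS are enumerated in the do-while loops further
! down with the classic "next bit permutation" trick: starting from u with the n lowest
! bits set (shiftl(1,n) - 1), the update
!   t = ior(u, u-1)
!   u = ior(t+1, shiftr(iand(not(t), t+1) - 1, trailz(u)+1))
! yields the next integer having the same number of set bits, so every choice of n
! active orbitals among n_act_orb is visited exactly once (binom_int(n_act_orb, n)
! strings in total, which is what n_det_alpha_unique and n_det_beta_unique are set to).
! Each bit string is then mapped back to MO indices with bitstring_to_list / list_act
! and merged (ior) with the core/inactive occupation string o(:).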
Count number of active electrons + n_alpha_act = 0 + n_beta_act = 0 + do i=1, n_act_orb + if (list_act(i) <= elec_alpha_num) then + n_alpha_act += 1 + endif + if (list_act(i) <= elec_beta_num) then + n_beta_act += 1 + endif + enddo + if (n_act_orb > 64) then + stop 'More than 64 active MOs' + endif + + print *, '' + print *, 'CAS(', n_alpha_act+n_beta_act, ', ', n_act_orb, ')' + print *, '' + + n_det_alpha_unique = binom_int(n_act_orb, n_alpha_act) + TOUCH n_det_alpha_unique + + n = n_alpha_act + u = shiftl(1_bit_kind,n) - 1_bit_kind + + k=0 + do while (u < shiftl(1_bit_kind,n_act_orb)) + k = k+1 + call bitstring_to_list(u, mo_list, m, 1) + do i=1,m + mo_list(i) = list_act( mo_list(i) ) + enddo + call list_to_bitstring(psi_det_alpha_unique(1,k), mo_list, m, N_int) + do i=1,N_int + psi_det_alpha_unique(i,k) = ior(psi_det_alpha_unique(i,k), o(i)) + enddo + t = ior(u,u-1) + t1 = t+1 + t2 = shiftr((iand(not(t),t1)-1), trailz(u)+1) + u = ior(t1,t2) + enddo + + n_det_beta_unique = binom_int(n_act_orb, n_beta_act) + TOUCH n_det_beta_unique + + n = n_beta_act + u = shiftl(1_bit_kind,n) -1_bit_kind + + k=0 + do while (u < shiftl(1_bit_kind,n_act_orb)) + k = k+1 + call bitstring_to_list(u, mo_list, m, 1) + do i=1,m + mo_list(i) = list_act( mo_list(i) ) + enddo + call list_to_bitstring(psi_det_beta_unique(1,k), mo_list, m, N_int) + do i=1,N_int + psi_det_beta_unique(i,k) = ior(psi_det_beta_unique(i,k), o(i)) + enddo + t = ior(u,u-1) + t1 = t+1 + t2 = shiftr((iand(not(t),t1)-1), trailz(u)+1) + u = ior(t1,t2) + enddo + + call generate_all_alpha_beta_det_products + + print *, 'Ndet = ', N_det + +end + diff --git a/src/dft_utils_in_r/ao_prod_mlti_pl.irp.f b/src/dft_utils_in_r/ao_prod_mlti_pl.irp.f index 39ea0cdf..dac7c1cc 100644 --- a/src/dft_utils_in_r/ao_prod_mlti_pl.irp.f +++ b/src/dft_utils_in_r/ao_prod_mlti_pl.irp.f @@ -149,7 +149,3 @@ BEGIN_PROVIDER [ double precision, ao_prod_dist_grid, (ao_num, ao_num, n_points_ END_PROVIDER -!BEGIN_PROVIDER [ double precision, ao_abs_prod_j1b, (ao_num, ao_num)] -! implicit none -! -!END_PROVIDER diff --git a/src/hamiltonian/EZFIO.cfg b/src/hamiltonian/EZFIO.cfg index 672bfdfa..9b51c560 100644 --- a/src/hamiltonian/EZFIO.cfg +++ b/src/hamiltonian/EZFIO.cfg @@ -5,4 +5,3 @@ interface: ezfio,provider,ocaml default: 0.5 ezfio_name: mu_erf - diff --git a/src/hamiltonian/NEED b/src/hamiltonian/NEED index e69de29b..f1c051ff 100644 --- a/src/hamiltonian/NEED +++ b/src/hamiltonian/NEED @@ -0,0 +1,2 @@ +ezfio_files +nuclei diff --git a/src/hartree_fock/print_scf_int.irp.f b/src/hartree_fock/print_scf_int.irp.f new file mode 100644 index 00000000..ee7590f6 --- /dev/null +++ b/src/hartree_fock/print_scf_int.irp.f @@ -0,0 +1,114 @@ + +program print_scf_int + + call main() + +end + +subroutine main() + + implicit none + integer :: i, j, k, l + + print *, " Hcore:" + do j = 1, ao_num + do i = 1, ao_num + print *, i, j, ao_one_e_integrals(i,j) + enddo + enddo + + print *, " P:" + do j = 1, ao_num + do i = 1, ao_num + print *, i, j, SCF_density_matrix_ao_alpha(i,j) + enddo + enddo + + + double precision :: integ, density_a, density_b, density + double precision :: J_scf(ao_num, ao_num) + double precision :: K_scf(ao_num, ao_num) + + + double precision, external :: get_ao_two_e_integral + PROVIDE ao_integrals_map + + print *, " J:" + !do j = 1, ao_num + ! do l = 1, ao_num + ! do i = 1, ao_num + ! do k = 1, ao_num + ! ! < 1:k, 2:l | 1:i, 2:j > + ! print *, '< k l | i j >', k, l, i, j + ! print *, get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + ! enddo + ! enddo + ! 
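  ! The uncommented loops further below rebuild the Coulomb and exchange contributions
  ! directly from the AO integral map: J_scf(k,i) accumulates density(l,j) times the
  ! integral fetched as get_ao_two_e_integral(l, j, k, i, ...), while K_scf(k,i)
  ! subtracts density_a(l,j) times get_ao_two_e_integral(l, i, k, j, ...). The resulting
  ! 'J x P' and 'K x P' blocks are printed so they can be compared, together with Hcore,
  ! against the stored 'F in AO' and 'F in MO' matrices printed at the end.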
enddo + !enddo + + !do k = 1, ao_num + ! do i = 1, ao_num + ! do j = 1, ao_num + ! do l = 1, ao_num + ! ! ( 1:k, 1:i | 2:l, 2:j ) + ! print *, '(k i | l j)', k, i, l, j + ! print *, get_ao_two_e_integral(l, j, k, i, ao_integrals_map) + ! enddo + ! enddo + ! print *, '' + ! enddo + !enddo + + J_scf = 0.d0 + K_scf = 0.d0 + do i = 1, ao_num + do k = 1, ao_num + do j = 1, ao_num + do l = 1, ao_num + + density_a = SCF_density_matrix_ao_alpha(l,j) + density_b = SCF_density_matrix_ao_beta (l,j) + density = density_a + density_b + + integ = get_ao_two_e_integral(l, j, k, i, ao_integrals_map) + J_scf(k,i) += density * integ + integ = get_ao_two_e_integral(l, i, k, j, ao_integrals_map) + K_scf(k,i) -= density_a * integ + enddo + enddo + enddo + enddo + + print *, 'J x P' + do i = 1, ao_num + do k = 1, ao_num + print *, k, i, J_scf(k,i) + enddo + enddo + + print *, '' + print *, 'K x P' + do i = 1, ao_num + do k = 1, ao_num + print *, k, i, K_scf(k,i) + enddo + enddo + + print *, '' + print *, 'F in AO' + do i = 1, ao_num + do k = 1, ao_num + print *, k, i, Fock_matrix_ao(k,i) + enddo + enddo + + print *, '' + print *, 'F in MO' + do i = 1, ao_num + do k = 1, ao_num + print *, k, i, 2.d0 * Fock_matrix_mo_alpha(k,i) + enddo + enddo + +end + diff --git a/src/mol_properties/EZFIO.cfg b/src/mol_properties/EZFIO.cfg index 35a095fb..3ddba227 100644 --- a/src/mol_properties/EZFIO.cfg +++ b/src/mol_properties/EZFIO.cfg @@ -21,3 +21,10 @@ type: logical doc: If true and N_states > 1, the oscillator strength will be computed interface: ezfio,provider,ocaml default: false + +[calc_energy_components] +type: logical +doc: If true, the components of the energy (1e, 2e, kinetic) will be computed +interface: ezfio,provider,ocaml +default: false + diff --git a/src/two_body_rdm/print_e_components.irp.f b/src/mol_properties/print_e_components.irp.f similarity index 100% rename from src/two_body_rdm/print_e_components.irp.f rename to src/mol_properties/print_e_components.irp.f diff --git a/src/mol_properties/print_mol_properties.irp.f b/src/mol_properties/print_mol_properties.irp.f index 3753a3dd..00ccb826 100644 --- a/src/mol_properties/print_mol_properties.irp.f +++ b/src/mol_properties/print_mol_properties.irp.f @@ -6,6 +6,11 @@ subroutine print_mol_properties() ! Run the propertie calculations END_DOC + ! Energy components + if (calc_energy_components) then + call print_energy_components + endif + ! Electric dipole moment if (calc_dipole_moment) then call print_dipole_moment @@ -18,7 +23,7 @@ subroutine print_mol_properties() ! Oscillator strength if (calc_osc_str .and. N_states > 1) then - call print_oscillator_strength + call print_oscillator_strength endif end diff --git a/src/tools/cas_complete.irp.f b/src/tools/cas_complete.irp.f new file mode 100644 index 00000000..301c9979 --- /dev/null +++ b/src/tools/cas_complete.irp.f @@ -0,0 +1,13 @@ +program cas_complete + implicit none + BEGIN_DOC +! Diagonalizes the Hamiltonian in the complete active space + END_DOC + + call generate_cas_space + call diagonalize_ci + call save_wavefunction + +end + + diff --git a/src/two_body_rdm/act_2_transition_rdm.irp.f b/src/two_body_rdm/act_2_transition_rdm.irp.f new file mode 100644 index 00000000..612213e2 --- /dev/null +++ b/src/two_body_rdm/act_2_transition_rdm.irp.f @@ -0,0 +1,39 @@ + BEGIN_PROVIDER [double precision, act_2_rdm_trans_spin_trace_mo, (n_act_orb,n_act_orb,n_act_orb,n_act_orb,N_states,N_states)] + implicit none + BEGIN_DOC +! 
act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate) = STATE SPECIFIC physicist notation for 2rdm_trans +! +! \sum_{\sigma,\sigma'} +! +! WHERE ALL ORBITALS (i,j,k,l) BELONGS TO AN ACTIVE SPACE DEFINED BY "list_act" +! +! THE NORMALIZATION (i.e. sum of diagonal elements) IS SET TO N_{elec}^{act} * (N_{elec}^{act} - 1) +! +! !!!!! WARNING !!!!! ALL SLATER DETERMINANTS IN PSI_DET MUST BELONG TO AN ACTIVE SPACE DEFINED BY "list_act" + END_DOC + integer :: ispin + double precision :: wall_1, wall_2 + ! condition for beta/beta spin + print*,'' + print*,'Providing act_2_rdm_trans_spin_trace_mo ' + character*(128) :: name_file + name_file = 'act_2_rdm_trans_spin_trace_mo' + ispin = 4 + act_2_rdm_trans_spin_trace_mo = 0.d0 + call wall_time(wall_1) +! if(read_two_body_rdm_trans_spin_trace)then +! print*,'Reading act_2_rdm_trans_spin_trace_mo from disk ...' +! call read_array_two_rdm_trans(n_act_orb,N_states,act_2_rdm_trans_spin_trace_mo,name_file) +! else + call orb_range_2_trans_rdm_openmp(act_2_rdm_trans_spin_trace_mo,n_act_orb,n_act_orb,list_act,ispin,psi_coef,size(psi_coef,2),size(psi_coef,1)) +! endif +! if(write_two_body_rdm_trans_spin_trace)then +! print*,'Writing act_2_rdm_trans_spin_trace_mo on disk ...' +! call write_array_two_rdm_trans(n_act_orb,n_states,act_2_rdm_trans_spin_trace_mo,name_file) +! call ezfio_set_two_body_rdm_trans_io_two_body_rdm_trans_spin_trace("Read") +! endif + + act_2_rdm_trans_spin_trace_mo *= 2.d0 + call wall_time(wall_2) + print*,'Wall time to provide act_2_rdm_trans_spin_trace_mo',wall_2 - wall_1 + END_PROVIDER diff --git a/src/two_body_rdm/example.irp.f b/src/two_body_rdm/example.irp.f index 30e2685a..38510fe9 100644 --- a/src/two_body_rdm/example.irp.f +++ b/src/two_body_rdm/example.irp.f @@ -365,3 +365,91 @@ subroutine routine_full_mos end + +subroutine routine_active_only_trans + implicit none + integer :: i,j,k,l,iorb,jorb,korb,lorb,istate,jstate + BEGIN_DOC +! This routine computes the two electron repulsion within the active space using various providers +! + END_DOC + + double precision :: vijkl,get_two_e_integral + double precision :: wee_tot(N_states,N_states),rdm_transtot + double precision :: spin_trace + double precision :: accu_tot + + wee_tot = 0.d0 + + + iorb = 1 + jorb = 1 + korb = 1 + lorb = 1 + vijkl = get_two_e_integral(lorb,korb,jorb,iorb,mo_integrals_map) + provide act_2_rdm_trans_spin_trace_mo + i = 1 + j = 2 + + print*,'**************************' + print*,'**************************' + do jstate = 1, N_states + do istate = 1, N_states + !! PURE ACTIVE PART + !! + accu_tot = 0.d0 + do i = 1, n_act_orb + iorb = list_act(i) + do j = 1, n_act_orb + jorb = list_act(j) + do k = 1, n_act_orb + korb = list_act(k) + do l = 1, n_act_orb + lorb = list_act(l) + ! 1 2 1 2 2 1 2 1 +! if(dabs(act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate) - act_2_rdm_trans_spin_trace_mo(j,i,l,k,istate,jstate)).gt.1.d-10)then +! print*,'Error in act_2_rdm_trans_spin_trace_mo' +! print*,"dabs(act_2_rdm_trans_spin_trace_mo(i,j,k,l) - act_2_rdm_trans_spin_trace_mo(j,i,l,k)).gt.1.d-10" +! print*,i,j,k,l +! print*,act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate),act_2_rdm_trans_spin_trace_mo(j,i,l,k,istate,jstate),dabs(act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate) - act_2_rdm_trans_spin_trace_mo(j,i,l,k,istate,jstate)) +! endif + + ! 1 2 1 2 1 2 1 2 +! if(dabs(act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate) - act_2_rdm_trans_spin_trace_mo(k,l,i,j,istate,jstate)).gt.1.d-10)then +! print*,'Error in act_2_rdm_trans_spin_trace_mo' +! 
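! The accumulation a few lines below contracts the spin-traced transition 2-RDM with the
! active-space MO two-electron integrals,
!   wee_tot(I,J) = 1/2 * sum over active i,j,k,l of vijkl * act_2_rdm_trans_spin_trace_mo(l,k,j,i,I,J)
! and compares the result with psi_energy_two_e_trans(I,J), which is built determinant by
! determinant in u0_wee_u0.irp.f; for I = J this reduces to the usual active-space
! two-electron energy.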
print*,"dabs(act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate) - act_2_rdm_trans_spin_trace_mo(k,l,i,j,istate,jstate)).gt.1.d-10" +! print*,i,j,k,l +! print*,act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate),act_2_rdm_trans_spin_trace_mo(k,l,i,j,istate,jstate),dabs(act_2_rdm_trans_spin_trace_mo(i,j,k,l,istate,jstate) - act_2_rdm_trans_spin_trace_mo(k,l,i,j,istate,jstate)) +! endif + + vijkl = get_two_e_integral(lorb,korb,jorb,iorb,mo_integrals_map) + + + rdm_transtot = act_2_rdm_trans_spin_trace_mo(l,k,j,i,istate,jstate) + + wee_tot(istate,jstate) += 0.5d0 * vijkl * rdm_transtot + + enddo + enddo + enddo + enddo + print*,'' + print*,'' + print*,'Active space only energy for state ',istate,jstate + print*,'wee_tot = ',wee_tot(istate,jstate) + print*,'Full energy ' + print*,'psi_energy_two_e(istate,jstate)= ',psi_energy_two_e_trans(istate,jstate) + print*,'--------------------------' + enddo + enddo + print*,'Wee from DM ' + do istate = 1,N_states + write(*,'(100(F16.10,X))')wee_tot(1:N_states,istate) + enddo + print*,'Wee from Psi det' + do istate = 1,N_states + write(*,'(100(F16.10,X))')psi_energy_two_e_trans(1:N_states,istate) + enddo + +end + diff --git a/src/two_body_rdm/io_two_rdm.irp.f b/src/two_body_rdm/io_two_rdm.irp.f index bdd8a4f9..0b30d76f 100644 --- a/src/two_body_rdm/io_two_rdm.irp.f +++ b/src/two_body_rdm/io_two_rdm.irp.f @@ -31,3 +31,37 @@ subroutine read_array_two_rdm(n_orb,nstates,array_tmp,name_file) close(unit=i_unit_output) end + +subroutine write_array_two_trans_rdm(n_orb,nstates,array_tmp,name_file) + implicit none + integer, intent(in) :: n_orb,nstates + character*(128), intent(in) :: name_file + double precision, intent(in) :: array_tmp(n_orb,n_orb,n_orb,n_orb,nstates,nstates) + + character*(128) :: output + integer :: i_unit_output,getUnitAndOpen + PROVIDE ezfio_filename + output=trim(ezfio_filename)//'/work/'//trim(name_file) + i_unit_output = getUnitAndOpen(output,'W') + call lock_io() + write(i_unit_output)array_tmp + call unlock_io() + close(unit=i_unit_output) +end + +subroutine read_array_two_trans_rdm(n_orb,nstates,array_tmp,name_file) + implicit none + character*(128) :: output + integer :: i_unit_output,getUnitAndOpen + integer, intent(in) :: n_orb,nstates + character*(128), intent(in) :: name_file + double precision, intent(out) :: array_tmp(n_orb,n_orb,n_orb,n_orb,N_states,nstates) + PROVIDE ezfio_filename + output=trim(ezfio_filename)//'/work/'//trim(name_file) + i_unit_output = getUnitAndOpen(output,'R') + call lock_io() + read(i_unit_output)array_tmp + call unlock_io() + close(unit=i_unit_output) +end + diff --git a/src/two_body_rdm/test_2_rdm.irp.f b/src/two_body_rdm/test_2_rdm.irp.f index 123261d8..de2606a7 100644 --- a/src/two_body_rdm/test_2_rdm.irp.f +++ b/src/two_body_rdm/test_2_rdm.irp.f @@ -4,5 +4,6 @@ program test_2_rdm touch read_wf call routine_active_only call routine_full_mos + call routine_active_only_trans end diff --git a/src/two_rdm_routines/davidson_like_trans_2rdm.irp.f b/src/two_rdm_routines/davidson_like_trans_2rdm.irp.f new file mode 100644 index 00000000..9e68a0e1 --- /dev/null +++ b/src/two_rdm_routines/davidson_like_trans_2rdm.irp.f @@ -0,0 +1,585 @@ +subroutine orb_range_2_trans_rdm_openmp(big_array,dim1,norb,list_orb,ispin,u_0,N_st,sze) + use bitmasks + implicit none + BEGIN_DOC + ! if ispin == 1 :: alpha/alpha 2_rdm + ! == 2 :: beta /beta 2_rdm + ! == 3 :: alpha/beta + beta/alpha 2trans_rdm + ! == 4 :: spin traced 2_rdm :: aa + bb + ab + ba + ! + ! notice that here it is the TRANSITION RDM THAT IS COMPUTED + ! + ! 
THE DIAGONAL PART IS THE USUAL ONE FOR A GIVEN STATE + ! Assumes that the determinants are in psi_det + ! + ! istart, iend, ishift, istep are used in ZMQ parallelization. + END_DOC + integer, intent(in) :: N_st,sze + integer, intent(in) :: dim1,norb,list_orb(norb),ispin + double precision, intent(inout) :: big_array(dim1,dim1,dim1,dim1,N_st,N_st) + double precision, intent(in) :: u_0(sze,N_st) + + integer :: k + double precision, allocatable :: u_t(:,:) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: u_t + PROVIDE mo_two_e_integrals_in_map + allocate(u_t(N_st,N_det)) + do k=1,N_st + call dset_order(u_0(1,k),psi_bilinear_matrix_order,N_det) + enddo + call dtranspose( & + u_0, & + size(u_0, 1), & + u_t, & + size(u_t, 1), & + N_det, N_st) + + call orb_range_2_trans_rdm_openmp_work(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,1,N_det,0,1) + deallocate(u_t) + + do k=1,N_st + call dset_order(u_0(1,k),psi_bilinear_matrix_order_reverse,N_det) + enddo + +end + +subroutine orb_range_2_trans_rdm_openmp_work(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + use bitmasks + implicit none + BEGIN_DOC + ! Computes two-trans_rdm + ! + ! Default should be 1,N_det,0,1 + END_DOC + integer, intent(in) :: N_st,sze,istart,iend,ishift,istep + integer, intent(in) :: dim1,norb,list_orb(norb),ispin + double precision, intent(inout) :: big_array(dim1,dim1,dim1,dim1,N_st,N_st) + double precision, intent(in) :: u_t(N_st,N_det) + + integer :: k + + PROVIDE N_int + + select case (N_int) + case (1) + call orb_range_2_trans_rdm_openmp_work_1(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + case (2) + call orb_range_2_trans_rdm_openmp_work_2(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + case (3) + call orb_range_2_trans_rdm_openmp_work_3(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + case (4) + call orb_range_2_trans_rdm_openmp_work_4(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + case default + call orb_range_2_trans_rdm_openmp_work_N_int(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + end select +end + + + BEGIN_TEMPLATE +subroutine orb_range_2_trans_rdm_openmp_work_$N_int(big_array,dim1,norb,list_orb,ispin,u_t,N_st,sze,istart,iend,ishift,istep) + use bitmasks + use omp_lib + implicit none + BEGIN_DOC + ! Computes the two trans_rdm for the N_st vectors |u_t> + ! if ispin == 1 :: alpha/alpha 2trans_rdm + ! == 2 :: beta /beta 2trans_rdm + ! == 3 :: alpha/beta 2trans_rdm + ! == 4 :: spin traced 2trans_rdm :: aa + bb + 0.5 (ab + ba)) + ! The 2trans_rdm will be computed only on the list of orbitals list_orb, which contains norb + ! 
Default should be 1,N_det,0,1 for istart,iend,ishift,istep + END_DOC + integer, intent(in) :: N_st,sze,istart,iend,ishift,istep + double precision, intent(in) :: u_t(N_st,N_det) + integer, intent(in) :: dim1,norb,list_orb(norb),ispin + double precision, intent(inout) :: big_array(dim1,dim1,dim1,dim1,N_st,N_st) + + integer(omp_lock_kind) :: lock_2trans_rdm + integer :: i,j,k,l + integer :: k_a, k_b, l_a, l_b + integer :: krow, kcol + integer :: lrow, lcol + integer(bit_kind) :: spindet($N_int) + integer(bit_kind) :: tmp_det($N_int,2) + integer(bit_kind) :: tmp_det2($N_int,2) + integer(bit_kind) :: tmp_det3($N_int,2) + integer(bit_kind), allocatable :: buffer(:,:) + integer :: n_doubles + integer, allocatable :: doubles(:) + integer, allocatable :: singles_a(:) + integer, allocatable :: singles_b(:) + integer, allocatable :: idx(:), idx0(:) + integer :: maxab, n_singles_a, n_singles_b, kcol_prev + + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + integer(bit_kind) :: orb_bitmask($N_int) + integer :: list_orb_reverse(mo_num) + integer, allocatable :: keys(:,:) + double precision, allocatable :: values(:,:,:) + integer :: nkeys,sze_buff + integer :: ll + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + else + print*,'Wrong parameter for ispin in general_2_trans_rdm_state_av_openmp_work' + print*,'ispin = ',ispin + stop + endif + + + PROVIDE N_int + + call list_to_bitstring( orb_bitmask, list_orb, norb, N_int) + sze_buff = 6 * norb + elec_alpha_num * elec_alpha_num * 60 + list_orb_reverse = -1000 + do i = 1, norb + list_orb_reverse(list_orb(i)) = i + enddo + maxab = max(N_det_alpha_unique, N_det_beta_unique)+1 + allocate(idx0(maxab)) + + do i=1,maxab + idx0(i) = i + enddo + call omp_init_lock(lock_2trans_rdm) + + ! Prepare the array of all alpha single excitations + ! ------------------------------------------------- + + PROVIDE N_int nthreads_davidson elec_alpha_num + !$OMP PARALLEL DEFAULT(NONE) NUM_THREADS(nthreads_davidson) & + !$OMP SHARED(psi_bilinear_matrix_rows, N_det,lock_2trans_rdm,& + !$OMP psi_bilinear_matrix_columns, & + !$OMP psi_det_alpha_unique, psi_det_beta_unique,& + !$OMP n_det_alpha_unique, n_det_beta_unique, N_int,& + !$OMP psi_bilinear_matrix_transp_rows, & + !$OMP psi_bilinear_matrix_transp_columns, & + !$OMP psi_bilinear_matrix_transp_order, N_st, & + !$OMP psi_bilinear_matrix_order_transp_reverse, & + !$OMP psi_bilinear_matrix_columns_loc, & + !$OMP psi_bilinear_matrix_transp_rows_loc,elec_alpha_num, & + !$OMP istart, iend, istep, irp_here,list_orb_reverse, n_states, dim1, & + !$OMP ishift, idx0, u_t, maxab, alpha_alpha,beta_beta,alpha_beta,spin_trace,ispin,big_array,sze_buff,orb_bitmask) & + !$OMP PRIVATE(krow, kcol, tmp_det, spindet, k_a, k_b, i,c_1, & + !$OMP lcol, lrow, l_a, l_b, & + !$OMP buffer, doubles, n_doubles, & + !$OMP tmp_det2, idx, l, kcol_prev, & + !$OMP singles_a, n_singles_a, singles_b, & + !$OMP n_singles_b, nkeys, keys, values) + + ! Alpha/Beta double excitations + ! 
============================= + nkeys = 0 + allocate( keys(4,sze_buff), values(n_st,n_st,sze_buff)) + allocate( buffer($N_int,maxab), & + singles_a(maxab), & + singles_b(maxab), & + doubles(maxab), & + idx(maxab)) + + kcol_prev=-1 + + ASSERT (iend <= N_det) + ASSERT (istart > 0) + ASSERT (istep > 0) + + !$OMP DO SCHEDULE(dynamic,64) + do k_a=istart+ishift,iend,istep + + krow = psi_bilinear_matrix_rows(k_a) + ASSERT (krow <= N_det_alpha_unique) + + kcol = psi_bilinear_matrix_columns(k_a) + ASSERT (kcol <= N_det_beta_unique) + + tmp_det(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, krow) + tmp_det(1:$N_int,2) = psi_det_beta_unique (1:$N_int, kcol) + + if (kcol /= kcol_prev) then + call get_all_spin_singles_$N_int( & + psi_det_beta_unique, idx0, & + tmp_det(1,2), N_det_beta_unique, & + singles_b, n_singles_b) + endif + kcol_prev = kcol + + ! Loop over singly excited beta columns + ! ------------------------------------- + + do i=1,n_singles_b + lcol = singles_b(i) + + tmp_det2(1:$N_int,2) = psi_det_beta_unique(1:$N_int, lcol) + + l_a = psi_bilinear_matrix_columns_loc(lcol) + ASSERT (l_a <= N_det) + + do j=1,psi_bilinear_matrix_columns_loc(lcol+1) - l_a + lrow = psi_bilinear_matrix_rows(l_a) + ASSERT (lrow <= N_det_alpha_unique) + + buffer(1:$N_int,j) = psi_det_alpha_unique(1:$N_int, lrow) + + ASSERT (l_a <= N_det) + idx(j) = l_a + l_a = l_a+1 + enddo + j = j-1 + + call get_all_spin_singles_$N_int( & + buffer, idx, tmp_det(1,1), j, & + singles_a, n_singles_a ) + + ! Loop over alpha singles + ! ----------------------- + + if(alpha_beta.or.spin_trace)then + do k = 1,n_singles_a + l_a = singles_a(k) + ASSERT (l_a <= N_det) + + lrow = psi_bilinear_matrix_rows(l_a) + ASSERT (lrow <= N_det_alpha_unique) + + tmp_det2(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, lrow) +! print*,'nkeys before = ',nkeys + do ll = 1, N_states + do l= 1, N_states + c_1(l,ll) = u_t(ll,l_a) * u_t(l,k_a) + enddo + enddo + if(alpha_beta)then + ! only ONE contribution + if (nkeys+1 .ge. sze_buff) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + else if (spin_trace)then + ! TWO contributions + if (nkeys+2 .ge. sze_buff) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + endif + call orb_range_off_diag_double_to_all_states_ab_trans_rdm_buffer(tmp_det,tmp_det2,c_1,N_st,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + + enddo + endif + + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + enddo + + enddo + !$OMP END DO + + !$OMP DO SCHEDULE(dynamic,64) + do k_a=istart+ishift,iend,istep + + + ! Single and double alpha exitations + ! =================================== + + + ! Initial determinant is at k_a in alpha-major representation + ! ----------------------------------------------------------------------- + + krow = psi_bilinear_matrix_rows(k_a) + ASSERT (krow <= N_det_alpha_unique) + + kcol = psi_bilinear_matrix_columns(k_a) + ASSERT (kcol <= N_det_beta_unique) + + tmp_det(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, krow) + tmp_det(1:$N_int,2) = psi_det_beta_unique (1:$N_int, kcol) + + ! Initial determinant is at k_b in beta-major representation + ! ---------------------------------------------------------------------- + + k_b = psi_bilinear_matrix_order_transp_reverse(k_a) + ASSERT (k_b <= N_det) + + spindet(1:$N_int) = tmp_det(1:$N_int,1) + + ! 
Loop inside the beta column to gather all the connected alphas + lcol = psi_bilinear_matrix_columns(k_a) + l_a = psi_bilinear_matrix_columns_loc(lcol) + do i=1,N_det_alpha_unique + if (l_a > N_det) exit + lcol = psi_bilinear_matrix_columns(l_a) + if (lcol /= kcol) exit + lrow = psi_bilinear_matrix_rows(l_a) + ASSERT (lrow <= N_det_alpha_unique) + + buffer(1:$N_int,i) = psi_det_alpha_unique(1:$N_int, lrow) + idx(i) = l_a + l_a = l_a+1 + enddo + i = i-1 + + call get_all_spin_singles_and_doubles_$N_int( & + buffer, idx, spindet, i, & + singles_a, doubles, n_singles_a, n_doubles ) + + ! Compute Hij for all alpha singles + ! ---------------------------------- + + tmp_det2(1:$N_int,2) = psi_det_beta_unique (1:$N_int, kcol) + do i=1,n_singles_a + l_a = singles_a(i) + ASSERT (l_a <= N_det) + + lrow = psi_bilinear_matrix_rows(l_a) + ASSERT (lrow <= N_det_alpha_unique) + + tmp_det2(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, lrow) + do ll= 1, N_states + do l= 1, N_states + c_1(l,ll) = u_t(ll,l_a) * u_t(l,k_a) + enddo + enddo + if(alpha_beta.or.spin_trace.or.alpha_alpha)then + ! increment the alpha/beta part for single excitations + if (nkeys+ 2 * elec_alpha_num .ge. sze_buff) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + call orb_range_off_diag_single_to_all_states_ab_trans_rdm_buffer(tmp_det, tmp_det2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + ! increment the alpha/alpha part for single excitations + if (nkeys+4 * elec_alpha_num .ge. sze_buff ) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + call orb_range_off_diag_single_to_all_states_aa_trans_rdm_buffer(tmp_det,tmp_det2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + endif + + enddo + + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + + ! Compute Hij for all alpha doubles + ! ---------------------------------- + + if(alpha_alpha.or.spin_trace)then + do i=1,n_doubles + l_a = doubles(i) + ASSERT (l_a <= N_det) + + lrow = psi_bilinear_matrix_rows(l_a) + ASSERT (lrow <= N_det_alpha_unique) + + do ll= 1, N_states + do l= 1, N_states + c_1(l,ll) = u_t(ll,l_a) * u_t(l,k_a) + enddo + enddo + if (nkeys+4 .ge. sze_buff) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + call orb_range_off_diag_double_to_all_states_aa_trans_rdm_buffer(tmp_det(1,1),psi_det_alpha_unique(1, lrow),c_1,N_st,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + enddo + endif + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + + + ! Single and double beta excitations + ! ================================== + + + ! Initial determinant is at k_a in alpha-major representation + ! ----------------------------------------------------------------------- + + krow = psi_bilinear_matrix_rows(k_a) + kcol = psi_bilinear_matrix_columns(k_a) + + tmp_det(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, krow) + tmp_det(1:$N_int,2) = psi_det_beta_unique (1:$N_int, kcol) + + spindet(1:$N_int) = tmp_det(1:$N_int,2) + + ! Initial determinant is at k_b in beta-major representation + ! ----------------------------------------------------------------------- + + k_b = psi_bilinear_matrix_order_transp_reverse(k_a) + ASSERT (k_b <= N_det) + + ! 
Loop inside the alpha row to gather all the connected betas + lrow = psi_bilinear_matrix_transp_rows(k_b) + l_b = psi_bilinear_matrix_transp_rows_loc(lrow) + do i=1,N_det_beta_unique + if (l_b > N_det) exit + lrow = psi_bilinear_matrix_transp_rows(l_b) + if (lrow /= krow) exit + lcol = psi_bilinear_matrix_transp_columns(l_b) + ASSERT (lcol <= N_det_beta_unique) + + buffer(1:$N_int,i) = psi_det_beta_unique(1:$N_int, lcol) + idx(i) = l_b + l_b = l_b+1 + enddo + i = i-1 + + call get_all_spin_singles_and_doubles_$N_int( & + buffer, idx, spindet, i, & + singles_b, doubles, n_singles_b, n_doubles ) + + ! Compute Hij for all beta singles + ! ---------------------------------- + + tmp_det2(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, krow) + do i=1,n_singles_b + l_b = singles_b(i) + ASSERT (l_b <= N_det) + + lcol = psi_bilinear_matrix_transp_columns(l_b) + ASSERT (lcol <= N_det_beta_unique) + + tmp_det2(1:$N_int,2) = psi_det_beta_unique (1:$N_int, lcol) + l_a = psi_bilinear_matrix_transp_order(l_b) + do ll= 1, N_states + do l= 1, N_states + c_1(l,ll) = u_t(ll,l_a) * u_t(l,k_a) + enddo + enddo + if(alpha_beta.or.spin_trace.or.beta_beta)then + ! increment the alpha/beta part for single excitations + if (nkeys+2 * elec_alpha_num .ge. sze_buff ) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + call orb_range_off_diag_single_to_all_states_ab_trans_rdm_buffer(tmp_det, tmp_det2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + ! increment the beta /beta part for single excitations + if (nkeys+4 * elec_alpha_num .ge. sze_buff) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + call orb_range_off_diag_single_to_all_states_bb_trans_rdm_buffer(tmp_det, tmp_det2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + endif + enddo + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + + ! Compute Hij for all beta doubles + ! ---------------------------------- + + if(beta_beta.or.spin_trace)then + do i=1,n_doubles + l_b = doubles(i) + ASSERT (l_b <= N_det) + + lcol = psi_bilinear_matrix_transp_columns(l_b) + ASSERT (lcol <= N_det_beta_unique) + + l_a = psi_bilinear_matrix_transp_order(l_b) + do ll= 1, N_states + do l= 1, N_states + c_1(l,ll) = u_t(ll,l_a) * u_t(l,k_a) + enddo + enddo + if (nkeys+4 .ge. sze_buff) then + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + endif + call orb_range_off_diag_double_to_all_states_trans_rdm_bb_buffer(tmp_det(1,2),psi_det_beta_unique(1, lcol),c_1,N_st,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) +! print*,'to do orb_range_off_diag_double_to_2_trans_rdm_bb_dm_buffer' + ASSERT (l_a <= N_det) + + enddo + endif + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + + + ! Diagonal contribution + ! ===================== + + + ! Initial determinant is at k_a in alpha-major representation + ! 
----------------------------------------------------------------------- + + krow = psi_bilinear_matrix_rows(k_a) + ASSERT (krow <= N_det_alpha_unique) + + kcol = psi_bilinear_matrix_columns(k_a) + ASSERT (kcol <= N_det_beta_unique) + + tmp_det(1:$N_int,1) = psi_det_alpha_unique(1:$N_int, krow) + tmp_det(1:$N_int,2) = psi_det_beta_unique (1:$N_int, kcol) + + double precision, external :: diag_wee_mat_elem, diag_S_mat_elem + + double precision :: c_1(N_states,N_states) + do ll = 1, N_states + do l = 1, N_states + c_1(l,ll) = u_t(ll,k_a) * u_t(l,k_a) + enddo + enddo + + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + call orb_range_diag_to_all_states_2_rdm_trans_buffer(tmp_det,c_1,N_states,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + call update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2trans_rdm) + nkeys = 0 + + end do + !$OMP END DO + deallocate(buffer, singles_a, singles_b, doubles, idx, keys, values) + !$OMP END PARALLEL + +end + + SUBST [ N_int ] + + 1;; + 2;; + 3;; + 4;; + N_int;; + + END_TEMPLATE + +subroutine update_keys_values_n_states_trans(keys,values,nkeys,dim1,n_st,big_array,lock_2rdm) + use omp_lib + implicit none + integer, intent(in) :: n_st,nkeys,dim1 + integer, intent(in) :: keys(4,nkeys) + double precision, intent(in) :: values(n_st,n_st,nkeys) + double precision, intent(inout) :: big_array(dim1,dim1,dim1,dim1,n_st,n_st) + + integer(omp_lock_kind),intent(inout):: lock_2rdm + + integer :: i,h1,h2,p1,p2,istate,jstate + call omp_set_lock(lock_2rdm) + +! print*,'*************' +! print*,'updating' +! print*,'nkeys',nkeys + do i = 1, nkeys + h1 = keys(1,i) + h2 = keys(2,i) + p1 = keys(3,i) + p2 = keys(4,i) + do jstate = 1, N_st + do istate = 1, N_st +!! print*,h1,h2,p1,p2,values(istate,i) + big_array(h1,h2,p1,p2,istate,jstate) += values(istate,jstate,i) + enddo + enddo + enddo + call omp_unset_lock(lock_2rdm) + +end + diff --git a/src/two_rdm_routines/update_trans_rdm.irp.f b/src/two_rdm_routines/update_trans_rdm.irp.f new file mode 100644 index 00000000..9f7077a2 --- /dev/null +++ b/src/two_rdm_routines/update_trans_rdm.irp.f @@ -0,0 +1,1002 @@ + subroutine orb_range_diag_to_all_states_2_rdm_trans_buffer(det_1,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + use bitmasks + BEGIN_DOC + ! routine that update the DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for a given determinant det_1 + ! + ! c_1 is the array of the contributions to the trans_rdm for all states + ! + ! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals + ! + ! ispin determines which spin-spin component of the two-trans_rdm you will update + ! + ! ispin == 1 :: alpha/ alpha + ! ispin == 2 :: beta / beta + ! ispin == 3 :: alpha/ beta + ! 
ispin == 4 :: spin traced <=> total two-trans_rdm + END_DOC + implicit none + integer, intent(in) :: ispin,sze_buff,N_st + integer, intent(in) :: list_orb_reverse(mo_num) + integer(bit_kind), intent(in) :: det_1(N_int,2) + integer(bit_kind), intent(in) :: orb_bitmask(N_int) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + + integer :: occ(N_int*bit_kind_size,2) + integer :: n_occ_ab(2) + integer :: i,j,h1,h2 + integer(bit_kind) :: det_1_act(N_int,2) + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + do i = 1, N_int + det_1_act(i,1) = iand(det_1(i,1),orb_bitmask(i)) + det_1_act(i,2) = iand(det_1(i,2),orb_bitmask(i)) + enddo + + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + endif + call bitstring_to_list_ab(det_1_act, occ, n_occ_ab, N_int) + logical :: is_integer_in_string + integer :: i1,i2,istate + integer :: jstate + if(alpha_beta)then + do i = 1, n_occ_ab(1) + i1 = occ(i,1) + do j = 1, n_occ_ab(2) + i2 = occ(j,2) + h1 = list_orb_reverse(i1) + h2 = list_orb_reverse(i2) + ! If alpha/beta, electron 1 is alpha, electron 2 is beta + ! Therefore you don't necessayr have symmetry between electron 1 and 2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h1 + keys(4,nkeys) = h2 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = h1 + enddo + enddo + + else if (alpha_alpha)then + do i = 1, n_occ_ab(1) + i1 = occ(i,1) + do j = 1, n_occ_ab(1) + i2 = occ(j,1) + h1 = list_orb_reverse(i1) + h2 = list_orb_reverse(i2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = -0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h2 + keys(4,nkeys) = h1 + enddo + enddo + else if (beta_beta)then + do i = 1, n_occ_ab(2) + i1 = occ(i,2) + do j = 1, n_occ_ab(2) + i2 = occ(j,2) + h1 = list_orb_reverse(i1) + h2 = list_orb_reverse(i2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = -0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h2 + keys(4,nkeys) = h1 + enddo + enddo + else if(spin_trace)then + ! 
0.5 * (alpha beta + beta alpha) + do i = 1, n_occ_ab(1) + i1 = occ(i,1) + do j = 1, n_occ_ab(2) + i2 = occ(j,2) + h1 = list_orb_reverse(i1) + h2 = list_orb_reverse(i2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = h1 + enddo + enddo + do i = 1, n_occ_ab(1) + i1 = occ(i,1) + do j = 1, n_occ_ab(1) + i2 = occ(j,1) + h1 = list_orb_reverse(i1) + h2 = list_orb_reverse(i2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = -0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h2 + keys(4,nkeys) = h1 + enddo + enddo + do i = 1, n_occ_ab(2) + i1 = occ(i,2) + do j = 1, n_occ_ab(2) + i2 = occ(j,2) + h1 = list_orb_reverse(i1) + h2 = list_orb_reverse(i2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = -0.5d0 * c_1(istate,jstate) + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h2 + keys(4,nkeys) = h1 + enddo + enddo + endif + end + + + subroutine orb_range_off_diag_double_to_all_states_ab_trans_rdm_buffer(det_1,det_2,c_1,N_st,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + use bitmasks + BEGIN_DOC +! routine that update the OFF DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for +! +! a given couple of determinant det_1, det_2 being a alpha/beta DOUBLE excitation with respect to one another +! +! c_1 is the array of the contributions to the trans_rdm for all states +! +! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals +! +! ispin determines which spin-spin component of the two-trans_rdm you will update +! +! ispin == 1 :: alpha/ alpha +! ispin == 2 :: beta / beta +! ispin == 3 :: alpha/ beta +! ispin == 4 :: spin traced <=> total two-trans_rdm +! +! here, only ispin == 3 or 4 will do something + END_DOC + implicit none + integer, intent(in) :: ispin,sze_buff,N_st + integer(bit_kind), intent(in) :: det_1(N_int,2),det_2(N_int,2) + integer, intent(in) :: list_orb_reverse(mo_num) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + integer :: i,j,h1,h2,p1,p2,istate + integer :: exc(0:2,2,2) + double precision :: phase + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + logical :: is_integer_in_string + integer :: jstate + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. 
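+  ! Only the alpha_beta (ispin == 3) and spin_trace (ispin == 4) cases add
+  ! contributions below; for ispin == 1 or 2 this alpha/beta double-excitation
+  ! routine leaves the keys/values buffer untouched.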
+ endif + call get_double_excitation(det_1,det_2,exc,phase,N_int) + h1 = exc(1,1,1) + if(list_orb_reverse(h1).lt.0)return + h1 = list_orb_reverse(h1) + h2 = exc(1,1,2) + if(list_orb_reverse(h2).lt.0)return + h2 = list_orb_reverse(h2) + p1 = exc(1,2,1) + if(list_orb_reverse(p1).lt.0)return + p1 = list_orb_reverse(p1) + p2 = exc(1,2,2) + if(list_orb_reverse(p2).lt.0)return + p2 = list_orb_reverse(p2) + if(alpha_beta)then + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = p2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p2 + keys(4,nkeys) = p1 + else if(spin_trace)then + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = p2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p2 + keys(4,nkeys) = p1 + endif + end + + subroutine orb_range_off_diag_single_to_all_states_ab_trans_rdm_buffer(det_1,det_2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + use bitmasks + BEGIN_DOC + ! routine that update the OFF DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for + ! + ! a given couple of determinant det_1, det_2 being a SINGLE excitation with respect to one another + ! + ! c_1 is supposed to be a scalar quantity, such as state averaged coef of the determinant det_1 + ! + ! big_array(dim1,dim1,dim1,dim1) is the two-body trans_rdm to be updated in physicist notation + ! + ! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals + ! + ! ispin determines which spin-spin component of the two-trans_rdm you will update + ! + ! ispin == 1 :: alpha/ alpha + ! ispin == 2 :: beta / beta + ! ispin == 3 :: alpha/ beta + ! ispin == 4 :: spin traced <=> total two-trans_rdm + ! + ! here, only ispin == 3 or 4 will do something + END_DOC + implicit none + integer, intent(in) :: ispin,sze_buff,N_st + integer(bit_kind), intent(in) :: det_1(N_int,2),det_2(N_int,2) + integer, intent(in) :: list_orb_reverse(mo_num) + integer(bit_kind), intent(in) :: orb_bitmask(N_int) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + + integer :: jstate + integer :: occ(N_int*bit_kind_size,2) + integer :: n_occ_ab(2) + integer :: i,j,h1,h2,p1,istate + integer :: exc(0:2,2,2) + double precision :: phase + + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + logical :: is_integer_in_string + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + endif + + call bitstring_to_list_ab(det_1, occ, n_occ_ab, N_int) + call get_single_excitation(det_1,det_2,exc,phase,N_int) + if(alpha_beta)then + if (exc(0,1,1) == 1) then + ! 
Mono alpha + h1 = exc(1,1,1) + if(.not.is_integer_in_string(h1,orb_bitmask,N_int))return + h1 = list_orb_reverse(h1) + p1 = exc(1,2,1) + if(.not.is_integer_in_string(p1,orb_bitmask,N_int))return + p1 = list_orb_reverse(p1) + do i = 1, n_occ_ab(2) + h2 = occ(i,2) + if(.not.is_integer_in_string(h2,orb_bitmask,N_int))cycle + h2 = list_orb_reverse(h2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + enddo + else + ! Mono beta + h1 = exc(1,1,2) + if(.not.is_integer_in_string(h1,orb_bitmask,N_int))return + h1 = list_orb_reverse(h1) + p1 = exc(1,2,2) + if(.not.is_integer_in_string(p1,orb_bitmask,N_int))return + p1 = list_orb_reverse(p1) + do i = 1, n_occ_ab(1) + h2 = occ(i,1) + if(.not.is_integer_in_string(h2,orb_bitmask,N_int))cycle + h2 = list_orb_reverse(h2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + enddo + endif + else if(spin_trace)then + if (exc(0,1,1) == 1) then + ! Mono alpha + h1 = exc(1,1,1) + if(.not.is_integer_in_string(h1,orb_bitmask,N_int))return + h1 = list_orb_reverse(h1) + p1 = exc(1,2,1) + if(.not.is_integer_in_string(p1,orb_bitmask,N_int))return + p1 = list_orb_reverse(p1) + do i = 1, n_occ_ab(2) + h2 = occ(i,2) + if(.not.is_integer_in_string(h2,orb_bitmask,N_int))cycle + h2 = list_orb_reverse(h2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + enddo + else + ! Mono beta + h1 = exc(1,1,2) + if(.not.is_integer_in_string(h1,orb_bitmask,N_int))return + h1 = list_orb_reverse(h1) + p1 = exc(1,2,2) + if(.not.is_integer_in_string(p1,orb_bitmask,N_int))return + p1 = list_orb_reverse(p1) + do i = 1, n_occ_ab(1) + h2 = occ(i,1) + if(.not.is_integer_in_string(h2,orb_bitmask,N_int))cycle + h2 = list_orb_reverse(h2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + enddo + endif + endif + end + + subroutine orb_range_off_diag_single_to_all_states_aa_trans_rdm_buffer(det_1,det_2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + BEGIN_DOC + ! 
routine that update the OFF DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for + ! + ! a given couple of determinant det_1, det_2 being a ALPHA SINGLE excitation with respect to one another + ! + ! c_1 is supposed to be a scalar quantity, such as state averaged coef of the determinant det_1 + ! + ! big_array(dim1,dim1,dim1,dim1) is the two-body trans_rdm to be updated in physicist notation + ! + ! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals + ! + ! ispin determines which spin-spin component of the two-trans_rdm you will update + ! + ! ispin == 1 :: alpha/ alpha + ! ispin == 2 :: beta / beta + ! ispin == 3 :: alpha/ beta + ! ispin == 4 :: spin traced <=> total two-trans_rdm + ! + ! here, only ispin == 1 or 4 will do something + END_DOC + use bitmasks + implicit none + integer, intent(in) :: ispin,sze_buff,N_st + integer(bit_kind), intent(in) :: det_1(N_int,2),det_2(N_int,2) + integer, intent(in) :: list_orb_reverse(mo_num) + integer(bit_kind), intent(in) :: orb_bitmask(N_int) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + + integer :: jstate + integer :: occ(N_int*bit_kind_size,2) + integer :: n_occ_ab(2) + integer :: i,j,h1,h2,p1,istate + integer :: exc(0:2,2,2) + double precision :: phase + + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + logical :: is_integer_in_string + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + endif + + call bitstring_to_list_ab(det_1, occ, n_occ_ab, N_int) + call get_single_excitation(det_1,det_2,exc,phase,N_int) + if(alpha_alpha.or.spin_trace)then + if (exc(0,1,1) == 1) then + ! Mono alpha + h1 = exc(1,1,1) + if(.not.is_integer_in_string(h1,orb_bitmask,N_int))return + h1 = list_orb_reverse(h1) + p1 = exc(1,2,1) + if(.not.is_integer_in_string(p1,orb_bitmask,N_int))return + p1 = list_orb_reverse(p1) + do i = 1, n_occ_ab(1) + h2 = occ(i,1) + if(.not.is_integer_in_string(h2,orb_bitmask,N_int))cycle + h2 = list_orb_reverse(h2) + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + enddo + else + return + endif + endif + end + + subroutine orb_range_off_diag_single_to_all_states_bb_trans_rdm_buffer(det_1,det_2,c_1,N_st,orb_bitmask,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + use bitmasks + BEGIN_DOC + ! 
routine that update the OFF DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for + ! + ! a given couple of determinant det_1, det_2 being a BETA SINGLE excitation with respect to one another + ! + ! c_1 is supposed to be a scalar quantity, such as state averaged coef of the determinant det_1 + ! + ! big_array(dim1,dim1,dim1,dim1) is the two-body trans_rdm to be updated in physicist notation + ! + ! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals + ! + ! ispin determines which spin-spin component of the two-trans_rdm you will update + ! + ! ispin == 1 :: alpha/ alpha + ! ispin == 2 :: beta / beta + ! ispin == 3 :: alpha/ beta + ! ispin == 4 :: spin traced <=> total two-trans_rdm + ! + ! here, only ispin == 2 or 4 will do something + END_DOC + implicit none + integer, intent(in) :: ispin,sze_buff,N_st + integer(bit_kind), intent(in) :: det_1(N_int,2),det_2(N_int,2) + integer, intent(in) :: list_orb_reverse(mo_num) + integer(bit_kind), intent(in) :: orb_bitmask(N_int) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + + integer :: jstate + integer :: occ(N_int*bit_kind_size,2) + integer :: n_occ_ab(2) + integer :: i,j,h1,h2,p1,istate + integer :: exc(0:2,2,2) + double precision :: phase + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + logical :: is_integer_in_string + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + endif + + + call bitstring_to_list_ab(det_1, occ, n_occ_ab, N_int) + call get_single_excitation(det_1,det_2,exc,phase,N_int) + if(beta_beta.or.spin_trace)then + if (exc(0,1,1) == 1) then + return + else + ! Mono beta + h1 = exc(1,1,2) + if(.not.is_integer_in_string(h1,orb_bitmask,N_int))return + h1 = list_orb_reverse(h1) + p1 = exc(1,2,2) + if(.not.is_integer_in_string(p1,orb_bitmask,N_int))return + p1 = list_orb_reverse(p1) + do i = 1, n_occ_ab(2) + h2 = occ(i,2) + if(.not.is_integer_in_string(h2,orb_bitmask,N_int))cycle + h2 = list_orb_reverse(h2) + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = h2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p1 + keys(4,nkeys) = h2 + enddo + endif + endif + end + + + subroutine orb_range_off_diag_double_to_all_states_aa_trans_rdm_buffer(det_1,det_2,c_1,N_st,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + use bitmasks + BEGIN_DOC + ! 
routine that update the OFF DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for + ! + ! a given couple of determinant det_1, det_2 being a ALPHA/ALPHA DOUBLE excitation with respect to one another + ! + ! c_1 is supposed to be a scalar quantity, such as state averaged coef of the determinant det_1 + ! + ! big_array(dim1,dim1,dim1,dim1) is the two-body trans_rdm to be updated in physicist notation + ! + ! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals + ! + ! ispin determines which spin-spin component of the two-trans_rdm you will update + ! + ! ispin == 1 :: alpha/ alpha + ! ispin == 2 :: beta / beta + ! ispin == 3 :: alpha/ beta + ! ispin == 4 :: spin traced <=> total two-trans_rdm + ! + ! here, only ispin == 1 or 4 will do something + END_DOC + implicit none + integer, intent(in) :: ispin,sze_buff,N_st + integer(bit_kind), intent(in) :: det_1(N_int),det_2(N_int) + integer, intent(in) :: list_orb_reverse(mo_num) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + + + integer :: i,j,h1,h2,p1,p2,istate + integer :: exc(0:2,2) + double precision :: phase + + integer :: jstate + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + logical :: is_integer_in_string + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + endif + call get_double_excitation_spin(det_1,det_2,exc,phase,N_int) + h1 =exc(1,1) + if(list_orb_reverse(h1).lt.0)return + h1 = list_orb_reverse(h1) + h2 =exc(2,1) + if(list_orb_reverse(h2).lt.0)return + h2 = list_orb_reverse(h2) + p1 =exc(1,2) + if(list_orb_reverse(p1).lt.0)return + p1 = list_orb_reverse(p1) + p2 =exc(2,2) + if(list_orb_reverse(p2).lt.0)return + p2 = list_orb_reverse(p2) + if(alpha_alpha.or.spin_trace)then + nkeys += 1 + + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = p2 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p1 + keys(4,nkeys) = p2 + endif + end + + subroutine orb_range_off_diag_double_to_all_states_trans_rdm_bb_buffer(det_1,det_2,c_1,N_st,list_orb_reverse,ispin,sze_buff,nkeys,keys,values) + use bitmasks + BEGIN_DOC + ! routine that update the OFF DIAGONAL PART of the two body trans_rdms in a specific range of orbitals for + ! + ! a given couple of determinant det_1, det_2 being a BETA /BETA DOUBLE excitation with respect to one another + ! + ! c_1 is supposed to be a scalar quantity, such as state averaged coef of the determinant det_1 + ! 
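+  ! (in this multi-state version, c_1(istate,jstate) holds the product of the
+  ! coefficient of one of the two connected determinants in state istate with the
+  ! coefficient of the other in state jstate, as filled by the caller)
+  !
+  ! each contribution appends one entry to the buffer: keys(1:4,nkeys) stores the
+  ! two hole and two particle indices and values(:,:,nkeys) = +/- 0.5d0 * c_1 * phase;
+  ! the buffer is flushed into big_array by update_keys_values_n_states_trans
+  !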
+ ! big_array(dim1,dim1,dim1,dim1) is the two-body trans_rdm to be updated in physicist notation + ! + ! orb_bitmask(N_int) is the bitmask for the orbital range, list_orb_reverse(mo_num) is the inverse range of orbitals + ! + ! ispin determines which spin-spin component of the two-trans_rdm you will update + ! + ! ispin == 1 :: alpha/ alpha + ! ispin == 2 :: beta / beta + ! ispin == 3 :: alpha/ beta + ! ispin == 4 :: spin traced <=> total two-trans_rdm + ! + ! here, only ispin == 2 or 4 will do something + END_DOC + implicit none + + integer, intent(in) :: ispin,sze_buff,N_st + integer(bit_kind), intent(in) :: det_1(N_int),det_2(N_int) + integer, intent(in) :: list_orb_reverse(mo_num) + double precision, intent(in) :: c_1(N_st,N_st) + double precision, intent(out) :: values(N_st,N_st,sze_buff) + integer , intent(out) :: keys(4,sze_buff) + integer , intent(inout):: nkeys + + integer :: jstate + integer :: i,j,h1,h2,p1,p2,istate + integer :: exc(0:2,2) + double precision :: phase + logical :: alpha_alpha,beta_beta,alpha_beta,spin_trace + logical :: is_integer_in_string + alpha_alpha = .False. + beta_beta = .False. + alpha_beta = .False. + spin_trace = .False. + if( ispin == 1)then + alpha_alpha = .True. + else if(ispin == 2)then + beta_beta = .True. + else if(ispin == 3)then + alpha_beta = .True. + else if(ispin == 4)then + spin_trace = .True. + endif + + call get_double_excitation_spin(det_1,det_2,exc,phase,N_int) + h1 =exc(1,1) + if(list_orb_reverse(h1).lt.0)return + h1 = list_orb_reverse(h1) + h2 =exc(2,1) + if(list_orb_reverse(h2).lt.0)return + h2 = list_orb_reverse(h2) + p1 =exc(1,2) + if(list_orb_reverse(p1).lt.0)return + p1 = list_orb_reverse(p1) + p2 =exc(2,2) + if(list_orb_reverse(p2).lt.0)return + p2 = list_orb_reverse(p2) + if(beta_beta.or.spin_trace)then + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p1 + keys(4,nkeys) = p2 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h1 + keys(2,nkeys) = h2 + keys(3,nkeys) = p2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p2 + keys(4,nkeys) = p1 + + nkeys += 1 + do jstate = 1, N_st + do istate = 1, N_st + values(istate,jstate,nkeys) = - 0.5d0 * c_1(istate,jstate) * phase + enddo + enddo + keys(1,nkeys) = h2 + keys(2,nkeys) = h1 + keys(3,nkeys) = p1 + keys(4,nkeys) = p2 + endif + end + diff --git a/src/utils/linear_algebra.irp.f b/src/utils/linear_algebra.irp.f index 314ad4f6..175beff3 100644 --- a/src/utils/linear_algebra.irp.f +++ b/src/utils/linear_algebra.irp.f @@ -652,6 +652,7 @@ subroutine get_pseudo_inverse_complex(A,LDA,m,n,C,LDC,cutoff) complex*16, allocatable :: U(:,:), Vt(:,:), work(:), A_tmp(:,:) integer :: info, lwork integer :: i,j,k + double precision :: d1 allocate (D(n),U(m,n),Vt(n,n),work(1),A_tmp(m,n),rwork(5*n)) do j=1,n do i=1,m @@ -673,8 +674,9 @@ subroutine get_pseudo_inverse_complex(A,LDA,m,n,C,LDC,cutoff) stop 1 endif + d1 = D(1) do i=1,n - if (D(i) > cutoff*D(1)) then + if (D(i) > cutoff*d1) then D(i) = 1.d0/D(i) else D(i) = 0.d0 @@ -1321,19 +1323,23 @@ subroutine get_inverse(A,LDA,m,C,LDC) deallocate(ipiv,work) end -subroutine get_pseudo_inverse(A,LDA,m,n,C,LDC,cutoff) - implicit 
none +subroutine get_pseudo_inverse(A, LDA, m, n, C, LDC, cutoff) + BEGIN_DOC ! Find C = A^-1 END_DOC - integer, intent(in) :: m,n, LDA, LDC - double precision, intent(in) :: A(LDA,n) - double precision, intent(in) :: cutoff - double precision, intent(out) :: C(LDC,m) - double precision, allocatable :: U(:,:), D(:), Vt(:,:), work(:), A_tmp(:,:) - integer :: info, lwork - integer :: i,j,k + implicit none + integer, intent(in) :: m, n, LDA, LDC + double precision, intent(in) :: A(LDA,n) + double precision, intent(in) :: cutoff + double precision, intent(out) :: C(LDC,m) + + integer :: info, lwork + integer :: i, j, k, n_svd + double precision :: D1_inv + double precision, allocatable :: U(:,:), D(:), Vt(:,:), work(:), A_tmp(:,:) + allocate (D(n),U(m,n),Vt(n,n),work(1),A_tmp(m,n)) do j=1,n do i=1,m @@ -1355,22 +1361,45 @@ subroutine get_pseudo_inverse(A,LDA,m,n,C,LDC,cutoff) stop 1 endif - do i=1,n - if (D(i)/D(1) > cutoff) then - D(i) = 1.d0/D(i) - else - D(i) = 0.d0 - endif - enddo + if(D(1) .lt. 1d-14) then + print*, ' largest singular value is very small:', D(1) + n_svd = 1 + else + n_svd = 0 + D1_inv = 1.d0 / D(1) + do i = 1, n + if(D(i)*D1_inv > cutoff) then + D(i) = 1.d0 / D(i) + n_svd = n_svd + 1 + else + D(i) = 0.d0 + endif + enddo + endif - C = 0.d0 - do i=1,m - do j=1,n - do k=1,n - C(j,i) = C(j,i) + U(i,k) * D(k) * Vt(k,j) - enddo + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j) & + !$OMP SHARED (n, n_svd, D, Vt) + !$OMP DO + do j = 1, n + do i = 1, n_svd + Vt(i,j) = D(i) * Vt(i,j) enddo enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm('T', 'T', n, m, n_svd, 1.d0, Vt, size(Vt,1), U, size(U,1), 0.d0, C, size(C,1)) + +! C = 0.d0 +! do i=1,m +! do j=1,n +! do k=1,n_svd +! C(j,i) = C(j,i) + U(i,k) * D(k) * Vt(k,j) +! enddo +! enddo +! enddo deallocate(U,D,Vt,work,A_tmp) @@ -1868,3 +1897,140 @@ end do end subroutine pivoted_cholesky +subroutine exp_matrix(X,n,exp_X) + implicit none + double precision, intent(in) :: X(n,n) + integer, intent(in):: n + double precision, intent(out):: exp_X(n,n) + BEGIN_DOC + ! exponential of the matrix X: X has to be ANTI HERMITIAN !! + ! + ! taken from Hellgaker, jorgensen, Olsen book + ! + ! section evaluation of matrix exponential (Eqs. 3.1.29 to 3.1.31) + END_DOC + integer :: i + double precision, allocatable :: r2_mat(:,:),eigvalues(:),eigvectors(:,:) + double precision, allocatable :: matrix_tmp1(:,:),eigvalues_mat(:,:),matrix_tmp2(:,:) + include 'constants.include.F' + allocate(r2_mat(n,n),eigvalues(n),eigvectors(n,n)) + allocate(eigvalues_mat(n,n),matrix_tmp1(n,n),matrix_tmp2(n,n)) + + ! r2_mat = X^2 in the 3.1.30 + call get_A_squared(X,n,r2_mat) + call lapack_diagd(eigvalues,eigvectors,r2_mat,n,n) + eigvalues=-eigvalues + + if(.False.)then + !!! For debugging and following the book intermediate + ! rebuilding the matrix : X^2 = -W t^2 W^T as in 3.1.30 + ! matrix_tmp1 = W t^2 + print*,'eigvalues = ' + do i = 1, n + print*,i,eigvalues(i) + write(*,'(100(F16.10,X))')eigvectors(:,i) + enddo + eigvalues_mat=0.d0 + do i = 1,n + ! 
t = dsqrt(t^2) where t^2 are eigenvalues of X^2
+   eigvalues(i) = dsqrt(eigvalues(i))
+   eigvalues_mat(i,i) = eigvalues(i)*eigvalues(i)
+  enddo
+  call dgemm('N','N',n,n,n,1.d0,eigvectors,size(eigvectors,1), &
+       eigvalues_mat,size(eigvalues_mat,1),0.d0,matrix_tmp1,size(matrix_tmp1,1))
+  call dgemm('N','T',n,n,n,-1.d0,matrix_tmp1,size(matrix_tmp1,1), &
+       eigvectors,size(eigvectors,1),0.d0,matrix_tmp2,size(matrix_tmp2,1))
+  print*,'r2_mat new = '
+  do i = 1, n
+   write(*,'(100(F16.10,X))')matrix_tmp2(:,i)
+  enddo
+ endif
+
+ ! building the exponential
+ ! exp(X) = W cos(t) W^T + W t^-1 sin(t) W^T X as in Eq. 3.1.31
+ ! eigvalues_mat = cos(t) on the diagonal
+ do i = 1,n
+  eigvalues_mat(i,i) = dcos(eigvalues(i))
+ enddo
+ ! matrix_tmp1 = W cos(t)
+ call dgemm('N','N',n,n,n,1.d0,eigvectors,size(eigvectors,1), &
+      eigvalues_mat,size(eigvalues_mat,1),0.d0,matrix_tmp1,size(matrix_tmp1,1))
+ ! matrix_tmp2 = W cos(t) W^T
+ call dgemm('N','T',n,n,n,-1.d0,matrix_tmp1,size(matrix_tmp1,1), &
+      eigvectors,size(eigvectors,1),0.d0,matrix_tmp2,size(matrix_tmp2,1))
+ exp_X = matrix_tmp2
+ ! matrix_tmp2 = W t^-1 sin(t) W^T X
+ do i = 1,n
+  if(dabs(eigvalues(i)).gt.1.d-4)then
+   eigvalues_mat(i,i) = dsin(eigvalues(i))/eigvalues(i)
+  else ! Taylor expansion of sin(x)/x near x=0 : 1 - x^2/6
+   eigvalues_mat(i,i) = 1.d0 - eigvalues(i)*eigvalues(i)*c_1_3*0.5d0
+  endif
+ enddo
+ ! matrix_tmp1 = W t^-1 sin(t)
+ call dgemm('N','N',n,n,n,1.d0,eigvectors,size(eigvectors,1), &
+      eigvalues_mat,size(eigvalues_mat,1),0.d0,matrix_tmp1,size(matrix_tmp1,1))
+ ! matrix_tmp2 = W t^-1 sin(t) W^T
+ call dgemm('N','T',n,n,n,-1.d0,matrix_tmp1,size(matrix_tmp1,1), &
+      eigvectors,size(eigvectors,1),0.d0,matrix_tmp2,size(matrix_tmp2,1))
+ ! exp_X += matrix_tmp2 X
+ call dgemm('N','N',n,n,n,1.d0,matrix_tmp2,size(matrix_tmp2,1), &
+      X,size(X,1),1.d0,exp_X,size(exp_X,1))
+
+end
+
+
+subroutine exp_matrix_taylor(X,n,exp_X,converged)
+ implicit none
+ BEGIN_DOC
+ ! exponential of a general real matrix X using the Taylor expansion of exp(X)
+ !
+ ! returns the logical converged, which reports whether the expansion converged
+ END_DOC
+ double precision, intent(in) :: X(n,n)
+ integer, intent(in):: n
+ double precision, intent(out):: exp_X(n,n)
+ logical, intent(out) :: converged
+ double precision :: f
+ integer :: i,iter
+ double precision, allocatable :: Tpotmat(:,:),Tpotmat2(:,:)
+ allocate(Tpotmat(n,n),Tpotmat2(n,n))
+ ! Tpotmat accumulates X^k/k!, exp_X accumulates the partial sums;
+ ! both start from the identity (k = 0 term)
+ Tpotmat(:,:)=0.D0
+ exp_X(:,:) =0.D0
+ do i=1,n
+  Tpotmat(i,i)=1.D0
+  exp_X(i,i) =1.d0
+ end do
+ iter=0
+ converged=.false.
+ do while ((.not.converged).and.(iter.lt.30))
+  iter+=1
+  f = 1.d0 / dble(iter)
+  Tpotmat2(:,:) = Tpotmat(:,:) * f
+  call dgemm('N','N', n,n,n,1.d0, &
+       Tpotmat2, size(Tpotmat2,1), &
+       X, size(X,1), 0.d0, &
+       Tpotmat, size(Tpotmat,1))
+  exp_X(:,:) = exp_X(:,:) + Tpotmat(:,:)
+
+  converged = ( sum(abs(Tpotmat(:,:))) < 1.d-6 )
+ end do
+ if(.not.converged)then
+  print*,'Warning !! exp_matrix_taylor did not converge !'
+ endif
+
+end
+
+subroutine get_A_squared(A,n,A2)
+ implicit none
+ BEGIN_DOC
+! A2 = A A where A is an n x n matrix, computed with dgemm
+ END_DOC
+ double precision, intent(in) :: A(n,n)
+ integer, intent(in) :: n
+ double precision, intent(out):: A2(n,n)
+ call dgemm('N','N',n,n,n,1.d0,A,size(A,1),A,size(A,1),0.d0,A2,size(A2,1))
+end
diff --git a/src/utils/util.irp.f b/src/utils/util.irp.f
index ebb13781..97cbde67 100644
--- a/src/utils/util.irp.f
+++ b/src/utils/util.irp.f
@@ -579,5 +579,64 @@ logical function is_same_spin(sigma_1, sigma_2)
 end function is_same_spin
 
! ---
+
+function Kronecker_delta(i, j) result(delta)
+
+  BEGIN_DOC
+  ! Kronecker Delta
+  END_DOC
+
+  implicit none
+  integer, intent(in) :: i, j
+  double precision    :: delta
+
+  if(i == j) then
+    delta = 1.d0
+  else
+    delta = 0.d0
+  endif
+
+end function Kronecker_delta
+
+! ---
+
+subroutine diagonalize_sym_matrix(N, A, e)
+
+  BEGIN_DOC
+  !
+  ! Diagonalize a real symmetric matrix with LAPACK dsyev.
+  ! On exit, A is overwritten by the orthonormal eigenvectors (one per column)
+  ! and e contains the eigenvalues in ascending order.
+  !
+  END_DOC
+
+  implicit none
+
+  integer,          intent(in)    :: N
+  double precision, intent(inout) :: A(N,N)
+  double precision, intent(out)   :: e(N)
+
+  integer                       :: lwork, info
+  double precision, allocatable :: work(:)
+
+  allocate(work(1))
+
+  ! workspace query: dsyev returns the optimal lwork in work(1)
+  lwork = -1
+  call dsyev('V', 'U', N, A, N, e, work, lwork, info)
+  lwork = int(work(1))
+
+  deallocate(work)
+
+  allocate(work(lwork))
+
+  call dsyev('V', 'U', N, A, N, e, work, lwork, info)
+  deallocate(work)
+
+  if(info /= 0) then
+    print*,'Problem in diagonalize_sym_matrix (dsyev), info = ', info
+  endif
+
+end subroutine diagonalize_sym_matrix
+
+! ---
+
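As a quick sanity check for the dsyev wrapper added above, the short stand-alone program below calls diagonalize_sym_matrix on a small symmetric matrix. It is only an illustrative sketch: the program name, the 3x3 test matrix and the build line are not part of the patch, and it assumes the program is compiled together with the file providing diagonalize_sym_matrix and linked against a LAPACK library (for example -llapack or MKL).

    program check_diag_sym
      implicit none
      integer, parameter :: n = 3
      integer            :: i
      double precision   :: a(n,n), e(n)

      ! symmetric tridiagonal test matrix: 2 on the diagonal, 1 on the off-diagonals
      a = 0.d0
      do i = 1, n
        a(i,i) = 2.d0
      enddo
      do i = 1, n-1
        a(i,i+1) = 1.d0
        a(i+1,i) = 1.d0
      enddo

      ! on return (dsyev convention used by the wrapper): a holds the eigenvectors,
      ! one per column, and e the eigenvalues in ascending order
      call diagonalize_sym_matrix(n, a, e)

      print *, 'eigenvalues :', e(:)
    end program check_diag_sym

For this matrix the eigenvalues should come out close to 2 - sqrt(2), 2 and 2 + sqrt(2), which gives an easy visual check of the wrapper.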