diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f
new file mode 100644
index 00000000..c3cdb491
--- /dev/null
+++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f
@@ -0,0 +1,214 @@
+
+! ---
+
+BEGIN_PROVIDER [ integer, List_all_comb_b3_size]
+
+  implicit none
+
+  List_all_comb_b3_size = 3**nucl_num
+
+END_PROVIDER
+
+! ---
+
+BEGIN_PROVIDER [ integer, List_all_comb_b3, (nucl_num, List_all_comb_b3_size)]
+
+  implicit none
+  integer              :: i, j, ii, jj
+  integer, allocatable :: p(:)
+
+  ! List_all_comb_b3_size = 3**nucl_num overflows 32-bit integers beyond 19 centers
+  if(nucl_num .gt. 19) then
+    print *, ' nucl_num = ', nucl_num, '> 19'
+    stop
+  endif
+
+  List_all_comb_b3(:,:) = 0
+  List_all_comb_b3(:,List_all_comb_b3_size) = 2
+
+  allocate(p(nucl_num))
+  p = 0
+
+  do i = 2, List_all_comb_b3_size-1
+    do j = 1, nucl_num
+
+      ii = 0
+      do jj = 1, j-1, 1
+        ii = ii + p(jj) * 3**(jj-1)
+      enddo
+      p(j) = modulo(i-1-ii, 3**j) / 3**(j-1)
+
+      List_all_comb_b3(j,i) = p(j)
+    enddo
+  enddo
+
+END_PROVIDER
+
+! ---
+
+ BEGIN_PROVIDER [ double precision, List_all_comb_b3_coef, (   List_all_comb_b3_size)]
+&BEGIN_PROVIDER [ double precision, List_all_comb_b3_expo, (   List_all_comb_b3_size)]
+&BEGIN_PROVIDER [ double precision, List_all_comb_b3_cent, (3, List_all_comb_b3_size)]
+
+  implicit none
+  integer          :: i, j, k, phase
+  double precision :: tmp_alphaj, tmp_alphak, facto
+
+  provide j1b_pen
+
+  List_all_comb_b3_coef = 0.d0
+  List_all_comb_b3_expo = 0.d0
+  List_all_comb_b3_cent = 0.d0
+
+  do i = 1, List_all_comb_b3_size
+
+    do j = 1, nucl_num
+      tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j)
+
+      List_all_comb_b3_expo(i)   += tmp_alphaj
+      List_all_comb_b3_cent(1,i) += tmp_alphaj * nucl_coord(j,1)
+      List_all_comb_b3_cent(2,i) += tmp_alphaj * nucl_coord(j,2)
+      List_all_comb_b3_cent(3,i) += tmp_alphaj * nucl_coord(j,3)
+
+    enddo
+
+    ASSERT(List_all_comb_b3_expo(i) .gt. 0d0)
+    if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle
+
+    List_all_comb_b3_cent(1,i) = List_all_comb_b3_cent(1,i) / List_all_comb_b3_expo(i)
+    List_all_comb_b3_cent(2,i) = List_all_comb_b3_cent(2,i) / List_all_comb_b3_expo(i)
+    List_all_comb_b3_cent(3,i) = List_all_comb_b3_cent(3,i) / List_all_comb_b3_expo(i)
+  enddo
+
+  ! ---
+
+  do i = 1, List_all_comb_b3_size
+
+    do j = 2, nucl_num, 1
+      tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j)
+      do k = 1, j-1, 1
+        tmp_alphak = dble(List_all_comb_b3(k,i)) * j1b_pen(k)
+
+        List_all_comb_b3_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) &
+                                                              + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) &
+                                                              + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) )
+      enddo
+    enddo
+
+    if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle
+
+    List_all_comb_b3_coef(i) = List_all_comb_b3_coef(i) / List_all_comb_b3_expo(i)
+  enddo
+
+  ! ---
+
+  do i = 1, List_all_comb_b3_size
+
+    facto = 1.d0
+    phase = 0
+    do j = 1, nucl_num
+      tmp_alphaj = dble(List_all_comb_b3(j,i))
+
+      facto *= 2.d0 / (gamma(tmp_alphaj+1.d0) * gamma(3.d0-tmp_alphaj))
+      phase += List_all_comb_b3(j,i)
+    enddo
+
+    List_all_comb_b3_coef(i) = (-1.d0)**dble(phase) * facto * dexp(-List_all_comb_b3_coef(i))
+  enddo
+
+END_PROVIDER
+
+! ---
+
+BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_points_final_grid)]
+
+  BEGIN_DOC
+  !
+  ! -(1/4) \int dr2 phi_i(r2) phi_j(r2) [1s_j1b(r2)]^2 [1 - erf(mu r12)]^2
+  !
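+  ! Evaluation strategy (restating the loops below): both factors are expanded
+  ! on Gaussians,
+  !
+  !   [1 - erf(mu r12)]^2 = \sum_n coef_gauss_1_erf_x_2(n) * exp(-expo_gauss_1_erf_x_2(n) * r12^2)
+  !   [1s_j1b(r2)]^2      = \sum_s List_all_comb_b3_coef(s) * exp(-List_all_comb_b3_expo(s) * |r2 - List_all_comb_b3_cent(:,s)|^2)
+  !
+  ! so that every term reduces to a Gaussian-weighted AO overlap evaluated by
+  ! overlap_gauss_r12_ao_with1s.
+  !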
+ END_DOC + + implicit none + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit, expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:) + + double precision, external :: overlap_gauss_r12_ao_with1s + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + int2_grad1u_grad2u_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & + !$OMP final_grid_points, n_max_fit_slat, & + !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & + !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & + !$OMP List_all_comb_b3_cent, int2_grad1u_grad2u_j1b) + + allocate( tmp(ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_b3_size + + coef = List_all_comb_b3_coef (i_1s) + beta = List_all_comb_b3_expo (i_1s) + B_center(1) = List_all_comb_b3_cent(1,i_1s) + B_center(2) = List_all_comb_b3_cent(2,i_1s) + B_center(3) = List_all_comb_b3_cent(3,i_1s) + + do i_fit = 1, n_max_fit_slat + + expo_fit = expo_gauss_1_erf_x_2(i_fit) + coef_fit = coef_gauss_1_erf_x_2(i_fit) + int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) + + tmp(j,i,ipoint) += -0.25d0 * coef * coef_fit * int_fit + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + int2_grad1u_grad2u_j1b(j,i,ipoint) += tmp(j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + int2_grad1u_grad2u_j1b(j,i,ipoint) = int2_grad1u_grad2u_j1b(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for int2_grad1u_grad2u_j1b', wall1 - wall0 + +END_PROVIDER + +! --- + diff --git a/src/ao_many_one_e_ints/grad_J1b_ints.irp.f b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f similarity index 65% rename from src/ao_many_one_e_ints/grad_J1b_ints.irp.f rename to src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index 30e0acc8..c789ad36 100644 --- a/src/ao_many_one_e_ints/grad_J1b_ints.irp.f +++ b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -1,17 +1,17 @@ ! --- -BEGIN_PROVIDER [ integer, List_all_comb_size] +BEGIN_PROVIDER [ integer, List_all_comb_b2_size] implicit none - List_all_comb_size = 2**nucl_num + List_all_comb_b2_size = 2**nucl_num END_PROVIDER ! --- -BEGIN_PROVIDER [ integer, List_all_comb, (nucl_num, List_all_comb_size)] +BEGIN_PROVIDER [ integer, List_all_comb_b2, (nucl_num, List_all_comb_b2_size)] implicit none integer :: i, j @@ -21,12 +21,12 @@ BEGIN_PROVIDER [ integer, List_all_comb, (nucl_num, List_all_comb_size)] stop endif - List_all_comb = 0 + List_all_comb_b2 = 0 - do i = 0, List_all_comb_size-1 + do i = 0, List_all_comb_b2_size-1 do j = 0, nucl_num-1 if (btest(i,j)) then - List_all_comb(j+1,i+1) = 1 + List_all_comb_b2(j+1,i+1) = 1 endif enddo enddo @@ -35,9 +35,9 @@ END_PROVIDER ! 
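---

+! A note on the signed combinations (not in the original patch): the 2^nucl_num
+! entries of List_all_comb_b2 implement the inclusion-exclusion expansion of the
+! one-body envelope; e.g. for nucl_num = 2, with e_A(r) = exp(-j1b_pen(A) |r - R_A|^2),
+!
+!   (1 - e_1(r)) * (1 - e_2(r)) = 1 - e_1(r) - e_2(r) + e_1(r) e_2(r)
+!
+! Each term is a single Gaussian whose exponent, center and signed coefficient
+! are assembled by the provider below.

! 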
--- - BEGIN_PROVIDER [ double precision, List_all_j1b1s_coef, ( List_all_comb_size)] -&BEGIN_PROVIDER [ double precision, List_all_j1b1s_expo, ( List_all_comb_size)] -&BEGIN_PROVIDER [ double precision, List_all_j1b1s_cent, (3, List_all_comb_size)] + BEGIN_PROVIDER [ double precision, List_all_comb_b2_coef, ( List_all_comb_b2_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b2_expo, ( List_all_comb_b2_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b2_cent, (3, List_all_comb_b2_size)] implicit none integer :: i, j, k, phase @@ -45,60 +45,60 @@ END_PROVIDER provide j1b_pen - List_all_j1b1s_coef = 0.d0 - List_all_j1b1s_expo = 0.d0 - List_all_j1b1s_cent = 0.d0 + List_all_comb_b2_coef = 0.d0 + List_all_comb_b2_expo = 0.d0 + List_all_comb_b2_cent = 0.d0 - do i = 1, List_all_comb_size + do i = 1, List_all_comb_b2_size do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb(j,i)) * j1b_pen(j) + tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - List_all_j1b1s_expo(i) += tmp_alphaj - List_all_j1b1s_cent(1,i) += tmp_alphaj * nucl_coord(j,1) - List_all_j1b1s_cent(2,i) += tmp_alphaj * nucl_coord(j,2) - List_all_j1b1s_cent(3,i) += tmp_alphaj * nucl_coord(j,3) + List_all_comb_b2_expo(i) += tmp_alphaj + List_all_comb_b2_cent(1,i) += tmp_alphaj * nucl_coord(j,1) + List_all_comb_b2_cent(2,i) += tmp_alphaj * nucl_coord(j,2) + List_all_comb_b2_cent(3,i) += tmp_alphaj * nucl_coord(j,3) enddo - ASSERT(List_all_j1b1s_expo(i) .gt. 0d0) - if(List_all_j1b1s_expo(i) .lt. 1d-10) cycle + ASSERT(List_all_comb_b2_expo(i) .gt. 0d0) + if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - List_all_j1b1s_cent(1,i) = List_all_j1b1s_cent(1,i) / List_all_j1b1s_expo(i) - List_all_j1b1s_cent(2,i) = List_all_j1b1s_cent(2,i) / List_all_j1b1s_expo(i) - List_all_j1b1s_cent(3,i) = List_all_j1b1s_cent(3,i) / List_all_j1b1s_expo(i) + List_all_comb_b2_cent(1,i) = List_all_comb_b2_cent(1,i) / List_all_comb_b2_expo(i) + List_all_comb_b2_cent(2,i) = List_all_comb_b2_cent(2,i) / List_all_comb_b2_expo(i) + List_all_comb_b2_cent(3,i) = List_all_comb_b2_cent(3,i) / List_all_comb_b2_expo(i) enddo ! --- - do i = 1, List_all_comb_size + do i = 1, List_all_comb_b2_size do j = 2, nucl_num, 1 - tmp_alphaj = dble(List_all_comb(j,i)) * j1b_pen(j) + tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) do k = 1, j-1, 1 - tmp_alphak = dble(List_all_comb(k,i)) * j1b_pen(k) + tmp_alphak = dble(List_all_comb_b2(k,i)) * j1b_pen(k) - List_all_j1b1s_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & - + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & - + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + List_all_comb_b2_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) enddo enddo - if(List_all_j1b1s_expo(i) .lt. 1d-10) cycle + if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - List_all_j1b1s_coef(i) = List_all_j1b1s_coef(i) / List_all_j1b1s_expo(i) + List_all_comb_b2_coef(i) = List_all_comb_b2_coef(i) / List_all_comb_b2_expo(i) enddo ! 
--- - do i = 1, List_all_comb_size + do i = 1, List_all_comb_b2_size phase = 0 do j = 1, nucl_num - phase += List_all_comb(j,i) + phase += List_all_comb_b2(j,i) enddo - List_all_j1b1s_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_j1b1s_coef(i)) + List_all_comb_b2_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_comb_b2_coef(i)) enddo END_PROVIDER @@ -129,8 +129,8 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, int_mu, int_coulomb, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, final_grid_points, & - !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, List_all_j1b1s_cent, & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points, & + !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & !$OMP v_ij_erf_rk_cst_mu_j1b, mu_erf) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) @@ -144,13 +144,13 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - do i_1s = 1, List_all_comb_size + do i_1s = 1, List_all_comb_b2_size - coef = List_all_j1b1s_coef (i_1s) - beta = List_all_j1b1s_expo (i_1s) - B_center(1) = List_all_j1b1s_cent(1,i_1s) - B_center(2) = List_all_j1b1s_cent(2,i_1s) - B_center(3) = List_all_j1b1s_cent(3,i_1s) + coef = List_all_comb_b2_coef (i_1s) + beta = List_all_comb_b2_expo (i_1s) + B_center(1) = List_all_comb_b2_cent(1,i_1s) + B_center(2) = List_all_comb_b2_cent(2,i_1s) + B_center(3) = List_all_comb_b2_cent(3,i_1s) int_mu = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) int_coulomb = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) @@ -238,8 +238,8 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, final_grid_points, & - !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, List_all_j1b1s_cent, & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points,& + !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & !$OMP x_v_ij_erf_rk_cst_mu_tmp_j1b, mu_erf) allocate( tmp(3,ao_num,ao_num,n_points_final_grid) ) @@ -253,13 +253,13 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - do i_1s = 1, List_all_comb_size + do i_1s = 1, List_all_comb_b2_size - coef = List_all_j1b1s_coef (i_1s) - beta = List_all_j1b1s_expo (i_1s) - B_center(1) = List_all_j1b1s_cent(1,i_1s) - B_center(2) = List_all_j1b1s_cent(2,i_1s) - B_center(3) = List_all_j1b1s_cent(3,i_1s) + coef = List_all_comb_b2_coef (i_1s) + beta = List_all_comb_b2_expo (i_1s) + B_center(1) = List_all_comb_b2_cent(1,i_1s) + B_center(2) = List_all_comb_b2_cent(2,i_1s) + B_center(3) = List_all_comb_b2_cent(3,i_1s) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, ints ) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, ints_coulomb) @@ -291,15 +291,15 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = 1, i-1 - x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(1,i,j,ipoint) - x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) = 
x_v_ij_erf_rk_cst_mu_tmp(2,i,j,ipoint) - x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(3,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(1,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(2,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(3,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(3,i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 + print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp_j1b', wall1 - wall0 END_PROVIDER @@ -330,11 +330,11 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & !$OMP coef_fit, expo_fit, int_fit, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, & !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & - !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, & - !$OMP List_all_j1b1s_cent, v_ij_u_cst_mu_j1b) + !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, & + !$OMP List_all_comb_b2_cent, v_ij_u_cst_mu_j1b) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) tmp = 0.d0 @@ -347,13 +347,13 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - do i_1s = 1, List_all_comb_size + do i_1s = 1, List_all_comb_b2_size - coef = List_all_j1b1s_coef (i_1s) - beta = List_all_j1b1s_expo (i_1s) - B_center(1) = List_all_j1b1s_cent(1,i_1s) - B_center(2) = List_all_j1b1s_cent(2,i_1s) - B_center(3) = List_all_j1b1s_cent(3,i_1s) + coef = List_all_comb_b2_coef (i_1s) + beta = List_all_comb_b2_expo (i_1s) + B_center(1) = List_all_comb_b2_cent(1,i_1s) + B_center(2) = List_all_comb_b2_cent(2,i_1s) + B_center(3) = List_all_comb_b2_cent(3,i_1s) do i_fit = 1, n_max_fit_slat diff --git a/src/ao_many_one_e_ints/grad_related_ints.irp.f b/src/ao_many_one_e_ints/grad_related_ints.irp.f index 13fb1fc8..7b183d83 100644 --- a/src/ao_many_one_e_ints/grad_related_ints.irp.f +++ b/src/ao_many_one_e_ints/grad_related_ints.irp.f @@ -134,7 +134,7 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, call NAI_pol_x_mult_erf_ao(i, j, 1.d+9 , r, ints_coulomb) do m = 1, 3 - x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = (ints(m) - ints_coulomb(m)) + x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = (ints(m) - ints_coulomb(m)) enddo enddo enddo @@ -153,7 +153,7 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, enddo call wall_time(wall1) - print*,'wall time for x_v_ij_erf_rk_cst_mu_tmp',wall1 - wall0 + print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 END_PROVIDER diff --git a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f new file mode 100644 index 00000000..54026349 --- /dev/null +++ b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -0,0 +1,93 @@ + +! -- + +program debug_integ_jmu_modif + + implicit none + + my_grid_becke = .True. + !my_n_pt_r_grid = 30 + !my_n_pt_a_grid = 50 + my_n_pt_r_grid = 100 + my_n_pt_a_grid = 170 + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + PROVIDE mu_erf j1b_pen + + call test_grad_1_u_ij_mu() + +end + +! 
---
+
+subroutine test_grad_1_u_ij_mu()
+
+  implicit none
+  integer                    :: i, j, ipoint
+  double precision           :: acc_ij, acc_tot, eps_ij, i_exc, i_num
+  double precision, external :: num_grad_1_u_ij_mu_x
+  double precision, external :: num_grad_1_u_ij_mu_y
+  double precision, external :: num_grad_1_u_ij_mu_z
+
+  print*, ' test_grad_1_u_ij_mu ...'
+
+  PROVIDE grad_1_u_ij_mu
+! PROVIDE num_grad_1_u_ij_mu
+
+  eps_ij  = 1d-6
+  acc_tot = 0.d0
+
+  do ipoint = 1, n_points_final_grid
+    do j = 1, ao_num
+      do i = 1, ao_num
+
+        i_exc  = grad_1_u_ij_mu(i,j,ipoint,1)
+        !i_num  = num_grad_1_u_ij_mu(i,j,ipoint,1)
+        i_num  = num_grad_1_u_ij_mu_x(i, j, ipoint)
+        acc_ij = dabs(i_exc - i_num)
+        if(acc_ij .gt. eps_ij) then
+          print *, ' problem in x part of grad_1_u_ij_mu on', i, j, ipoint
+          print *, ' analytical integral = ', i_exc
+          print *, ' numerical  integral = ', i_num
+          print *, ' diff                = ', acc_ij
+        endif
+        acc_tot += acc_ij
+
+        i_exc  = grad_1_u_ij_mu(i,j,ipoint,2)
+        !i_num  = num_grad_1_u_ij_mu(i,j,ipoint,2)
+        i_num  = num_grad_1_u_ij_mu_y(i, j, ipoint)
+        acc_ij = dabs(i_exc - i_num)
+        if(acc_ij .gt. eps_ij) then
+          print *, ' problem in y part of grad_1_u_ij_mu on', i, j, ipoint
+          print *, ' analytical integral = ', i_exc
+          print *, ' numerical  integral = ', i_num
+          print *, ' diff                = ', acc_ij
+        endif
+        acc_tot += acc_ij
+
+        i_exc  = grad_1_u_ij_mu(i,j,ipoint,3)
+        !i_num  = num_grad_1_u_ij_mu(i,j,ipoint,3)
+        i_num  = num_grad_1_u_ij_mu_z(i, j, ipoint)
+        acc_ij = dabs(i_exc - i_num)
+        if(acc_ij .gt. eps_ij) then
+          print *, ' problem in z part of grad_1_u_ij_mu on', i, j, ipoint
+          print *, ' analytical integral = ', i_exc
+          print *, ' numerical  integral = ', i_num
+          print *, ' diff                = ', acc_ij
+        endif
+        acc_tot += acc_ij
+
+      enddo
+    enddo
+  enddo
+
+  acc_tot = acc_tot / dble(ao_num*ao_num*n_points_final_grid)
+  print*, ' normalized acc = ', acc_tot
+
+  return
+end subroutine test_grad_1_u_ij_mu
+
+! ---
+
+
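! Reviewer note: the test above flags deviations with an absolute threshold
! (eps_ij); when the integrals vary strongly in magnitude across grid points, a
! relative criterion can be more informative. A minimal sketch (hypothetical
! helper, not part of this patch):
!
!   double precision function rel_dev(x_exc, x_num)
!     implicit none
!     double precision, intent(in) :: x_exc, x_num
!     ! relative deviation, floored to avoid dividing by vanishing references
!     rel_dev = dabs(x_exc - x_num) / max(1.d-12, dabs(x_num))
!   end function rel_dev
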
diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f
index a88521a1..d02edb12 100644
--- a/src/non_h_ints_mu/grad_squared.irp.f
+++ b/src/non_h_ints_mu/grad_squared.irp.f
@@ -1,72 +1,114 @@
- BEGIN_PROVIDER [ double precision, grad_1_squared_u_ij_mu, ( ao_num, ao_num,n_points_final_grid)]
- implicit none
- integer :: ipoint,i,j,m,igauss
- BEGIN_DOC
- ! grad_1_squared_u_ij_mu(j,i,ipoint) = -1/2 \int dr2 phi_j(r2) phi_i(r2) |\grad_r1 u(r1,r2,\mu)|^2
- ! |\grad_r1 u(r1,r2,\mu)|^2 = 1/4 * (1 - erf(mu*r12))^2
- !
- ! (1 - erf(mu*r12))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * r12^2)
- END_DOC
- double precision :: r(3),delta,coef
- double precision :: overlap_gauss_r12_ao,time0,time1
- print*,'providing grad_1_squared_u_ij_mu ...'
- call wall_time(time0)
- !TODO : strong optmization : write the loops in a different way
- !       : for each couple of AO, the gaussian product are done once for all
- do ipoint = 1, n_points_final_grid
-  r(1) = final_grid_points(1,ipoint)
-  r(2) = final_grid_points(2,ipoint)
-  r(3) = final_grid_points(3,ipoint)
-  do j = 1, ao_num
-   do i = 1, ao_num
-    ! \int dr2 phi_j(r2) phi_i(r2) (1 - erf(mu*r12))^2
-    ! = \sum_i coef_gauss_1_erf_x_2(i) \int dr2 phi_j(r2) phi_i(r2) exp(-expo_gauss_1_erf_x_2(i) * (r_1 - r_2)^2)
-    do igauss = 1, n_max_fit_slat
-     delta = expo_gauss_1_erf_x_2(igauss)
-     coef = coef_gauss_1_erf_x_2(igauss)
-     grad_1_squared_u_ij_mu(j,i,ipoint) += -0.25 * coef * overlap_gauss_r12_ao(r,delta,i,j)
-    enddo
-   enddo
-  enddo
- enddo
- call wall_time(time1)
- print*,'Wall time for grad_1_squared_u_ij_mu = ',time1 - time0
- END_PROVIDER
-
-BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)]
- implicit none
- BEGIN_DOC
- ! tc_grad_square_ao(k,i,l,j) = -1/2 < k l | |\grad_1 u(r1,r2)|^2 + |\grad_2 u(r1,r2)|^2 | i j >
- !
- END_DOC
- integer :: ipoint,i,j,k,l
- double precision :: contrib,weight1
- double precision, allocatable :: ac_mat(:,:,:,:)
- allocate(ac_mat(ao_num, ao_num, ao_num, ao_num))
- ac_mat = 0.d0
- do ipoint = 1, n_points_final_grid
-  weight1 = final_weight_at_r_vector(ipoint)
-  do j = 1, ao_num
-   do l = 1, ao_num
-    do i = 1, ao_num
-     do k = 1, ao_num
-      contrib = weight1 *0.5D0* (aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,i))
-      ! \int dr1 phi_k(r1) phi_i(r1) . \int dr2 |\grad_1 u(r1,r2)|^2 \phi_l(r2) \phi_j(r2)
-      ac_mat(k,i,l,j) += grad_1_squared_u_ij_mu(l,j,ipoint) * contrib
-     enddo
-    enddo
-   enddo
-  enddo
- enddo
- do j = 1, ao_num
-  do l = 1, ao_num
-   do i = 1, ao_num
-    do k = 1, ao_num
-     tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i)
-    enddo
-   enddo
-  enddo
- enddo
-END_PROVIDER
+! ---
+
+! TODO : strong optimization : write the loops in a different way
+!        : for each couple of AOs, the Gaussian products are done once for all
+
+BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num, n_points_final_grid)]
+
+  BEGIN_DOC
+  !
+  ! -1/2 [ (grad_1 u)^2 + (grad_2 u)^2 ] = -1/4 * (1 - erf(mu*r12))^2
+  !
+  ! and
+  !   (1 - erf(mu*r12))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * r12^2)
+  !
+  END_DOC
+
+  implicit none
+  integer                    :: ipoint, i, j, igauss
+  double precision           :: r(3), delta, coef
+  double precision           :: time0, time1
+  double precision, external :: overlap_gauss_r12_ao
+
+  print*, ' providing gradu_squared_u_ij_mu ...'
+  call wall_time(time0)
+
+  PROVIDE j1b_type j1b_pen
+
+  gradu_squared_u_ij_mu = 0.d0
+
+  if(j1b_type .eq. 3) then
+
+    do ipoint = 1, n_points_final_grid
+      do j = 1, ao_num
+        do i = 1, ao_num
+          gradu_squared_u_ij_mu(j,i,ipoint) += fact3_j12(ipoint) * int2_grad1u_grad2u_j1b(i,j,ipoint)
+        enddo
+      enddo
+    enddo
+
+  else
+
+    do ipoint = 1, n_points_final_grid
+      r(1) = final_grid_points(1,ipoint)
+      r(2) = final_grid_points(2,ipoint)
+      r(3) = final_grid_points(3,ipoint)
+      do j = 1, ao_num
+        do i = 1, ao_num
+          do igauss = 1, n_max_fit_slat
+            delta = expo_gauss_1_erf_x_2(igauss)
+            coef  = coef_gauss_1_erf_x_2(igauss)
+            gradu_squared_u_ij_mu(j,i,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j)
+          enddo
+        enddo
+      enddo
+    enddo
+
+  endif
+
+  call wall_time(time1)
+  print*, ' Wall time for gradu_squared_u_ij_mu = ', time1 - time0
+
+END_PROVIDER
+
+! ---
+
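+! A possible optimization (a sketch, not part of this patch): the quadruple loop
+! in the provider below is a single contraction over the grid index, so it could
+! be done with one DGEMM after flattening the (k,i) and (l,j) pairs,
+!
+!   A(ipoint, k+(i-1)*ao_num) = final_weight_at_r_vector(ipoint) * 0.5d0 &
+!                             * aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,i)
+!   B(ipoint, l+(j-1)*ao_num) = gradu_squared_u_ij_mu(l,j,ipoint)
+!
+!   call dgemm( 'T', 'N', ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 &
+!             , A, n_points_final_grid, B, n_points_final_grid, 0.d0, ac_mat, ao_num*ao_num )
+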
+BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)]
+
+  BEGIN_DOC
+  !
+  ! tc_grad_square_ao(k,i,l,j) = -1/2 < k l | |\grad_1 u(r1,r2)|^2 + |\grad_2 u(r1,r2)|^2 | i j >
+  !
+  END_DOC
+
+  implicit none
+  integer                       :: ipoint, i, j, k, l
+  double precision              :: contrib, weight1
+  double precision, allocatable :: ac_mat(:,:,:,:)
+
+  allocate(ac_mat(ao_num,ao_num,ao_num,ao_num))
+  ac_mat = 0.d0
+
+  do ipoint = 1, n_points_final_grid
+    weight1 = final_weight_at_r_vector(ipoint)
+
+    do j = 1, ao_num
+      do l = 1, ao_num
+        do i = 1, ao_num
+          do k = 1, ao_num
+            contrib = weight1 * 0.5d0 * (aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,i))
+            ! \int dr1 phi_k(r1) phi_i(r1) . \int dr2 |\grad_1 u(r1,r2)|^2 \phi_l(r2) \phi_j(r2)
+            ac_mat(k,i,l,j) += gradu_squared_u_ij_mu(l,j,ipoint) * contrib
+          enddo
+        enddo
+      enddo
+    enddo
+  enddo
+
+  do j = 1, ao_num
+    do l = 1, ao_num
+      do i = 1, ao_num
+        do k = 1, ao_num
+          tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i)
+        enddo
+      enddo
+    enddo
+  enddo
+
+  deallocate(ac_mat)
+
+END_PROVIDER
+
+! ---
+
diff --git a/src/non_h_ints_mu/jmu_modif.irp.f b/src/non_h_ints_mu/jmu_modif.irp.f
new file mode 100644
index 00000000..59a4a104
--- /dev/null
+++ b/src/non_h_ints_mu/jmu_modif.irp.f
@@ -0,0 +1,270 @@
+
+! ---
+
+double precision function jmu_modif(r1, r2)
+
+  implicit none
+  double precision, intent(in) :: r1(3), r2(3)
+  double precision, external   :: j12_mu, j12_nucl
+
+  jmu_modif = j12_mu(r1, r2) * j12_nucl(r1, r2)
+
+  return
+end function jmu_modif
+
+! ---
+
+double precision function j12_mu(r1, r2)
+
+  include 'constants.include.F'
+
+  implicit none
+  double precision, intent(in) :: r1(3), r2(3)
+  double precision             :: mu_r12, r12
+
+  r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) &
+             + (r1(2) - r2(2)) * (r1(2) - r2(2)) &
+             + (r1(3) - r2(3)) * (r1(3) - r2(3)) )
+  mu_r12 = mu_erf * r12
+
+  j12_mu = 0.5d0 * r12 * (1.d0 - derf(mu_r12)) - inv_sq_pi_2 * dexp(-mu_r12*mu_r12) / mu_erf
+
+  return
+end function j12_mu
+
+! ---
+
+double precision function j12_nucl(r1, r2)
+
+  implicit none
+  double precision, intent(in) :: r1(3), r2(3)
+  integer                      :: i, j
+  double precision             :: a1, d1, e1, a2, d2, e2
+
+  ! j12_nucl(r1,r2) = v(r1) * v(r2), v(r) = \prod_A [1 - exp(-j1b_pen(A) |r - R_A|^2)] :
+  ! the two one-body products are accumulated in two independent loops
+  j12_nucl = 1.d0
+  do i = 1, nucl_num
+    a1 = j1b_pen(i)
+    d1 = ( (r1(1) - nucl_coord(i,1)) * (r1(1) - nucl_coord(i,1)) &
+         + (r1(2) - nucl_coord(i,2)) * (r1(2) - nucl_coord(i,2)) &
+         + (r1(3) - nucl_coord(i,3)) * (r1(3) - nucl_coord(i,3)) )
+    e1 = 1.d0 - dexp(-a1*d1)
+
+    j12_nucl = j12_nucl * e1
+  enddo
+
+  do j = 1, nucl_num
+    a2 = j1b_pen(j)
+    d2 = ( (r2(1) - nucl_coord(j,1)) * (r2(1) - nucl_coord(j,1)) &
+         + (r2(2) - nucl_coord(j,2)) * (r2(2) - nucl_coord(j,2)) &
+         + (r2(3) - nucl_coord(j,3)) * (r2(3) - nucl_coord(j,3)) )
+    e2 = 1.d0 - dexp(-a2*d2)
+
+    j12_nucl = j12_nucl * e2
+  enddo
+
+  return
+end function j12_nucl
+
+! ---
+
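+! For reference (derivation note, not in the original patch): differentiating
+! j12_mu with respect to x1, the erf and Gaussian derivative terms cancel and only
+!
+!   d/dx1 j12_mu(r1,r2) = 0.5 * (1 - erf(mu_erf*r12)) * (x1 - x2) / r12
+!
+! survives; this closed form is what the grad1_{x,y,z}_j12_mu_exc functions
+! below implement, and what the finite-difference helpers cross-check.
+
+! 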
--------------------------------------------------------------------------------------- + +double precision function grad1_x_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(1))) + + r1_eps(1) = r1_eps(1) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(1) = r1_eps(1) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_x_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_x_jmu_modif + +double precision function grad1_y_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(2))) + + r1_eps(2) = r1_eps(2) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(2) = r1_eps(2) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_y_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_y_jmu_modif + +double precision function grad1_z_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(3))) + + r1_eps(3) = r1_eps(3) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(3) = r1_eps(3) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_z_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_z_jmu_modif + +! --------------------------------------------------------------------------------------- + +! --- + +! --------------------------------------------------------------------------------------- + +double precision function grad1_x_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(1))) + + r1_eps(1) = r1_eps(1) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(1) = r1_eps(1) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_x_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_x_j12_mu_num + +double precision function grad1_y_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(2))) + + r1_eps(2) = r1_eps(2) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(2) = r1_eps(2) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_y_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_y_j12_mu_num + +double precision function grad1_z_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(3))) + + r1_eps(3) = r1_eps(3) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(3) = r1_eps(3) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_z_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_z_j12_mu_num + +! --------------------------------------------------------------------------------------- + +! --- + +! 
---------------------------------------------------------------------------------------
+
+double precision function grad1_x_j12_mu_exc(r1, r2)
+
+  implicit none
+  double precision, intent(in) :: r1(3), r2(3)
+  double precision             :: r12
+
+  grad1_x_j12_mu_exc = 0.d0
+
+  r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) &
+             + (r1(2) - r2(2)) * (r1(2) - r2(2)) &
+             + (r1(3) - r2(3)) * (r1(3) - r2(3)) )
+  if(r12 .lt. 1d-10) return
+
+  grad1_x_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(1) - r2(1)) / r12
+
+  return
+end function grad1_x_j12_mu_exc
+
+double precision function grad1_y_j12_mu_exc(r1, r2)
+
+  implicit none
+  double precision, intent(in) :: r1(3), r2(3)
+  double precision             :: r12
+
+  grad1_y_j12_mu_exc = 0.d0
+
+  r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) &
+             + (r1(2) - r2(2)) * (r1(2) - r2(2)) &
+             + (r1(3) - r2(3)) * (r1(3) - r2(3)) )
+  if(r12 .lt. 1d-10) return
+
+  grad1_y_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(2) - r2(2)) / r12
+
+  return
+end function grad1_y_j12_mu_exc
+
+double precision function grad1_z_j12_mu_exc(r1, r2)
+
+  implicit none
+  double precision, intent(in) :: r1(3), r2(3)
+  double precision             :: r12
+
+  grad1_z_j12_mu_exc = 0.d0
+
+  r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) &
+             + (r1(2) - r2(2)) * (r1(2) - r2(2)) &
+             + (r1(3) - r2(3)) * (r1(3) - r2(3)) )
+  if(r12 .lt. 1d-10) return
+
+  grad1_z_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(3) - r2(3)) / r12
+
+  return
+end function grad1_z_j12_mu_exc
+
+! ---------------------------------------------------------------------------------------
+
+! ---
+
+
diff --git a/src/non_h_ints_mu/new_grad_tc.irp.f b/src/non_h_ints_mu/new_grad_tc.irp.f
index 8f4883eb..26ed642c 100644
--- a/src/non_h_ints_mu/new_grad_tc.irp.f
+++ b/src/non_h_ints_mu/new_grad_tc.irp.f
@@ -1,7 +1,85 @@
 
 ! ---
 
-BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_final_grid, 3)]
+ BEGIN_PROVIDER [ double precision, fact1_j12, (   n_points_final_grid)]
+&BEGIN_PROVIDER [ double precision, fact2_j12, (3, n_points_final_grid)]
+&BEGIN_PROVIDER [ double precision, fact3_j12, (   n_points_final_grid)]
+
+  implicit none
+  integer          :: ipoint, i, j, phase
+  double precision :: x, y, z, dx, dy, dz
+  double precision :: a, d, e, fact_r, fact_r_sq
+  double precision :: fact_x, fact_y, fact_z
+  double precision :: ax_der, ay_der, az_der, a_expo
+
+  do ipoint = 1, n_points_final_grid
+
+    x = final_grid_points(1,ipoint)
+    y = final_grid_points(2,ipoint)
+    z = final_grid_points(3,ipoint)
+
+    ! ---
+
+    fact_r    = 1.d0
+    fact_r_sq = 1.d0
+    do j = 1, nucl_num
+      a  = j1b_pen(j)
+      dx = x - nucl_coord(j,1)
+      dy = y - nucl_coord(j,2)
+      dz = z - nucl_coord(j,3)
+      d  = dx*dx + dy*dy + dz*dz
+      e  = 1.d0 - dexp(-a*d)
+
+      fact_r    = fact_r    * e
+      fact_r_sq = fact_r_sq * e * e
+    enddo
+    fact1_j12(ipoint) = fact_r
+    fact3_j12(ipoint) = fact_r_sq
+
+    ! ---
+
+    fact_x = 0.d0
+    fact_y = 0.d0
+    fact_z = 0.d0
+    do i = 1, List_all_comb_b2_size
+
+      phase  = 0
+      a_expo = 0.d0
+      ax_der = 0.d0
+      ay_der = 0.d0
+      az_der = 0.d0
+      do j = 1, nucl_num
+        a  = dble(List_all_comb_b2(j,i)) * j1b_pen(j)
+        dx = x - nucl_coord(j,1)
+        dy = y - nucl_coord(j,2)
+        dz = z - nucl_coord(j,3)
+
+        phase  += List_all_comb_b2(j,i)
+        a_expo += a * (dx*dx + dy*dy + dz*dz)
+        ax_der += a * dx
+        ay_der += a * dy
+        az_der += a * dz
+      enddo
+      e = -2.d0 * (-1.d0)**dble(phase) * dexp(-a_expo)
+
+      fact_x += e * ax_der
+      fact_y += e * ay_der
+      fact_z += e * az_der
+    enddo
+
+    fact2_j12(1,ipoint) = fact_x
+    fact2_j12(2,ipoint) = fact_y
+    fact2_j12(3,ipoint) = fact_z
+
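+    ! What these providers hold (summary note, not in the original patch):
+    !   fact1_j12(ipoint)   = v(r1) = \prod_A [1 - exp(-j1b_pen(A) |r1 - R_A|^2)]
+    !   fact3_j12(ipoint)   = v(r1)^2
+    !   fact2_j12(:,ipoint) = grad_r1 v(r1), accumulated from the 2^nucl_num-term
+    !   expansion of v: each combination contributes
+    !   -2 * (-1)^phase * exp(-a_expo) * \sum_A a_A (r1 - R_A)
+
+    ! 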
--- + + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] BEGIN_DOC ! @@ -13,45 +91,32 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_fina END_DOC implicit none - integer :: ipoint, i, j, i_1s - double precision :: r(3) - double precision :: a, d, e, tmp1, tmp2, fact_r, fact_xyz(3) + integer :: ipoint, i, j + double precision :: x, y, z, tmp_x, tmp_y, tmp_z, tmp0, tmp1, tmp2 PROVIDE j1b_type j1b_pen if(j1b_type .eq. 3) then do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + tmp0 = fact1_j12(ipoint) + tmp_x = fact2_j12(1,ipoint) + tmp_y = fact2_j12(2,ipoint) + tmp_z = fact2_j12(3,ipoint) - fact_r = 1.d0 - fact_xyz(1) = 1.d0 - fact_xyz(2) = 1.d0 - fact_xyz(3) = 1.d0 - do i_1s = 1, nucl_num - a = j1b_pen(i_1s) - d = (r(1) - nucl_coord(i_1s,1)) * (r(1) - nucl_coord(i_1s,1)) & - + (r(2) - nucl_coord(i_1s,2)) * (r(2) - nucl_coord(i_1s,2)) & - + (r(3) - nucl_coord(i_1s,3)) * (r(3) - nucl_coord(i_1s,3)) - e = dexp(-a*d) - - fact_r = fact_r * (1.d0 - e) - fact_xyz(1) = fact_xyz(1) * (2.d0 * a * (r(1) - nucl_coord(i_1s,1)) * e) - fact_xyz(2) = fact_xyz(2) * (2.d0 * a * (r(2) - nucl_coord(i_1s,2)) * e) - fact_xyz(3) = fact_xyz(3) * (2.d0 * a * (r(3) - nucl_coord(i_1s,3)) * e) - enddo - do j = 1, ao_num do i = 1, ao_num - tmp1 = fact_r * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) - tmp2 = v_ij_u_cst_mu_j1b (i,j,ipoint) + tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + tmp2 = v_ij_u_cst_mu_j1b(i,j,ipoint) - grad_1_u_ij_mu(i,j,ipoint,1) = tmp1 * r(1) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + fact_xyz(1) * tmp2 - grad_1_u_ij_mu(i,j,ipoint,2) = tmp1 * r(2) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + fact_xyz(2) * tmp2 - grad_1_u_ij_mu(i,j,ipoint,3) = tmp1 * r(3) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + fact_xyz(3) * tmp2 + grad_1_u_ij_mu(i,j,ipoint,1) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + tmp_x * tmp2 + grad_1_u_ij_mu(i,j,ipoint,2) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + tmp_y * tmp2 + grad_1_u_ij_mu(i,j,ipoint,3) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + tmp_z * tmp2 enddo enddo enddo @@ -59,14 +124,14 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_fina else do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) do j = 1, ao_num do i = 1, ao_num - grad_1_u_ij_mu(i,j,ipoint,1) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(1) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) - grad_1_u_ij_mu(i,j,ipoint,2) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(2) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) - grad_1_u_ij_mu(i,j,ipoint,3) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(3) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) + grad_1_u_ij_mu(i,j,ipoint,1) = v_ij_erf_rk_cst_mu(i,j,ipoint) * x - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) + grad_1_u_ij_mu(i,j,ipoint,2) = v_ij_erf_rk_cst_mu(i,j,ipoint) * y - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) + grad_1_u_ij_mu(i,j,ipoint,3) = v_ij_erf_rk_cst_mu(i,j,ipoint) * z - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) enddo enddo enddo diff --git a/src/non_h_ints_mu/numerical_integ.irp.f b/src/non_h_ints_mu/numerical_integ.irp.f new file 
mode 100644 index 00000000..abac1874 --- /dev/null +++ b/src/non_h_ints_mu/numerical_integ.irp.f @@ -0,0 +1,135 @@ + +! --- + +! +! \int dr2 [-1 * \grad_r1 u(r1,r2)] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) +! +BEGIN_PROVIDER [ double precision, num_grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] + + implicit none + + integer :: i, j, ipoint, jpoint + double precision :: tmp, r1(3), r2(3) + + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc + double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc + double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc + + num_grad_1_u_ij_mu = 0.d0 + + do j = 1, ao_num + do i = 1, ao_num + + do ipoint = 1, n_points_final_grid + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) + num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) + num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) + enddo + + enddo + enddo + enddo + +END_PROVIDER + +! --- + +double precision function num_grad_1_u_ij_mu_x(i, j, ipoint) + + implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc + + num_grad_1_u_ij_mu_x = 0.d0 + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu_x += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) + enddo + +end function num_grad_1_u_ij_mu_x + +! --- + +double precision function num_grad_1_u_ij_mu_y(i, j, ipoint) + + implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc + + num_grad_1_u_ij_mu_y = 0.d0 + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu_y += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) + enddo + +end function num_grad_1_u_ij_mu_y + +! 
--- + +double precision function num_grad_1_u_ij_mu_z(i, j, ipoint) + + implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc + + num_grad_1_u_ij_mu_z = 0.d0 + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu_z += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) + enddo + +end function num_grad_1_u_ij_mu_z + +! --- +
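
! ---

! Reviewer note: a minimal spot check (a sketch, assuming the usual QP providers
! such as mu_erf and nucl_coord are already built) that the closed-form gradient
! of j12_mu matches its finite-difference version from jmu_modif.irp.f:
!
!   double precision :: r1(3), r2(3)
!   double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc
!   r1 = (/ 0.1d0, -0.3d0,  0.7d0 /)
!   r2 = (/ 1.2d0,  0.4d0, -0.5d0 /)
!   print *, grad1_x_j12_mu_num(r1, r2), grad1_x_j12_mu_exc(r1, r2)  ! should agree to ~1d-7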