From e288c40d8b117b17b87db4f75e1aff902a7fa0c7 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Tue, 12 Sep 2023 16:52:58 +0200 Subject: [PATCH] NO: L1 & L2 added and tested --- src/bi_ort_ints/three_body_ints_bi_ort.irp.f | 54 +++ src/tc_bi_ortho/no_dressing_naive.irp.f | 450 ++++++++++++++++++- src/tc_bi_ortho/no_dressing_v0.irp.f | 277 +++++++++++- src/tc_bi_ortho/test_tc_bi_ortho.irp.f | 89 +++- 4 files changed, 834 insertions(+), 36 deletions(-) diff --git a/src/bi_ort_ints/three_body_ints_bi_ort.irp.f b/src/bi_ort_ints/three_body_ints_bi_ort.irp.f index d8145c3e..cb5c08cf 100644 --- a/src/bi_ort_ints/three_body_ints_bi_ort.irp.f +++ b/src/bi_ort_ints/three_body_ints_bi_ort.irp.f @@ -68,6 +68,60 @@ END_PROVIDER ! --- +subroutine give_integrals_3_body_bi_ort_spin( n, sigma_n, l, sigma_l, k, sigma_k & + , m, sigma_m, j, sigma_j, i, sigma_i & + , integral) + + BEGIN_DOC + ! + ! < n l k | -L | m j i > with a BI-ORTHONORMAL SPIN-ORBITALS + ! + END_DOC + + implicit none + integer, intent(in) :: n, l, k, m, j, i + double precision, intent(in) :: sigma_n, sigma_l, sigma_k, sigma_m, sigma_j, sigma_i + double precision, intent(out) :: integral + integer :: ipoint + double precision :: weight, tmp + logical, external :: is_same_spin + + integral = 0.d0 + + if( is_same_spin(sigma_n, sigma_m) .and. & + is_same_spin(sigma_l, sigma_j) .and. & + is_same_spin(sigma_k, sigma_i) ) then + + PROVIDE mo_l_coef mo_r_coef + PROVIDE int2_grad1_u12_bimo_t + + do ipoint = 1, n_points_final_grid + + tmp = mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & + * ( int2_grad1_u12_bimo_t(ipoint,1,n,m) * int2_grad1_u12_bimo_t(ipoint,1,l,j) & + + int2_grad1_u12_bimo_t(ipoint,2,n,m) * int2_grad1_u12_bimo_t(ipoint,2,l,j) & + + int2_grad1_u12_bimo_t(ipoint,3,n,m) * int2_grad1_u12_bimo_t(ipoint,3,l,j) ) + + tmp = tmp + mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & + * ( int2_grad1_u12_bimo_t(ipoint,1,n,m) * int2_grad1_u12_bimo_t(ipoint,1,k,i) & + + int2_grad1_u12_bimo_t(ipoint,2,n,m) * int2_grad1_u12_bimo_t(ipoint,2,k,i) & + + int2_grad1_u12_bimo_t(ipoint,3,n,m) * int2_grad1_u12_bimo_t(ipoint,3,k,i) ) + + tmp = tmp + mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & + * ( int2_grad1_u12_bimo_t(ipoint,1,l,j) * int2_grad1_u12_bimo_t(ipoint,1,k,i) & + + int2_grad1_u12_bimo_t(ipoint,2,l,j) * int2_grad1_u12_bimo_t(ipoint,2,k,i) & + + int2_grad1_u12_bimo_t(ipoint,3,l,j) * int2_grad1_u12_bimo_t(ipoint,3,k,i) ) + + integral = integral + tmp * final_weight_at_r_vector(ipoint) + enddo + + endif + + return +end subroutine give_integrals_3_body_bi_ort_spin + +! 
--- + subroutine give_integrals_3_body_bi_ort(n, l, k, m, j, i, integral) BEGIN_DOC diff --git a/src/tc_bi_ortho/no_dressing_naive.irp.f b/src/tc_bi_ortho/no_dressing_naive.irp.f index 10c80cec..a57b1723 100644 --- a/src/tc_bi_ortho/no_dressing_naive.irp.f +++ b/src/tc_bi_ortho/no_dressing_naive.irp.f @@ -4,19 +4,26 @@ BEGIN_PROVIDER [double precision, no_0_naive] implicit none - integer :: ii, jj, kk - integer :: i, j, k - double precision :: sigma_i, sigma_j, sigma_k - double precision :: tmp - double precision :: I_ijk_ijk, I_ijk_kij, I_ijk_jki, I_ijk_jik, I_ijk_kji, I_ijk_ikj - double precision :: t0, t1 - logical, external :: is_same_spin + integer :: ii, jj, kk + integer :: i, j, k + double precision :: sigma_i, sigma_j, sigma_k + double precision :: I_ijk_ijk, I_ijk_kij, I_ijk_jki, I_ijk_jik, I_ijk_kji, I_ijk_ikj + double precision :: t0, t1 + double precision, allocatable :: tmp(:) print*, " Providing no_0_naive ..." call wall_time(t0) + allocate(tmp(elec_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, jj, j, sigma_j, kk, k, sigma_k, & + !$OMP I_ijk_ijk, I_ijk_kij, I_ijk_jki, I_ijk_jik, & + !$OMP I_ijk_kji, I_ijk_ikj) & + !$OMP SHARED (elec_beta_num, elec_num, tmp) + !$OMP DO - tmp = 0.d0 do ii = 1, elec_num if(ii .le. elec_beta_num) then @@ -27,6 +34,8 @@ BEGIN_PROVIDER [double precision, no_0_naive] sigma_i = +1.d0 endif + tmp(ii) = 0.d0 + do jj = 1, elec_num if(jj .le. elec_beta_num) then @@ -72,16 +81,20 @@ BEGIN_PROVIDER [double precision, no_0_naive] , I_ijk_ikj) - tmp = tmp + I_ijk_ijk + I_ijk_kij + I_ijk_jki - I_ijk_jik - I_ijk_kji - I_ijk_ikj - !tmp = tmp + I_ijk_ijk + 2.d0 * I_ijk_kij - 3.d0 * I_ijk_jik + tmp(ii) = tmp(ii) + I_ijk_ijk + I_ijk_kij + I_ijk_jki - I_ijk_jik - I_ijk_kji - I_ijk_ikj + ! = tmp(ii) + I_ijk_ijk + 2.d0 * I_ijk_kij - 3.d0 * I_ijk_jik enddo enddo enddo + !$OMP END DO + !$OMP END PARALLEL - no_0_naive = -1.d0 * (-tmp) / 6.d0 + no_0_naive = -1.d0 * (-sum(tmp)) / 6.d0 + + deallocate(tmp) call wall_time(t1) - print*, " Wall time for no_0_naive (sec) = ", (t1 - t0)/60.d0 + print*, " Wall time for no_0_naive (min) = ", (t1 - t0)/60.d0 print*, " no_0_naive = ", no_0_naive @@ -89,4 +102,417 @@ END_PROVIDER ! --- +BEGIN_PROVIDER [double precision, no_1_naive, (mo_num, mo_num)] + + BEGIN_DOC + ! + ! < p | H(1) | s > is dressed with no_1_naive(p,s) + ! + END_DOC + + implicit none + integer :: ii, jj + integer :: i, j, p, s + double precision :: sigma_i, sigma_j, sigma_p, sigma_s + double precision :: I_pij_sji, I_pij_sij, I_pij_jis, I_pij_ijs, I_pij_isj, I_pij_jsi + double precision :: t0, t1 + + print*, " Providing no_1_naive ..." + call wall_time(t0) + + ! ---- + ! up-up part + + sigma_p = +1.d0 + sigma_s = +1.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, jj, j, sigma_j, & + !$OMP I_pij_sji, I_pij_sij, I_pij_jis, & + !$OMP I_pij_ijs, I_pij_isj, I_pij_jsi ) & + !$OMP SHARED (mo_num, elec_beta_num, elec_num, & + !$OMP sigma_p, sigma_s, no_1_naive) + + !$OMP DO COLLAPSE (2) + + do s = 1, mo_num + do p = 1, mo_num + + no_1_naive(p,s) = 0.d0 + do ii = 1, elec_num + if(ii .le. elec_beta_num) then + i = ii + sigma_i = -1.d0 + else + i = ii - elec_beta_num + sigma_i = +1.d0 + endif + + do jj = 1, elec_num + if(jj .le. 
elec_beta_num) then + j = jj + sigma_j = -1.d0 + else + j = jj - elec_beta_num + sigma_j = +1d0 + endif + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , s, sigma_s, j, sigma_j, i, sigma_i & + , I_pij_sji) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , s, sigma_s, i, sigma_i, j, sigma_j & + , I_pij_sij) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , j, sigma_j, i, sigma_i, s, sigma_s & + , I_pij_jis) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , i, sigma_i, j, sigma_j, s, sigma_s & + , I_pij_ijs) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , i, sigma_i, s, sigma_s, j, sigma_j & + , I_pij_isj) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , j, sigma_j, s, sigma_s, i, sigma_i & + , I_pij_jsi) + + ! x (-1) because integrals are over -L + ! x 0.5 because we consider 0.5 (up + down) + no_1_naive(p,s) = no_1_naive(p,s) + 0.25d0 * (I_pij_sji - I_pij_sij + I_pij_jis - I_pij_ijs + I_pij_isj - I_pij_jsi) + enddo ! j + enddo ! i + enddo ! s + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + + ! ---- + ! down-down part + + sigma_p = -1.d0 + sigma_s = -1.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, jj, j, sigma_j, & + !$OMP I_pij_sji, I_pij_sij, I_pij_jis, & + !$OMP I_pij_ijs, I_pij_isj, I_pij_jsi ) & + !$OMP SHARED (mo_num, elec_beta_num, elec_num, & + !$OMP sigma_p, sigma_s, no_1_naive) + + !$OMP DO COLLAPSE (2) + + do s = 1, mo_num + do p = 1, mo_num + + do ii = 1, elec_num + if(ii .le. elec_beta_num) then + i = ii + sigma_i = -1.d0 + else + i = ii - elec_beta_num + sigma_i = +1.d0 + endif + + do jj = 1, elec_num + if(jj .le. elec_beta_num) then + j = jj + sigma_j = -1.d0 + else + j = jj - elec_beta_num + sigma_j = +1d0 + endif + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , s, sigma_s, j, sigma_j, i, sigma_i & + , I_pij_sji) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , s, sigma_s, i, sigma_i, j, sigma_j & + , I_pij_sij) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , j, sigma_j, i, sigma_i, s, sigma_s & + , I_pij_jis) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , i, sigma_i, j, sigma_j, s, sigma_s & + , I_pij_ijs) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , i, sigma_i, s, sigma_s, j, sigma_j & + , I_pij_isj) + + call give_integrals_3_body_bi_ort_spin( p, sigma_p, i, sigma_i, j, sigma_j & + , j, sigma_j, s, sigma_s, i, sigma_i & + , I_pij_jsi) + + ! x (-1) because integrals are over -L + ! x 0.5 because we consider 0.5 (up + down) + no_1_naive(p,s) = no_1_naive(p,s) + 0.25d0 * (I_pij_sji - I_pij_sij + I_pij_jis - I_pij_ijs + I_pij_isj - I_pij_jsi) + enddo ! j + enddo ! i + enddo ! s + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + ! --- + + call wall_time(t1) + print*, " Wall time for no_1_naive (min) = ", (t1 - t0)/60.d0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, no_2_naive, (mo_num, mo_num, mo_num, mo_num)] + + BEGIN_DOC + ! + ! < p q | H(2) | s t > is dressed with no_2_naive(p,q,s,t) + ! 
+ END_DOC + + implicit none + integer :: ii + integer :: i, p, q, s, t + double precision :: sigma_i, sigma_p, sigma_q, sigma_s, sigma_t + double precision :: I_ipq_ist, I_ipq_sit, I_ipq_tsi + double precision :: t0, t1 + + print*, " Providing no_2_naive ..." + call wall_time(t0) + + ! ---- + ! up-up & up-up part + + sigma_p = +1.d0 + sigma_s = +1.d0 + sigma_q = +1.d0 + sigma_t = +1.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, p, q, s, t, & + !$OMP I_ipq_ist, I_ipq_sit, I_ipq_tsi) & + !$OMP SHARED (mo_num, elec_beta_num, elec_num, & + !$OMP sigma_p, sigma_q, sigma_s, sigma_t, & + !$OMP no_2_naive) + + !$OMP DO COLLAPSE (4) + do t = 1, mo_num + do s = 1, mo_num + do q = 1, mo_num + do p = 1, mo_num + + no_2_naive(p,q,s,t) = 0.d0 + do ii = 1, elec_num + if(ii .le. elec_beta_num) then + i = ii + sigma_i = -1.d0 + else + i = ii - elec_beta_num + sigma_i = +1.d0 + endif + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , i, sigma_i, s, sigma_s, t, sigma_t & + , I_ipq_ist) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , s, sigma_s, i, sigma_i, t, sigma_t & + , I_ipq_sit) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , t, sigma_t, s, sigma_s, i, sigma_i & + , I_ipq_tsi) + + ! x (-1) because integrals are over -L + ! x 0.25 because we consider 0.25 (up-up + up-down + down-up + down-down) + no_2_naive(p,q,s,t) = no_2_naive(p,q,s,t) + 0.125d0 * (I_ipq_ist - I_ipq_sit - I_ipq_tsi) + enddo ! i + enddo ! p + enddo ! q + enddo ! s + enddo ! t + !$OMP END DO + !$OMP END PARALLEL + + ! ---- + ! up-up & down-down part + + sigma_p = +1.d0 + sigma_s = +1.d0 + sigma_q = -1.d0 + sigma_t = -1.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, p, q, s, t, & + !$OMP I_ipq_ist, I_ipq_sit, I_ipq_tsi) & + !$OMP SHARED (mo_num, elec_beta_num, elec_num, & + !$OMP sigma_p, sigma_q, sigma_s, sigma_t, & + !$OMP no_2_naive) + + !$OMP DO COLLAPSE (4) + do t = 1, mo_num + do s = 1, mo_num + do q = 1, mo_num + do p = 1, mo_num + + do ii = 1, elec_num + if(ii .le. elec_beta_num) then + i = ii + sigma_i = -1.d0 + else + i = ii - elec_beta_num + sigma_i = +1.d0 + endif + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , i, sigma_i, s, sigma_s, t, sigma_t & + , I_ipq_ist) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , s, sigma_s, i, sigma_i, t, sigma_t & + , I_ipq_sit) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , t, sigma_t, s, sigma_s, i, sigma_i & + , I_ipq_tsi) + + ! x (-1) because integrals are over -L + ! x 0.25 because we consider 0.25 (up-up + up-down + down-up + down-down) + no_2_naive(p,q,s,t) = no_2_naive(p,q,s,t) + 0.125d0 * (I_ipq_ist - I_ipq_sit - I_ipq_tsi) + enddo ! i + enddo ! p + enddo ! q + enddo ! s + enddo ! t + !$OMP END DO + !$OMP END PARALLEL + + ! ---- + ! down-down & up-up part + + sigma_p = -1.d0 + sigma_s = -1.d0 + sigma_q = +1.d0 + sigma_t = +1.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, p, q, s, t, & + !$OMP I_ipq_ist, I_ipq_sit, I_ipq_tsi) & + !$OMP SHARED (mo_num, elec_beta_num, elec_num, & + !$OMP sigma_p, sigma_q, sigma_s, sigma_t, & + !$OMP no_2_naive) + + !$OMP DO COLLAPSE (4) + do t = 1, mo_num + do s = 1, mo_num + do q = 1, mo_num + do p = 1, mo_num + + do ii = 1, elec_num + if(ii .le. 
elec_beta_num) then + i = ii + sigma_i = -1.d0 + else + i = ii - elec_beta_num + sigma_i = +1.d0 + endif + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , i, sigma_i, s, sigma_s, t, sigma_t & + , I_ipq_ist) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , s, sigma_s, i, sigma_i, t, sigma_t & + , I_ipq_sit) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , t, sigma_t, s, sigma_s, i, sigma_i & + , I_ipq_tsi) + + ! x (-1) because integrals are over -L + ! x 0.25 because we consider 0.25 (up-up + up-down + down-up + down-down) + no_2_naive(p,q,s,t) = no_2_naive(p,q,s,t) + 0.125d0 * (I_ipq_ist - I_ipq_sit - I_ipq_tsi) + enddo ! i + enddo ! p + enddo ! q + enddo ! s + enddo ! t + !$OMP END DO + !$OMP END PARALLEL + + ! ---- + ! down-down & down-down part + + sigma_p = -1.d0 + sigma_s = -1.d0 + sigma_q = -1.d0 + sigma_t = -1.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ii, i, sigma_i, p, q, s, t, & + !$OMP I_ipq_ist, I_ipq_sit, I_ipq_tsi) & + !$OMP SHARED (mo_num, elec_beta_num, elec_num, & + !$OMP sigma_p, sigma_q, sigma_s, sigma_t, & + !$OMP no_2_naive) + + !$OMP DO COLLAPSE (4) + do t = 1, mo_num + do s = 1, mo_num + do q = 1, mo_num + do p = 1, mo_num + + do ii = 1, elec_num + if(ii .le. elec_beta_num) then + i = ii + sigma_i = -1.d0 + else + i = ii - elec_beta_num + sigma_i = +1.d0 + endif + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , i, sigma_i, s, sigma_s, t, sigma_t & + , I_ipq_ist) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , s, sigma_s, i, sigma_i, t, sigma_t & + , I_ipq_sit) + + call give_integrals_3_body_bi_ort_spin( i, sigma_i, p, sigma_p, q, sigma_q & + , t, sigma_t, s, sigma_s, i, sigma_i & + , I_ipq_tsi) + + ! x (-1) because integrals are over -L + ! x 0.25 because we consider 0.25 (up-up + up-down + down-up + down-down) + no_2_naive(p,q,s,t) = no_2_naive(p,q,s,t) + 0.125d0 * (I_ipq_ist - I_ipq_sit - I_ipq_tsi) + enddo ! i + enddo ! p + enddo ! q + enddo ! s + enddo ! t + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(t1) + print*, " Wall time for no_2_naive (min) = ", (t1 - t0)/60.d0 + +END_PROVIDER + +! --- + diff --git a/src/tc_bi_ortho/no_dressing_v0.irp.f b/src/tc_bi_ortho/no_dressing_v0.irp.f index 9a070dab..4d40c76f 100644 --- a/src/tc_bi_ortho/no_dressing_v0.irp.f +++ b/src/tc_bi_ortho/no_dressing_v0.irp.f @@ -4,19 +4,28 @@ BEGIN_PROVIDER [double precision, no_0_v0] implicit none - integer :: i, j, k - double precision :: tmp - double precision :: I_ijk_ijk, I_ijk_kij, I_ijk_jik, I_ijk_jki, I_ijk_ikj, I_ijk_kji - double precision :: t0, t1 + integer :: i, j, k + double precision :: I_ijk_ijk, I_ijk_kij, I_ijk_jik, I_ijk_jki, I_ijk_ikj, I_ijk_kji + double precision :: t0, t1 + double precision, allocatable :: tmp(:) call wall_time(t0) print*, " Providing no_0_v0 ..." if(elec_alpha_num .eq. 
elec_beta_num) then - tmp = 0.d0 + allocate(tmp(elec_beta_num)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, & + !$OMP I_ijk_ijk, I_ijk_kij, I_ijk_jik) & + !$OMP SHARED (elec_beta_num, tmp) + + !$OMP DO do i = 1, elec_beta_num + + tmp(i) = 0.d0 do j = 1, elec_beta_num do k = 1, elec_beta_num @@ -24,18 +33,32 @@ BEGIN_PROVIDER [double precision, no_0_v0] call give_integrals_3_body_bi_ort(i, j, k, k, i, j, I_ijk_kij) call give_integrals_3_body_bi_ort(i, j, k, j, i, k, I_ijk_jik) - tmp = tmp + 4.d0 * (2.d0 * I_ijk_ijk + I_ijk_kij - 3.d0 * I_ijk_jik) + tmp(i) = tmp(i) + 4.d0 * (2.d0 * I_ijk_ijk + I_ijk_kij - 3.d0 * I_ijk_jik) enddo enddo enddo + !$OMP END DO + !$OMP END PARALLEL - no_0_v0 = -1.d0 * (-tmp) / 6.d0 + no_0_v0 = -1.d0 * (-sum(tmp)) / 6.d0 + + deallocate(tmp) else - tmp = 0.d0 + allocate(tmp(elec_alpha_num)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, & + !$OMP I_ijk_ijk, I_ijk_kij, I_ijk_jik, & + !$OMP I_ijk_jki, I_ijk_ikj, I_ijk_kji) & + !$OMP SHARED (elec_beta_num, elec_alpha_num, tmp) + + !$OMP DO do i = 1, elec_beta_num + + tmp(i) = 0.d0 do j = 1, elec_beta_num do k = 1, elec_beta_num @@ -43,12 +66,16 @@ BEGIN_PROVIDER [double precision, no_0_v0] call give_integrals_3_body_bi_ort(i, j, k, k, i, j, I_ijk_kij) call give_integrals_3_body_bi_ort(i, j, k, j, i, k, I_ijk_jik) - tmp = tmp + 4.d0 * (2.d0 * I_ijk_ijk + I_ijk_kij - 3.d0 * I_ijk_jik) + tmp(i) = tmp(i) + 4.d0 * (2.d0 * I_ijk_ijk + I_ijk_kij - 3.d0 * I_ijk_jik) enddo enddo enddo + !$OMP END DO + !$OMP DO do i = elec_beta_num+1, elec_alpha_num + + tmp(i) = 0.d0 do j = elec_beta_num+1, elec_alpha_num do k = elec_beta_num+1, elec_alpha_num @@ -56,14 +83,11 @@ BEGIN_PROVIDER [double precision, no_0_v0] call give_integrals_3_body_bi_ort(i, j, k, k, i, j, I_ijk_kij) call give_integrals_3_body_bi_ort(i, j, k, j, i, k, I_ijk_jik) - tmp = tmp + I_ijk_ijk + 2.d0 * I_ijk_kij - 3.d0 * I_ijk_jik - enddo - enddo - enddo + tmp(i) = tmp(i) + I_ijk_ijk + 2.d0 * I_ijk_kij - 3.d0 * I_ijk_jik + enddo ! k + enddo ! j - do i = elec_beta_num+1, elec_alpha_num do j = 1, elec_beta_num - do k = 1, elec_beta_num call give_integrals_3_body_bi_ort(i, j, k, i, j, k, I_ijk_ijk) @@ -72,8 +96,8 @@ BEGIN_PROVIDER [double precision, no_0_v0] call give_integrals_3_body_bi_ort(i, j, k, j, i, k, I_ijk_jik) call give_integrals_3_body_bi_ort(i, j, k, k, j, i, I_ijk_kji) - tmp = tmp + 6.d0 * (2.d0 * I_ijk_ijk + I_ijk_jki - I_ijk_ikj - I_ijk_jik - I_ijk_kji) - enddo + tmp(i) = tmp(i) + 6.d0 * (2.d0 * I_ijk_ijk + I_ijk_jki - I_ijk_ikj - I_ijk_jik - I_ijk_kji) + enddo ! k do k = elec_beta_num+1, elec_alpha_num @@ -83,18 +107,21 @@ BEGIN_PROVIDER [double precision, no_0_v0] call give_integrals_3_body_bi_ort(i, j, k, j, i, k, I_ijk_jik) call give_integrals_3_body_bi_ort(i, j, k, k, j, i, I_ijk_kji) - tmp = tmp + 3.d0 * (2.d0 * I_ijk_ijk + 2.d0 * I_ijk_jki - I_ijk_ikj - I_ijk_jik - 2.d0 * I_ijk_kji) - enddo + tmp(i) = tmp(i) + 3.d0 * (2.d0 * I_ijk_ijk + 2.d0 * I_ijk_jki - I_ijk_ikj - I_ijk_jik - 2.d0 * I_ijk_kji) + enddo ! k + enddo ! j + enddo ! i + !$OMP END DO + !$OMP END PARALLEL - enddo - enddo + no_0_v0 = -1.d0 * (-sum(tmp)) / 6.d0 - no_0_v0 = -1.d0 * (-tmp) / 6.d0 + deallocate(tmp) endif call wall_time(t1) - print*, " Wall time for no_0_v0 (sec) = ", (t1 - t0)/60.d0 + print*, " Wall time for no_0_v0 (min) = ", (t1 - t0)/60.d0 print*, " no_0_v0 = ", no_0_v0 @@ -102,4 +129,208 @@ END_PROVIDER ! --- +BEGIN_PROVIDER [double precision, no_1_v0, (mo_num, mo_num)] + + BEGIN_DOC + ! + ! 
x (-1) because integrals are over -L + ! + END_DOC + + implicit none + integer :: p, s, i, j + double precision :: I_pij_sij, I_pij_isj, I_pij_ijs, I_pij_sji, I_pij_jsi, I_pij_jis + double precision :: t0, t1 + + call wall_time(t0) + print*, " Providing no_1_v0 ..." + + if(elec_alpha_num .eq. elec_beta_num) then + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (p, s, i, j, & + !$OMP I_pij_sij, I_pij_isj, I_pij_ijs, & + !$OMP I_pij_sji) & + !$OMP SHARED (mo_num, elec_beta_num, no_1_v0) + + !$OMP DO COLLAPSE(2) + do s = 1, mo_num + do p = 1, mo_num + + no_1_v0(p,s) = 0.d0 + do i = 1, elec_beta_num + do j = 1, elec_beta_num + + call give_integrals_3_body_bi_ort(p, i, j, s, i, j, I_pij_sij) + call give_integrals_3_body_bi_ort(p, i, j, i, s, j, I_pij_isj) + call give_integrals_3_body_bi_ort(p, i, j, i, j, s, I_pij_ijs) + call give_integrals_3_body_bi_ort(p, i, j, s, j, i, I_pij_sji) + + no_1_v0(p,s) = no_1_v0(p,s) - (2.d0*I_pij_sij - 2.d0*I_pij_isj + I_pij_ijs - I_pij_sji) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + else + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (p, s, i, j, & + !$OMP I_pij_sij, I_pij_isj, I_pij_ijs, & + !$OMP I_pij_sji, I_pij_jsi, I_pij_jis) & + !$OMP SHARED (mo_num, elec_beta_num, elec_alpha_num, no_1_v0) + + !$OMP DO COLLAPSE(2) + do s = 1, mo_num + do p = 1, mo_num + + no_1_v0(p,s) = 0.d0 + do i = 1, elec_beta_num + do j = 1, elec_beta_num + + call give_integrals_3_body_bi_ort(p, i, j, s, i, j, I_pij_sij) + call give_integrals_3_body_bi_ort(p, i, j, i, s, j, I_pij_isj) + call give_integrals_3_body_bi_ort(p, i, j, i, j, s, I_pij_ijs) + call give_integrals_3_body_bi_ort(p, i, j, s, j, i, I_pij_sji) + + no_1_v0(p,s) = no_1_v0(p,s) - (2.d0*I_pij_sij - 2.d0*I_pij_isj + I_pij_ijs - I_pij_sji) + enddo ! j + enddo ! i + + do i = elec_beta_num+1, elec_alpha_num + do j = 1, elec_beta_num + + call give_integrals_3_body_bi_ort(p, i, j, s, j, i, I_pij_sji) + call give_integrals_3_body_bi_ort(p, i, j, j, s, i, I_pij_jsi) + call give_integrals_3_body_bi_ort(p, i, j, j, i, s, I_pij_jis) + call give_integrals_3_body_bi_ort(p, i, j, s, i, j, I_pij_sij) + call give_integrals_3_body_bi_ort(p, i, j, i, s, j, I_pij_isj) + call give_integrals_3_body_bi_ort(p, i, j, i, j, s, I_pij_ijs) + + no_1_v0(p,s) = no_1_v0(p,s) + 0.5d0 * (2.d0*I_pij_sji - I_pij_jsi + 2.d0*I_pij_jis - 4.d0*I_pij_sij + 2.d0*I_pij_isj - I_pij_ijs) + enddo ! j + + do j = elec_beta_num+1, elec_alpha_num + + call give_integrals_3_body_bi_ort(p, i, j, s, i, j, I_pij_sij) + call give_integrals_3_body_bi_ort(p, i, j, i, s, j, I_pij_isj) + call give_integrals_3_body_bi_ort(p, i, j, i, j, s, I_pij_ijs) + call give_integrals_3_body_bi_ort(p, i, j, s, j, i, I_pij_sji) + + no_1_v0(p,s) = no_1_v0(p,s) - 0.5d0 * (I_pij_sij - I_pij_isj + I_pij_ijs - I_pij_sji) + enddo ! j + enddo ! i + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + endif + + call wall_time(t1) + print*, " Wall time for no_1_v0 (min) = ", (t1 - t0)/60.d0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, no_2_v0, (mo_num, mo_num, mo_num, mo_num)] + + BEGIN_DOC + ! + ! x (-1) because integrals are over -L + ! + END_DOC + + implicit none + integer :: p, q, s, t, i + double precision :: I_ipq_sit, I_ipq_tsi, I_ipq_ist + double precision :: t0, t1 + + call wall_time(t0) + print*, " Providing no_2_v0 ..." + + if(elec_alpha_num .eq. 
elec_beta_num) then + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (p, q, s, t, i, & + !$OMP I_ipq_sit, I_ipq_tsi, I_ipq_ist) & + !$OMP SHARED (mo_num, elec_beta_num, no_2_v0) + + !$OMP DO COLLAPSE(4) + do t = 1, mo_num + do s = 1, mo_num + do q = 1, mo_num + do p = 1, mo_num + + no_2_v0(p,q,s,t) = 0.d0 + do i = 1, elec_beta_num + + call give_integrals_3_body_bi_ort(i, p, q, s, i, t, I_ipq_sit) + call give_integrals_3_body_bi_ort(i, p, q, t, s, i, I_ipq_tsi) + call give_integrals_3_body_bi_ort(i, p, q, i, s, t, I_ipq_ist) + + no_2_v0(p,q,s,t) = no_2_v0(p,q,s,t) - 0.5d0 * (I_ipq_sit + I_ipq_tsi - 2.d0*I_ipq_ist) + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + else + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (p, q, s, t, i, & + !$OMP I_ipq_sit, I_ipq_tsi, I_ipq_ist) & + !$OMP SHARED (mo_num, elec_beta_num, elec_alpha_num, no_2_v0) + + !$OMP DO COLLAPSE(4) + do t = 1, mo_num + do s = 1, mo_num + do q = 1, mo_num + do p = 1, mo_num + + no_2_v0(p,q,s,t) = 0.d0 + do i = 1, elec_beta_num + + call give_integrals_3_body_bi_ort(i, p, q, s, i, t, I_ipq_sit) + call give_integrals_3_body_bi_ort(i, p, q, t, s, i, I_ipq_tsi) + call give_integrals_3_body_bi_ort(i, p, q, i, s, t, I_ipq_ist) + + no_2_v0(p,q,s,t) = no_2_v0(p,q,s,t) - 0.5d0 * (I_ipq_sit + I_ipq_tsi - 2.d0*I_ipq_ist) + enddo ! i + + do i = elec_beta_num+1, elec_alpha_num + + call give_integrals_3_body_bi_ort(i, p, q, s, i, t, I_ipq_sit) + call give_integrals_3_body_bi_ort(i, p, q, t, s, i, I_ipq_tsi) + call give_integrals_3_body_bi_ort(i, p, q, i, s, t, I_ipq_ist) + + no_2_v0(p,q,s,t) = no_2_v0(p,q,s,t) - 0.25d0 * (I_ipq_sit + I_ipq_tsi - 2.d0*I_ipq_ist) + enddo ! i + + enddo ! p + enddo ! q + enddo ! s + enddo ! t + !$OMP END DO + !$OMP END PARALLEL + + endif + + call wall_time(t1) + print*, " Wall time for no_2_v0 (min) = ", (t1 - t0)/60.d0 + +END_PROVIDER + +! --- diff --git a/src/tc_bi_ortho/test_tc_bi_ortho.irp.f b/src/tc_bi_ortho/test_tc_bi_ortho.irp.f index 226854ed..9cbf7748 100644 --- a/src/tc_bi_ortho/test_tc_bi_ortho.irp.f +++ b/src/tc_bi_ortho/test_tc_bi_ortho.irp.f @@ -38,6 +38,8 @@ program tc_bi_ortho !call test_no_v0() call test_no_0() + call test_no_1() + call test_no_2() end @@ -505,7 +507,7 @@ subroutine test_no_0() implicit none double precision :: accu, norm - print*, ' testing test_no_0 ...' + print*, ' testing no_0 ...' PROVIDE no_0_naive PROVIDE no_0_v0 @@ -520,3 +522,88 @@ end ! --- +subroutine test_no_1() + + implicit none + integer :: i, j + double precision :: accu, contrib, new, ref, thr, norm + + print*, ' testing no_1 ...' + + PROVIDE no_1_naive + PROVIDE no_1_v0 + + thr = 1d-8 + + accu = 0.d0 + norm = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + + new = no_1_v0 (j,i) + ref = no_1_naive(j,i) + contrib = dabs(new - ref) + if(contrib .gt. thr) then + print*, ' problem on no_aaa_contraction' + print*, j, i + print*, ref, new, contrib + stop + endif + + accu += contrib + norm += dabs(ref) + enddo + enddo + + print*, ' accu (%) = ', 100.d0*accu/norm + + return +end + +! --- + +subroutine test_no_2() + + implicit none + integer :: i, j, k, l + double precision :: accu, contrib, new, ref, thr, norm + + print*, ' testing no_2 ...' + + PROVIDE no_2_naive + PROVIDE no_2_v0 + + thr = 1d-8 + + accu = 0.d0 + norm = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do k = 1, mo_num + do l = 1, mo_num + + new = no_2_v0 (l,k,j,i) + ref = no_2_naive(l,k,j,i) + contrib = dabs(new - ref) + if(contrib .gt. 
thr) then
+            print*, ' problem on no_2'
+            print*, l, k, j, i
+            print*, ref, new, contrib
+            stop
+          endif
+
+          accu += contrib
+          norm += dabs(ref)
+        enddo
+      enddo
+    enddo
+  enddo
+
+  print*, ' accu (%) = ', 100.d0*accu/norm
+
+  return
+end
+
+! ---
+
+
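
Note: give_integrals_3_body_bi_ort_spin declares `logical, external :: is_same_spin`, but that helper is not defined in this patch. A minimal sketch, assuming the spin-label convention used at the call sites above (sigma = +1.d0 for alpha, -1.d0 for beta), could look like the following; the actual definition elsewhere in the repository may differ.

! Hypothetical sketch (not part of this patch): two spin labels are
! considered identical when their sigma values carry the same sign.
logical function is_same_spin(sigma_1, sigma_2)
  implicit none
  double precision, intent(in) :: sigma_1, sigma_2
  is_same_spin = (sigma_1 * sigma_2 > 0.d0)
end function is_same_spin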
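
Note on the zeroth-order dressing: writing I_ijk_abc for < i j k | -L | a b c > over bi-orthonormal spin-orbitals, as returned by give_integrals_3_body_bi_ort_spin, the triple loop in no_0_naive accumulates the six permutations of the ket indices with alternating signs, i.e.

\[
  \texttt{no\_0\_naive} \;=\; \frac{1}{6} \sum_{i,j,k \,\in\, \mathrm{occ}} \; \sum_{P \in S_3} \mathrm{sgn}(P)\,
  \big\langle\, i\, j\, k \,\big|\, -\hat{L} \,\big|\, P(i\, j\, k) \,\big\rangle ,
\]

which is why the cyclic permutations (ijk, kij, jki) enter with +1, the transpositions (jik, kji, ikj) with -1, and the accumulated sum is divided by 6.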