From 461e2164f8bc3261f46a861dad7ed2a3a2fcccec Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Fri, 28 Jun 2024 15:04:34 +0200 Subject: [PATCH 01/19] few modif in TC int --- plugins/local/tc_int/jast_grad_full.irp.f | 190 ++++++++-------------- plugins/local/tc_int/jast_utils_bh.irp.f | 43 ----- 2 files changed, 69 insertions(+), 164 deletions(-) delete mode 100644 plugins/local/tc_int/jast_utils_bh.irp.f diff --git a/plugins/local/tc_int/jast_grad_full.irp.f b/plugins/local/tc_int/jast_grad_full.irp.f index 599d3779..2f6abf39 100644 --- a/plugins/local/tc_int/jast_grad_full.irp.f +++ b/plugins/local/tc_int/jast_grad_full.irp.f @@ -58,7 +58,7 @@ subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) integer :: jpoint, i_nucl, p, mpA, npA, opA double precision :: r2(3) double precision :: dx, dy, dz, r12, tmp - double precision :: rn(3), f1A, grad1_f1A(3), f2A, grad2_f2A(3), g12, grad1_g12(3) + double precision :: rn(3), f1A, grad1_f1A(3), f2A, g12, grad1_g12(3) double precision :: tmp1, tmp2, dist integer :: powmax1, powmax, powmax2 double precision, allocatable :: f1A_power(:), f2A_power(:), double_p(:), g12_power(:) @@ -91,35 +91,29 @@ subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) grady(jpoint) = 0.d0 gradz(jpoint) = 0.d0 - call jBH_elem_fct_grad_alpha1(r1, r2, g12, grad1_g12) - -! dist = (r1(1) - r2(1)) * (r1(1) - r2(1)) & -! + (r1(2) - r2(2)) * (r1(2) - r2(2)) & -! + (r1(3) - r2(3)) * (r1(3) - r2(3)) -! -! if(dist .ge. 1d-15) then -! dist = dsqrt( dist ) -! -! tmp1 = 1.d0 / (1.d0 + dist) -! -! g12 = dist * tmp1 -! tmp2 = tmp1 * tmp1 / dist -! grad1_g12(1) = tmp2 * (r1(1) - r2(1)) -! grad1_g12(2) = tmp2 * (r1(2) - r2(2)) -! grad1_g12(3) = tmp2 * (r1(3) - r2(3)) -! -! else -! -! grad1_g12(1) = 0.d0 -! grad1_g12(2) = 0.d0 -! grad1_g12(3) = 0.d0 -! g12 = 0.d0 -! -! endif -! - do p = 1, powmax2 - g12_power(p) = g12_power(p-1) * g12 - enddo + dist = (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) + if(dist .ge. 1d-15) then + dist = dsqrt(dist) + tmp1 = 1.d0 / (1.d0 + dist) + g12 = dist * tmp1 + tmp2 = tmp1 * tmp1 / dist + grad1_g12(1) = tmp2 * (r1(1) - r2(1)) + grad1_g12(2) = tmp2 * (r1(2) - r2(2)) + grad1_g12(3) = tmp2 * (r1(3) - r2(3)) + do p = 1, powmax2 + g12_power(p) = g12_power(p-1) * g12 + enddo + else + grad1_g12(1) = 0.d0 + grad1_g12(2) = 0.d0 + grad1_g12(3) = 0.d0 + g12 = 0.d0 + do p = 1, powmax2 + g12_power(p) = 0.d0 + enddo + endif do i_nucl = 1, nucl_num @@ -127,68 +121,56 @@ subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) rn(2) = nucl_coord(i_nucl,2) rn(3) = nucl_coord(i_nucl,3) - call jBH_elem_fct_grad_alpha1(r1, rn, f1A, grad1_f1A) -! dist = (r1(1) - rn(1)) * (r1(1) - rn(1)) & -! + (r1(2) - rn(2)) * (r1(2) - rn(2)) & -! + (r1(3) - rn(3)) * (r1(3) - rn(3)) -! if (dist > 1.d-15) then -! dist = dsqrt( dist ) -! -! tmp1 = 1.d0 / (1.d0 + dist) -! -! f1A = dist * tmp1 -! tmp2 = tmp1 * tmp1 / dist -! grad1_f1A(1) = tmp2 * (r1(1) - rn(1)) -! grad1_f1A(2) = tmp2 * (r1(2) - rn(2)) -! grad1_f1A(3) = tmp2 * (r1(3) - rn(3)) -! -! else -! -! grad1_f1A(1) = 0.d0 -! grad1_f1A(2) = 0.d0 -! grad1_f1A(3) = 0.d0 -! f1A = 0.d0 -! -! 
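!
! The inlined blocks in this hunk replace the calls to jBH_elem_fct_grad_alpha1
! with the explicit alpha = 1 Boys-Handy elementary function
!
!   f(r) = r / (1 + r),        r = |r1 - r2|
!
! whose gradient with respect to r1 is
!
!   grad1 f = (r1 - r2) / ( r * (1 + r)**2 )
!
! and they tabulate the required powers once per grid point,
!
!   f1A_power(p) = f1A_power(p-1) * f1A
!
! instead of recomputing f1A**p inside the loop over the jBH_size parameters.
!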
endif + dist = (r1(1) - rn(1)) * (r1(1) - rn(1)) & + + (r1(2) - rn(2)) * (r1(2) - rn(2)) & + + (r1(3) - rn(3)) * (r1(3) - rn(3)) + if (dist > 1.d-15) then + dist = dsqrt(dist) + tmp1 = 1.d0 / (1.d0 + dist) + f1A = dist * tmp1 + tmp2 = tmp1 * tmp1 / dist + grad1_f1A(1) = tmp2 * (r1(1) - rn(1)) + grad1_f1A(2) = tmp2 * (r1(2) - rn(2)) + grad1_f1A(3) = tmp2 * (r1(3) - rn(3)) + do p = 1, powmax1 + f1A_power(p) = f1A_power(p-1) * f1A + enddo + else + grad1_f1A(1) = 0.d0 + grad1_f1A(2) = 0.d0 + grad1_f1A(3) = 0.d0 + f1A = 0.d0 + do p = 1, powmax1 + f1A_power(p) = 0.d0 + enddo + endif - call jBH_elem_fct_grad_alpha1(r2, rn, f2A, grad2_f2A) -! dist = (r2(1) - rn(1)) * (r2(1) - rn(1)) & -! + (r2(2) - rn(2)) * (r2(2) - rn(2)) & -! + (r2(3) - rn(3)) * (r2(3) - rn(3)) -! -! if (dist > 1.d-15) then -! dist = dsqrt( dist ) -! -! tmp1 = 1.d0 / (1.d0 + dist) -! -! f2A = dist * tmp1 -! tmp2 = tmp1 * tmp1 / dist -! grad2_f2A(1) = tmp2 * (r2(1) - rn(1)) -! grad2_f2A(2) = tmp2 * (r2(2) - rn(2)) -! grad2_f2A(3) = tmp2 * (r2(3) - rn(3)) -! -! else -! -! grad2_f2A(1) = 0.d0 -! grad2_f2A(2) = 0.d0 -! grad2_f2A(3) = 0.d0 -! f2A = 0.d0 -! -! endif - - ! Compute powers of f1A and f2A - do p = 1, powmax1 - f1A_power(p) = f1A_power(p-1) * f1A - f2A_power(p) = f2A_power(p-1) * f2A - enddo + dist = (r2(1) - rn(1)) * (r2(1) - rn(1)) & + + (r2(2) - rn(2)) * (r2(2) - rn(2)) & + + (r2(3) - rn(3)) * (r2(3) - rn(3)) + if (dist > 1.d-15) then + dist = dsqrt(dist) + f2A = dist / (1.d0 + dist) + do p = 1, powmax1 + f2A_power(p) = f2A_power(p-1) * f2A + enddo + else + f2A = 0.d0 + do p = 1, powmax1 + f2A_power(p) = 0.d0 + enddo + endif do p = 1, jBH_size + + tmp = jBH_c(p,i_nucl) + if (dabs(tmp) <= 1.d-10) cycle + mpA = jBH_m(p,i_nucl) npA = jBH_n(p,i_nucl) opA = jBH_o(p,i_nucl) - tmp = jBH_c(p,i_nucl) -! if (dabs(tmp) <= 1.d-10) cycle -! + + ! TODO to it when reading the parameters if(mpA .eq. npA) then tmp = tmp * 0.5d0 endif @@ -207,39 +189,5 @@ subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) return end -subroutine jBH_elem_fct_grad_alpha1(r1, r2, fct, grad1_fct) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision, intent(out) :: fct, grad1_fct(3) - double precision :: dist, tmp1, tmp2 - - dist = (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) - - - if(dist .ge. 1d-15) then - dist = dsqrt( dist ) - - tmp1 = 1.d0 / (1.d0 + dist) - - fct = dist * tmp1 - tmp2 = tmp1 * tmp1 / dist - grad1_fct(1) = tmp2 * (r1(1) - r2(1)) - grad1_fct(2) = tmp2 * (r1(2) - r2(2)) - grad1_fct(3) = tmp2 * (r1(3) - r2(3)) - - else - - grad1_fct(1) = 0.d0 - grad1_fct(2) = 0.d0 - grad1_fct(3) = 0.d0 - fct = 0.d0 - - endif - - return -end - ! --- + diff --git a/plugins/local/tc_int/jast_utils_bh.irp.f b/plugins/local/tc_int/jast_utils_bh.irp.f deleted file mode 100644 index 200bc5ff..00000000 --- a/plugins/local/tc_int/jast_utils_bh.irp.f +++ /dev/null @@ -1,43 +0,0 @@ - -! --- - - - -subroutine jBH_elem_fct_grad(alpha, r1, r2, fct, grad1_fct) - - implicit none - double precision, intent(in) :: alpha, r1(3), r2(3) - double precision, intent(out) :: fct, grad1_fct(3) - double precision :: dist, tmp1, tmp2, dist_inv - - dist = (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) - - - if(dist .ge. 
1d-15) then - dist_inv = 1.d0/dsqrt( dist ) - dist = dist_inv * dist - - tmp1 = 1.d0 / (1.d0 + alpha * dist) - - fct = alpha * dist * tmp1 - tmp2 = alpha * tmp1 * tmp1 * dist_inv - grad1_fct(1) = tmp2 * (r1(1) - r2(1)) - grad1_fct(2) = tmp2 * (r1(2) - r2(2)) - grad1_fct(3) = tmp2 * (r1(3) - r2(3)) - - else - - grad1_fct(1) = 0.d0 - grad1_fct(2) = 0.d0 - grad1_fct(3) = 0.d0 - fct = 0.d0 - - endif - - return -end - -! --- - From 73066b4ac54eeb99ece66947d7827b0482da4e12 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Sat, 29 Jun 2024 01:15:47 +0200 Subject: [PATCH 02/19] issue with linking with CUDA --- plugins/local/tc_int/compute_tc_int_gpu.irp.f | 157 ++++++++++++++++++ plugins/local/tc_int/gpu.c | 2 + plugins/local/tc_int/gpu_module.F90 | 38 +++++ plugins/local/tc_int/write_tc_int_gpu.irp.f | 56 +++++++ src/dft_utils_in_r/ao_in_r.irp.f | 113 ++++++++----- 5 files changed, 329 insertions(+), 37 deletions(-) create mode 100644 plugins/local/tc_int/compute_tc_int_gpu.irp.f create mode 100644 plugins/local/tc_int/gpu.c create mode 100644 plugins/local/tc_int/gpu_module.F90 create mode 100644 plugins/local/tc_int/write_tc_int_gpu.irp.f diff --git a/plugins/local/tc_int/compute_tc_int_gpu.irp.f b/plugins/local/tc_int/compute_tc_int_gpu.irp.f new file mode 100644 index 00000000..146574a6 --- /dev/null +++ b/plugins/local/tc_int/compute_tc_int_gpu.irp.f @@ -0,0 +1,157 @@ + +! --- + +subroutine provide_int2_grad1_u12_ao_gpu() + + use gpu_module + + BEGIN_DOC + ! + ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,2) = \int dr2 [\grad1 u(r1,r2)]_y1 \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,3) = \int dr2 [\grad1 u(r1,r2)]_z1 \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,4) = \int dr2 [-(1/2) [\grad1 u(r1,r2)]^2] \chi_i(r2) \chi_j(r2) + ! + ! + ! tc_int_2e_ao(k,i,l,j) = (ki|V^TC(r_12)|lj) + ! = where V^TC(r_12) is the total TC operator + ! = tc_grad_and_lapl_ao(k,i,l,j) + tc_grad_square_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) + ! where: + ! + ! tc_grad_and_lapl_ao(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) . \grad_1 | ij > + ! = -1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 (-1) \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! + ! tc_grad_square_ao(k,i,l,j) = -1/2 + ! + ! ao_two_e_coul(k,i,l,j) = < l k | 1/r12 | j i > = ( k i | 1/r12 | l j ) + ! + END_DOC + + implicit none + + integer :: i, j, k, l, m, ipoint, jpoint + integer :: n_blocks, n_rest, n_pass + integer :: i_blocks, i_rest, i_pass, ii + double precision :: mem, n_double + double precision :: weight1, ao_k_r, ao_i_r + double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq + double precision :: time0, time1, time2, tc1, tc2, tc + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:), tc_int_2e_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:), c_mat(:,:,:), tmp_grad1_u12(:,:,:) + + double precision, external :: get_ao_two_e_integral + + + PROVIDE final_weight_at_r_vector_extra aos_in_r_array_extra + PROVIDE final_weight_at_r_vector aos_grad_in_r_array_transp_bis final_weight_at_r_vector aos_in_r_array_transp + + + + print*, ' start provide_int2_grad1_u12_ao_gpu ...' 
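+  ! Driver flow, in brief: pack the two grids, their weights and the AO
+  ! values/gradients into plain contiguous arrays, make a single call to the
+  ! CUDA routine tc_int_bh (which returns both int2_grad1_u12_ao and the TC
+  ! two-electron integrals), add the Coulomb part on the host, and write the
+  ! two arrays to the EZFIO work directory.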
+ call wall_time(time0) + + call total_memory(mem) + mem = max(1.d0, qp_max_mem - mem) + n_double = mem * 1.d8 + n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), 1.d0*n_points_final_grid)) + n_rest = int(mod(n_points_final_grid, n_blocks)) + n_pass = int((n_points_final_grid - n_rest) / n_blocks) + + call write_int(6, n_pass, 'Number of passes') + call write_int(6, n_blocks, 'Size of the blocks') + call write_int(6, n_rest, 'Size of the last block') + + ! --- + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + + double precision, allocatable :: aos_data1(:,:,:) + double precision, allocatable :: aos_data2(:,:,:) + allocate(aos_data1(n_points_final_grid,ao_num,4)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array(i,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array(i,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array(i,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array(i,ipoint,3) + enddo + + do ipoint = 1, n_points_extra_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array_extra(i,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array_extra(i,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array_extra(i,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array_extra(i,ipoint,3) + enddo + enddo + + call tc_int_bh(n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, & + jBH_size, jBH_m, jBH_n, jBH_o, jBH_c, & + final_grid_points, final_grid_points_extra, nucl_coord, & + final_weight_at_r_vector, final_weight_at_r_vector_extra, & + aos_data1, aos_data2, int2_grad1_u12_ao, tc_int_2e_ao) + + ! --- + + call wall_time(time1) + + PROVIDE ao_integrals_map + !$OMP PARALLEL DEFAULT(NONE) & + !$OMP SHARED(ao_num, tc_int_2e_ao, ao_integrals_map) & + !$OMP PRIVATE(i, j, k, l) + !$OMP DO COLLAPSE(3) + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ! < 1:i, 2:j | 1:k, 2:l > + tc_int_2e_ao(k,i,l,j) = tc_int_2e_ao(k,i,l,j) + get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(time2) + print*, ' wall time of Coulomb part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + call print_memory_usage() + + ! --- + + print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") + call ezfio_set_work_empty(.False.) + write(11) int2_grad1_u12_ao(:,:,:,1:3) + close(11) + + print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write") + call ezfio_set_work_empty(.False.) + do i = 1, ao_num + write(11) tc_int_2e_ao(:,:,:,i) + enddo + close(11) + + ! ---- + + deallocate(int2_grad1_u12_ao) + deallocate(tc_int_2e_ao) + + call wall_time(time2) + print*, ' wall time for tc_int_2e_ao (min) = ', (time2-time1) / 60.d0 + call print_memory_usage() + + ! --- + + call wall_time(time1) + print*, ' wall time for TC-integrals (min) = ', (time1-time0) / 60.d0 + + return +end + +! 
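!
! Caveat on the aos_data packing loops above: the right-hand sides index the
! AO arrays with i, which is never set in that scope, where the loop index k
! is clearly intended, and the second loop stores into aos_data1 instead of
! aos_data2. A corrected sketch of the intended packing (the same indexing is
! used later in this series, in write_tc_int_cuda.irp.f):
!
!   do k = 1, ao_num
!     do ipoint = 1, n_points_final_grid
!       aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint)
!       aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1)
!       aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2)
!       aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3)
!     enddo
!     do ipoint = 1, n_points_extra_final_grid
!       aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint)
!       aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1)
!       aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2)
!       aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3)
!     enddo
!   enddo
!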
--- + diff --git a/plugins/local/tc_int/gpu.c b/plugins/local/tc_int/gpu.c new file mode 100644 index 00000000..139597f9 --- /dev/null +++ b/plugins/local/tc_int/gpu.c @@ -0,0 +1,2 @@ + + diff --git a/plugins/local/tc_int/gpu_module.F90 b/plugins/local/tc_int/gpu_module.F90 new file mode 100644 index 00000000..cf7efad2 --- /dev/null +++ b/plugins/local/tc_int/gpu_module.F90 @@ -0,0 +1,38 @@ + +! --- + +module gpu_module + + use iso_c_binding + + implicit none + + interface + + subroutine tc_int_bh(n_grid1, n_grid2, ao_num, n_nuc, & + size_bh, m_bh, n_bh, o_bh, c_bh, & + r1, r2, rn, wr1, wr2, aos_data1, & + aos_data2, int2_grad1_u12, tc_int_2e_ao) bind(C) + + import c_int, c_double + + integer(c_int), intent(in), value :: n_grid1, n_grid2, ao_num, n_nuc, size_bh + integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) + real(c_double), intent(in) :: c_bh(size_bh,n_nuc) + real(c_double), intent(in) :: r1(n_grid1,3), r2(n_grid2,3) + real(c_double), intent(in) :: rn(n_nuc,3) + real(c_double), intent(in) :: wr1(n_grid1), wr2(n_grid2) + real(c_double), intent(in) :: aos_data1(n_grid1,ao_num,4), aos_data2(n_grid2,ao_num,4) + real(c_double), intent(out) :: int2_grad1_u12(n_grid1,ao_num,ao_num,4) + real(c_double), intent(out) :: tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num) + + end subroutine + + end interface + +end module + +! --- + diff --git a/plugins/local/tc_int/write_tc_int_gpu.irp.f b/plugins/local/tc_int/write_tc_int_gpu.irp.f new file mode 100644 index 00000000..c0dd9c90 --- /dev/null +++ b/plugins/local/tc_int/write_tc_int_gpu.irp.f @@ -0,0 +1,56 @@ +! --- + +program write_tc_int_gpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() + + implicit none + + PROVIDE io_tc_integ + + print*, 'io_tc_integ = ', io_tc_integ + + if(io_tc_integ .ne. "Write") then + print*, 'io_tc_integ != Write' + print*, io_tc_integ + stop + endif + + call provide_int2_grad1_u12_ao_gpu() + + call ezfio_set_tc_keywords_io_tc_integ('Read') + +end + +! --- + diff --git a/src/dft_utils_in_r/ao_in_r.irp.f b/src/dft_utils_in_r/ao_in_r.irp.f index 16414f39..e9c003d4 100644 --- a/src/dft_utils_in_r/ao_in_r.irp.f +++ b/src/dft_utils_in_r/ao_in_r.irp.f @@ -52,35 +52,39 @@ END_PROVIDER BEGIN_PROVIDER[double precision, aos_grad_in_r_array, (ao_num,n_points_final_grid,3)] - BEGIN_DOC - ! aos_grad_in_r_array(i,j,k) = value of the kth component of the gradient of ith ao on the jth grid point - ! - ! k = 1 : x, k= 2, y, k 3, z - END_DOC + BEGIN_DOC + ! + ! aos_grad_in_r_array(i,j,k) = value of the kth component of the gradient of ith ao on the jth grid point + ! + ! k = 1 : x, k= 2, y, k 3, z + ! 
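+  ! i.e. k = 1 -> d/dx, k = 2 -> d/dy, k = 3 -> d/dz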
+ END_DOC - implicit none - integer :: i,j,m - double precision :: aos_array(ao_num), r(3) - double precision :: aos_grad_array(3,ao_num) - !$OMP PARALLEL DO & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,r,aos_array,aos_grad_array,j,m) & - !$OMP SHARED(aos_grad_in_r_array,n_points_final_grid,ao_num,final_grid_points) - do i = 1, n_points_final_grid - r(1) = final_grid_points(1,i) - r(2) = final_grid_points(2,i) - r(3) = final_grid_points(3,i) - call give_all_aos_and_grad_at_r(r,aos_array,aos_grad_array) - do m = 1, 3 - do j = 1, ao_num - aos_grad_in_r_array(j,i,m) = aos_grad_array(m,j) - enddo + implicit none + integer :: i, j, m + double precision :: aos_array(ao_num), r(3) + double precision :: aos_grad_array(3,ao_num) + + !$OMP PARALLEL DO & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,m,r,aos_array,aos_grad_array) & + !$OMP SHARED(aos_grad_in_r_array,n_points_final_grid,ao_num,final_grid_points) + do i = 1, n_points_final_grid + r(1) = final_grid_points(1,i) + r(2) = final_grid_points(2,i) + r(3) = final_grid_points(3,i) + call give_all_aos_and_grad_at_r(r,aos_array,aos_grad_array) + do m = 1, 3 + do j = 1, ao_num + aos_grad_in_r_array(j,i,m) = aos_grad_array(m,j) + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO + !$OMP END PARALLEL DO +END_PROVIDER - END_PROVIDER +! --- BEGIN_PROVIDER[double precision, aos_grad_in_r_array_transp, (3,ao_num,n_points_final_grid)] @@ -205,18 +209,53 @@ BEGIN_PROVIDER[double precision, aos_grad_in_r_array, (ao_num,n_points_final_gri END_PROVIDER - BEGIN_PROVIDER[double precision, aos_in_r_array_extra_transp, (n_points_extra_final_grid,ao_num)] - implicit none - BEGIN_DOC - ! aos_in_r_array_extra_transp(i,j) = value of the jth ao on the ith grid point - END_DOC - integer :: i,j - double precision :: aos_array(ao_num), r(3) - do i = 1, n_points_extra_final_grid - do j = 1, ao_num - aos_in_r_array_extra_transp(i,j) = aos_in_r_array_extra(j,i) +! --- + +BEGIN_PROVIDER[double precision, aos_in_r_array_extra_transp, (n_points_extra_final_grid,ao_num)] + + BEGIN_DOC + ! aos_in_r_array_extra_transp(i,j) = value of the jth ao on the ith grid point + END_DOC + + implicit none + integer :: i, j + double precision :: aos_array(ao_num), r(3) + + do i = 1, n_points_extra_final_grid + do j = 1, ao_num + aos_in_r_array_extra_transp(i,j) = aos_in_r_array_extra(j,i) + enddo enddo - enddo - END_PROVIDER +END_PROVIDER + +! --- + +BEGIN_PROVIDER[double precision, aos_grad_in_r_array_extra, (ao_num,n_points_extra_final_grid,3)] + + implicit none + integer :: i, j, m + double precision :: aos_array(ao_num), r(3) + double precision :: aos_grad_array(3,ao_num) + + !$OMP PARALLEL DO & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,m,r,aos_array,aos_grad_array) & + !$OMP SHARED(aos_grad_in_r_array_extra,n_points_extra_final_grid,ao_num,final_grid_points_extra) + do i = 1, n_points_extra_final_grid + r(1) = final_grid_points_extra(1,i) + r(2) = final_grid_points_extra(2,i) + r(3) = final_grid_points_extra(3,i) + call give_all_aos_and_grad_at_r(r, aos_array, aos_grad_array) + do m = 1, 3 + do j = 1, ao_num + aos_grad_in_r_array_extra(j,i,m) = aos_grad_array(m,j) + enddo + enddo + enddo + !$OMP END PARALLEL DO + +END_PROVIDER + +! 
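!
! aos_grad_in_r_array_extra(i,j,m) = value of the m-th Cartesian component
! (m = 1: x, m = 2: y, m = 3: z) of the gradient of the i-th AO at the j-th
! point of the extra grid; it mirrors aos_grad_in_r_array, but runs over
! final_grid_points_extra.
!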
--- From b6b0ed5d22a9af6c6c2b27e47f6c8a154587bd7d Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Thu, 1 Aug 2024 10:05:47 +0200 Subject: [PATCH 03/19] WORKING ON DEBUG CUDA-INTEG --- .../local/non_h_ints_mu/total_tc_int.irp.f | 2 +- plugins/local/tc_int/LIB | 1 + plugins/local/tc_int/compute_tc_int.irp.f | 72 +-- plugins/local/tc_int/compute_tc_int_gpu.irp.f | 237 ++++++++-- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 446 ++++++++++++++++++ plugins/local/tc_int/gpu.c | 2 - plugins/local/tc_int/gpu_module.F90 | 113 ++++- plugins/local/tc_int/write_tc_int_cuda.irp.f | 194 ++++++++ 8 files changed, 959 insertions(+), 108 deletions(-) create mode 100644 plugins/local/tc_int/LIB create mode 100644 plugins/local/tc_int/deb_tc_int_cuda.irp.f delete mode 100644 plugins/local/tc_int/gpu.c create mode 100644 plugins/local/tc_int/write_tc_int_cuda.irp.f diff --git a/plugins/local/non_h_ints_mu/total_tc_int.irp.f b/plugins/local/non_h_ints_mu/total_tc_int.irp.f index fb09168e..634d7e87 100644 --- a/plugins/local/non_h_ints_mu/total_tc_int.irp.f +++ b/plugins/local/non_h_ints_mu/total_tc_int.irp.f @@ -118,7 +118,7 @@ BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_n !$OMP END PARALLEL call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & , int2_grad1_u12_square_ao(1,1,1), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & - , 0.d0, ao_two_e_tc_tot, ao_num*ao_num) + , 0.d0, ao_two_e_tc_tot(1,1,1,1), ao_num*ao_num) deallocate(c_mat) endif diff --git a/plugins/local/tc_int/LIB b/plugins/local/tc_int/LIB new file mode 100644 index 00000000..c41ceb9d --- /dev/null +++ b/plugins/local/tc_int/LIB @@ -0,0 +1 @@ +-ltc_int_cu diff --git a/plugins/local/tc_int/compute_tc_int.irp.f b/plugins/local/tc_int/compute_tc_int.irp.f index 92c90d03..35034454 100644 --- a/plugins/local/tc_int/compute_tc_int.irp.f +++ b/plugins/local/tc_int/compute_tc_int.irp.f @@ -2,7 +2,7 @@ ! --- subroutine provide_int2_grad1_u12_ao() - use gpu + BEGIN_DOC ! ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2) @@ -35,8 +35,8 @@ subroutine provide_int2_grad1_u12_ao() double precision :: weight1, ao_k_r, ao_i_r double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq double precision :: time0, time1, time2, tc1, tc2, tc - type(gpu_double4) :: int2_grad1_u12_ao - type(gpu_double3) :: tmp_grad1_u12, tmp_grad1_u12p, tmp + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp_grad1_u12(:,:,:), tmp(:,:,:) double precision, allocatable :: c_mat(:,:,:), tc_int_2e_ao(:,:,:,:) double precision, external :: get_ao_two_e_integral @@ -52,7 +52,6 @@ subroutine provide_int2_grad1_u12_ao() call total_memory(mem) mem = max(1.d0, qp_max_mem - mem) - mem = 6 n_double = mem * 1.d8 n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), 1.d0*n_points_final_grid)) n_rest = int(mod(n_points_final_grid, n_blocks)) @@ -66,9 +65,9 @@ subroutine provide_int2_grad1_u12_ao() ! --- ! 
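!
! Sizing logic above, spelled out: with mem the remaining memory budget
! (in GB, matching qp_max_mem), mem * 1.d8 is a conservative estimate of the
! number of doubles that still fit, since 1 GB holds ~1.25d8 doubles. Each
! buffered r1 point costs 4 * n_points_extra_final_grid doubles (three
! gradient components plus the squared-gradient term), hence
!
!   n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), &
!                      1.d0 * n_points_final_grid))
!   n_rest   = int(mod(n_points_final_grid, n_blocks))   ! tail block
!   n_pass   = int((n_points_final_grid - n_rest) / n_blocks)
!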
--- - call gpu_allocate(int2_grad1_u12_ao, ao_num,ao_num,n_points_final_grid,4) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) - call gpu_allocate(tmp,n_points_extra_final_grid,ao_num,ao_num) + allocate(tmp(n_points_extra_final_grid,ao_num,ao_num)) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (j, i, jpoint) & @@ -77,23 +76,17 @@ subroutine provide_int2_grad1_u12_ao() do j = 1, ao_num do i = 1, ao_num do jpoint = 1, n_points_extra_final_grid - tmp%f(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + tmp(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) enddo enddo enddo !$OMP END DO !$OMP END PARALLEL - call gpu_allocate(tmp_grad1_u12,n_points_extra_final_grid,n_blocks,4) - call gpu_allocate(tmp_grad1_u12p,n_points_extra_final_grid,n_blocks,4) + allocate(tmp_grad1_u12(n_points_extra_final_grid,n_blocks,4)) tc = 0.d0 - type(gpu_stream) :: stream(4) - do i=1,4 - call gpu_stream_create(stream(i)) - enddo - do i_pass = 1, n_pass ii = (i_pass-1)*n_blocks + 1 @@ -102,25 +95,22 @@ subroutine provide_int2_grad1_u12_ao() !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i_blocks, ipoint) & - !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12) + !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, tmp_grad1_u12) !$OMP DO do i_blocks = 1, n_blocks ipoint = ii - 1 + i_blocks ! r1 - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_blocks,1), tmp_grad1_u12%f(1,i_blocks,2), & - tmp_grad1_u12%f(1,i_blocks,3), tmp_grad1_u12%f(1,i_blocks,4)) + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_blocks,1), tmp_grad1_u12(1,i_blocks,2), & + tmp_grad1_u12(1,i_blocks,3), tmp_grad1_u12(1,i_blocks,4)) enddo !$OMP END DO !$OMP END PARALLEL call wall_time(tc2) tc = tc + tc2 - tc1 - call gpu_synchronize() - call gpu_copy(tmp_grad1_u12,tmp_grad1_u12p) do m = 1, 4 - call gpu_set_stream(blas_handle, stream(m)) - call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 & - , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12p%f(1,1,m), n_points_extra_final_grid & - , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num) + call dgemm("T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 & + , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,ii,m), ao_num*ao_num) enddo enddo @@ -132,12 +122,12 @@ subroutine provide_int2_grad1_u12_ao() !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i_rest, ipoint) & - !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12) + !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, tmp_grad1_u12) !$OMP DO do i_rest = 1, n_rest ipoint = ii - 1 + i_rest ! 
r1 - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_rest,1), tmp_grad1_u12%f(1,i_rest,2), & - tmp_grad1_u12%f(1,i_rest,3), tmp_grad1_u12%f(1,i_rest,4)) + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_rest,1), tmp_grad1_u12(1,i_rest,2), & + tmp_grad1_u12(1,i_rest,3), tmp_grad1_u12(1,i_rest,4)) enddo !$OMP END DO !$OMP END PARALLEL @@ -145,23 +135,15 @@ subroutine provide_int2_grad1_u12_ao() tc = tc + tc2 - tc1 do m = 1, 4 - call gpu_set_stream(blas_handle, stream(m)) - call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 & - , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12%f(1,1,m), n_points_extra_final_grid & - , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num) + call dgemm("T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 & + , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,ii,m), ao_num*ao_num) enddo endif - call gpu_synchronize() - call gpu_deallocate(tmp_grad1_u12) - call gpu_deallocate(tmp_grad1_u12p) + deallocate(tmp_grad1_u12) - do i=1,4 - call gpu_stream_destroy(stream(i)) - enddo - - - call gpu_deallocate(tmp) + deallocate(tmp) call wall_time(time1) @@ -169,8 +151,6 @@ subroutine provide_int2_grad1_u12_ao() print*, ' wall time Jastrow derivatives (min) = ', tc / 60.d0 call print_memory_usage() -!TODO -stop ! --- ! --- ! --- @@ -196,7 +176,7 @@ stop !$OMP END DO !$OMP END PARALLEL call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , int2_grad1_u12_ao%f(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) deallocate(c_mat) @@ -232,7 +212,7 @@ stop !$OMP END PARALLEL call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 & - , int2_grad1_u12_ao%f(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) enddo deallocate(c_mat) @@ -281,9 +261,10 @@ stop print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") - call ezfio_set_work_empty(.False.) - write(11) int2_grad1_u12_ao%f(:,:,:,1:3) + call ezfio_set_work_empty(.False.) + write(11) int2_grad1_u12_ao(:,:,:,1:3) close(11) + deallocate(int2_grad1_u12_ao) print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write") @@ -295,7 +276,6 @@ stop ! ---- - call gpu_deallocate(int2_grad1_u12_ao) deallocate(tc_int_2e_ao) call wall_time(time2) diff --git a/plugins/local/tc_int/compute_tc_int_gpu.irp.f b/plugins/local/tc_int/compute_tc_int_gpu.irp.f index 146574a6..5db07dd6 100644 --- a/plugins/local/tc_int/compute_tc_int_gpu.irp.f +++ b/plugins/local/tc_int/compute_tc_int_gpu.irp.f @@ -3,24 +3,24 @@ subroutine provide_int2_grad1_u12_ao_gpu() - use gpu_module + use gpu BEGIN_DOC ! - ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2) - ! int2_grad1_u12_ao(i,j,ipoint,2) = \int dr2 [\grad1 u(r1,r2)]_y1 \chi_i(r2) \chi_j(r2) - ! int2_grad1_u12_ao(i,j,ipoint,3) = \int dr2 [\grad1 u(r1,r2)]_z1 \chi_i(r2) \chi_j(r2) - ! 
int2_grad1_u12_ao(i,j,ipoint,4) = \int dr2 [-(1/2) [\grad1 u(r1,r2)]^2] \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,2) = \int dr2 [\grad1 u(r1,r2)]_y1 \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,3) = \int dr2 [\grad1 u(r1,r2)]_z1 \chi_i(r2) \chi_j(r2) + ! int2_grad1_u12_ao(i,j,ipoint,4) = \int dr2 [-(1/2) [\grad1 u(r1,r2)]^2] \chi_i(r2) \chi_j(r2) ! ! - ! tc_int_2e_ao(k,i,l,j) = (ki|V^TC(r_12)|lj) - ! = where V^TC(r_12) is the total TC operator + ! tc_int_2e_ao(k,i,l,j) = (ki|V^TC(r_12)|lj) + ! = where V^TC(r_12) is the total TC operator ! = tc_grad_and_lapl_ao(k,i,l,j) + tc_grad_square_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) ! where: ! ! tc_grad_and_lapl_ao(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) . \grad_1 | ij > - ! = -1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) - ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 (-1) \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! = -1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 (-1) \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) ! ! tc_grad_square_ao(k,i,l,j) = -1/2 ! @@ -37,8 +37,9 @@ subroutine provide_int2_grad1_u12_ao_gpu() double precision :: weight1, ao_k_r, ao_i_r double precision :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq double precision :: time0, time1, time2, tc1, tc2, tc - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:), tc_int_2e_ao(:,:,:,:) - double precision, allocatable :: tmp(:,:,:), c_mat(:,:,:), tmp_grad1_u12(:,:,:) + type(gpu_double4) :: int2_grad1_u12_ao + type(gpu_double3) :: tmp_grad1_u12, tmp_grad1_u12p, tmp + double precision, allocatable :: c_mat(:,:,:), tc_int_2e_ao(:,:,:,:) double precision, external :: get_ao_two_e_integral @@ -48,11 +49,12 @@ subroutine provide_int2_grad1_u12_ao_gpu() - print*, ' start provide_int2_grad1_u12_ao_gpu ...' + print*, ' start provide_int2_grad1_u12_ao ...' call wall_time(time0) call total_memory(mem) mem = max(1.d0, qp_max_mem - mem) + mem = 6 n_double = mem * 1.d8 n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), 1.d0*n_points_final_grid)) n_rest = int(mod(n_points_final_grid, n_blocks)) @@ -62,41 +64,198 @@ subroutine provide_int2_grad1_u12_ao_gpu() call write_int(6, n_blocks, 'Size of the blocks') call write_int(6, n_rest, 'Size of the last block') + ! --- + ! --- ! 
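!
! The gpu_double3/gpu_double4 containers and the stream API used below come
! from the gpu module: gpu_allocate creates a buffer, the %f member exposes
! it as an ordinary Fortran array, and gpu_dgemm queues a contraction on the
! stream bound to blas_handle via gpu_set_stream. Minimal usage sketch,
! assuming that API:
!
!   type(gpu_double3) :: buf
!   type(gpu_stream)  :: s
!   call gpu_allocate(buf, n1, n2, n3)
!   call gpu_stream_create(s)
!   call gpu_set_stream(blas_handle, s)
!   call gpu_dgemm(blas_handle, "N", "N", ..., buf%f(1,1,1), ...)
!   call gpu_synchronize()
!   call gpu_stream_destroy(s)
!   call gpu_deallocate(buf)
!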
--- - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) - allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + call gpu_allocate(int2_grad1_u12_ao, ao_num,ao_num,n_points_final_grid,4) - double precision, allocatable :: aos_data1(:,:,:) - double precision, allocatable :: aos_data2(:,:,:) - allocate(aos_data1(n_points_final_grid,ao_num,4)) - allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - aos_data1(ipoint,k,1) = aos_in_r_array(i,ipoint) - aos_data1(ipoint,k,2) = aos_grad_in_r_array(i,ipoint,1) - aos_data1(ipoint,k,3) = aos_grad_in_r_array(i,ipoint,2) - aos_data1(ipoint,k,4) = aos_grad_in_r_array(i,ipoint,3) + call gpu_allocate(tmp,n_points_extra_final_grid,ao_num,ao_num) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (tmp, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + tmp%f(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo enddo + enddo + !$OMP END DO + !$OMP END PARALLEL - do ipoint = 1, n_points_extra_final_grid - aos_data1(ipoint,k,1) = aos_in_r_array_extra(i,ipoint) - aos_data1(ipoint,k,2) = aos_grad_in_r_array_extra(i,ipoint,1) - aos_data1(ipoint,k,3) = aos_grad_in_r_array_extra(i,ipoint,2) - aos_data1(ipoint,k,4) = aos_grad_in_r_array_extra(i,ipoint,3) + call gpu_allocate(tmp_grad1_u12,n_points_extra_final_grid,n_blocks,4) + call gpu_allocate(tmp_grad1_u12p,n_points_extra_final_grid,n_blocks,4) + + tc = 0.d0 + + type(gpu_stream) :: stream(4) + do i=1,4 + call gpu_stream_create(stream(i)) + enddo + + do i_pass = 1, n_pass + ii = (i_pass-1)*n_blocks + 1 + + call wall_time(tc1) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i_blocks, ipoint) & + !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12) + !$OMP DO + do i_blocks = 1, n_blocks + ipoint = ii - 1 + i_blocks ! r1 + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_blocks,1), tmp_grad1_u12%f(1,i_blocks,2), & + tmp_grad1_u12%f(1,i_blocks,3), tmp_grad1_u12%f(1,i_blocks,4)) + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(tc2) + tc = tc + tc2 - tc1 + + call gpu_synchronize() + call gpu_copy(tmp_grad1_u12,tmp_grad1_u12p) + do m = 1, 4 + call gpu_set_stream(blas_handle, stream(m)) + call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 & + , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12p%f(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num) enddo enddo - call tc_int_bh(n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, & - jBH_size, jBH_m, jBH_n, jBH_o, jBH_c, & - final_grid_points, final_grid_points_extra, nucl_coord, & - final_weight_at_r_vector, final_weight_at_r_vector_extra, & - aos_data1, aos_data2, int2_grad1_u12_ao, tc_int_2e_ao) + if(n_rest .gt. 0) then + + ii = n_pass*n_blocks + 1 + + call wall_time(tc1) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i_rest, ipoint) & + !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12) + !$OMP DO + do i_rest = 1, n_rest + ipoint = ii - 1 + i_rest ! 
r1 + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_rest,1), tmp_grad1_u12%f(1,i_rest,2), & + tmp_grad1_u12%f(1,i_rest,3), tmp_grad1_u12%f(1,i_rest,4)) + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(tc2) + tc = tc + tc2 - tc1 + + do m = 1, 4 + call gpu_set_stream(blas_handle, stream(m)) + call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 & + , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12%f(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num) + enddo + + endif + call gpu_synchronize() + call gpu_deallocate(tmp_grad1_u12) + call gpu_deallocate(tmp_grad1_u12p) + + do i=1,4 + call gpu_stream_destroy(stream(i)) + enddo + + + call gpu_deallocate(tmp) + + + call wall_time(time1) + print*, ' wall time for int2_grad1_u12_ao (min) = ', (time1-time0) / 60.d0 + print*, ' wall time Jastrow derivatives (min) = ', tc / 60.d0 + call print_memory_usage() + +!TODO +stop + ! --- + ! --- + ! --- + + + allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + + call wall_time(time1) + + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_grad1_u12_ao%f(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) + deallocate(c_mat) + + call wall_time(time2) + print*, ' wall time of Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + call print_memory_usage() ! --- - call wall_time(time1) + call wall_time(time1) + + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + do m = 1, 3 + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, & + !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector, m) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 & + , int2_grad1_u12_ao%f(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) + enddo + deallocate(c_mat) + + call wall_time(time2) + print*, ' wall time of non-Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + call print_memory_usage() + + ! --- + + call wall_time(time1) + + call sum_A_At(tc_int_2e_ao(1,1,1,1), ao_num*ao_num) + + call wall_time(time2) + print*, ' lower- and upper-triangle of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + call print_memory_usage() + + ! 
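!
! sum_A_At(A, n) above symmetrizes A viewed as an n x n matrix over its
! compound indices, A <- A + A**T (assuming the usual QP utils semantics);
! a naive equivalent, for reference:
!
!   do j = 1, ao_num*ao_num
!     do i = 1, j
!       tmp    = A(i,j) + A(j,i)
!       A(i,j) = tmp
!       A(j,i) = tmp
!     enddo
!   enddo
!
! which completes the lower and upper triangles from the one-sided dgemm
! contributions accumulated above.
!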
--- + + call wall_time(time1) PROVIDE ao_integrals_map !$OMP PARALLEL DEFAULT(NONE) & @@ -107,7 +266,7 @@ subroutine provide_int2_grad1_u12_ao_gpu() do l = 1, ao_num do i = 1, ao_num do k = 1, ao_num - ! < 1:i, 2:j | 1:k, 2:l > + ! < 1:i, 2:j | 1:k, 2:l > tc_int_2e_ao(k,i,l,j) = tc_int_2e_ao(k,i,l,j) + get_ao_two_e_integral(i, j, k, l, ao_integrals_map) enddo enddo @@ -125,7 +284,7 @@ subroutine provide_int2_grad1_u12_ao_gpu() print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") call ezfio_set_work_empty(.False.) - write(11) int2_grad1_u12_ao(:,:,:,1:3) + write(11) int2_grad1_u12_ao%f(:,:,:,1:3) close(11) print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' @@ -138,7 +297,7 @@ subroutine provide_int2_grad1_u12_ao_gpu() ! ---- - deallocate(int2_grad1_u12_ao) + call gpu_deallocate(int2_grad1_u12_ao) deallocate(tc_int_2e_ao) call wall_time(time2) diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f new file mode 100644 index 00000000..6eb3a8b5 --- /dev/null +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -0,0 +1,446 @@ +! --- + +program write_tc_int_cuda + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() + + implicit none + + !call deb_int_long_range_gpu() + !call deb_int_bh_kernel_gpu() + call deb_int2_grad1_u12_ao_gpu() + + return +end + +! --- + +subroutine deb_int_long_range_gpu() + + use gpu_module + + implicit none + + integer :: i, j, k + integer :: ipoint, jpoint + + integer :: nBlocks, blockSize + + double precision :: acc_thr, err_tot, nrm_tot, err_loc + + double precision :: time0, time1 + double precision :: cuda_time0, cuda_time1 + double precision :: cpu_time0, cpu_time1 + + double precision, allocatable :: aos_data2(:,:,:) + double precision, allocatable :: int_fct_long_range(:,:,:) + double precision, allocatable :: int_fct_long_range_gpu(:,:,:) + + + + call wall_time(time0) + print*, ' start deb_int_long_range_gpu' + + + ! --- + + nBlocks = 256 + blockSize = 32 + + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + allocate(int_fct_long_range_gpu(n_points_extra_final_grid,ao_num,ao_num)) + + do k = 1, ao_num + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + ! 
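!
! Quantity under test here: the weighted AO pair on the extra grid,
!
!   int_fct_long_range(jpoint,i,j) = wr2(jpoint) * chi_i(r2_jpoint) * chi_j(r2_jpoint)
!
! computed once by the CUDA kernel and once by the OpenMP reference loop
! below, then compared element by element.
!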
--- + + call wall_time(cuda_time0) + + call deb_int_long_range(nBlocks, blockSize, & + n_points_extra_final_grid, ao_num, final_weight_at_r_vector_extra, aos_data2, & + int_fct_long_range_gpu) + + call wall_time(cuda_time1) + print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 + + deallocate(aos_data2) + + ! --- + + allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) + + call wall_time(cpu_time0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(cpu_time1) + print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0 + + ! --- + + acc_thr = 1d-12 + err_tot = 0.d0 + nrm_tot = 0.d0 + + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + err_loc = dabs(int_fct_long_range(jpoint,i,j) - int_fct_long_range_gpu(jpoint,i,j)) + if(err_loc > acc_thr) then + print*, " error on", jpoint, i, j + print*, " CPU res", int_fct_long_range (jpoint,i,j) + print*, " GPU res", int_fct_long_range_gpu(jpoint,i,j) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int_fct_long_range(jpoint,i,j)) + enddo + enddo + enddo + + print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + deallocate(int_fct_long_range) + deallocate(int_fct_long_range_gpu) + + call wall_time(time1) + print*, ' wall time for deb_int_long_range_gpu (min) = ', (time1-time0) / 60.d0 + + return +end + +! --- + +subroutine deb_int_bh_kernel_gpu() + + use gpu_module + + implicit none + + integer :: m + integer :: ipoint, jpoint + + integer :: nBlocks, blockSize + + double precision :: acc_thr, err_tot, nrm_tot, err_loc + + double precision :: time0, time1 + double precision :: cuda_time0, cuda_time1 + double precision :: cpu_time0, cpu_time1 + + double precision, allocatable :: r1(:,:), r2(:,:) + double precision, allocatable :: grad1_u12(:,:,:) + double precision, allocatable :: grad1_u12_gpu(:,:,:) + + + + call wall_time(time0) + print*, ' start deb_int_bh_kernel_gpu' + + + ! --- + + allocate(r1(n_points_final_grid,3)) + allocate(r2(n_points_extra_final_grid,3)) + + do ipoint = 1, n_points_final_grid + r1(ipoint,1) = final_grid_points(1,ipoint) + r1(ipoint,2) = final_grid_points(2,ipoint) + r1(ipoint,3) = final_grid_points(3,ipoint) + enddo + + do ipoint = 1, n_points_extra_final_grid + r2(ipoint,1) = final_grid_points_extra(1,ipoint) + r2(ipoint,2) = final_grid_points_extra(2,ipoint) + r2(ipoint,3) = final_grid_points_extra(3,ipoint) + enddo + + ! --- + + nBlocks = 256 + blockSize = 32 + + allocate(grad1_u12_gpu(n_points_extra_final_grid,n_points_final_grid,4)) + + call wall_time(cuda_time0) + + call deb_int_bh_kernel(nBlocks, blockSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + r1, r2, nucl_coord, jBH_c, jBH_m, jBH_n, jBH_o, & + grad1_u12_gpu) + + call wall_time(cuda_time1) + print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 + + ! 
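!
! Layout of the buffer being validated: for grid-1 point ipoint and grid-2
! point jpoint,
!
!   grad1_u12(jpoint,ipoint,1:3) = [grad1 u(r1,r2)]_{x,y,z}
!   grad1_u12(jpoint,ipoint,4)   = -(1/2) [grad1 u(r1,r2)]**2
!
! i.e. the four components returned by get_grad1_u12_for_tc in the CPU
! reference loop below.
!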
--- + + allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) + + call wall_time(cpu_time0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) + !$OMP DO + do ipoint = 1, n_points_final_grid + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & + , grad1_u12(1,ipoint,2) & + , grad1_u12(1,ipoint,3) & + , grad1_u12(1,ipoint,4) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(cpu_time1) + print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0 + + ! --- + + acc_thr = 1d-12 + err_tot = 0.d0 + nrm_tot = 0.d0 + + do m = 1, 4 + do ipoint = 1, n_points_final_grid + do jpoint = 1, n_points_extra_final_grid + err_loc = dabs(grad1_u12(jpoint,ipoint,m) - grad1_u12_gpu(jpoint,ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", jpoint, ipoint, m + print*, " CPU res", grad1_u12 (jpoint,ipoint,m) + print*, " GPU res", grad1_u12_gpu(jpoint,ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(grad1_u12(jpoint,ipoint,m)) + enddo + enddo + enddo + + print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + deallocate(r1, r2) + deallocate(grad1_u12) + deallocate(grad1_u12_gpu) + + call wall_time(time1) + print*, ' wall time for deb_int_bh_kernel_gpu (min) = ', (time1-time0) / 60.d0 + + return +end + +! --- + +subroutine deb_int2_grad1_u12_ao_gpu() + + use gpu_module + + implicit none + + integer :: m + integer :: i, j, k + integer :: ipoint, jpoint + + integer :: nBlocks, blockSize + + double precision :: acc_thr, err_tot, nrm_tot, err_loc + + double precision :: time0, time1 + double precision :: cuda_time0, cuda_time1 + double precision :: cpu_time0, cpu_time1 + + double precision, allocatable :: r1(:,:), r2(:,:), aos_data2(:,:,:) + double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:) + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) + + + + call wall_time(time0) + print*, ' start deb_int2_grad1_u12_ao_gpu' + + + ! --- + + allocate(r1(n_points_final_grid,3)) + allocate(r2(n_points_extra_final_grid,3)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + + do ipoint = 1, n_points_final_grid + r1(ipoint,1) = final_grid_points(1,ipoint) + r1(ipoint,2) = final_grid_points(2,ipoint) + r1(ipoint,3) = final_grid_points(3,ipoint) + enddo + + do ipoint = 1, n_points_extra_final_grid + r2(ipoint,1) = final_grid_points_extra(1,ipoint) + r2(ipoint,2) = final_grid_points_extra(2,ipoint) + r2(ipoint,3) = final_grid_points_extra(3,ipoint) + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + ! --- + + nBlocks = 256 + blockSize = 32 + + allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) + + call wall_time(cuda_time0) + + call deb_int2_grad1_u12_ao(nBlocks, blockSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + r1, r2, final_weight_at_r_vector_extra, nucl_coord, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao_gpu) + + call wall_time(cuda_time1) + print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 + + ! 
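!
! Reference result rebuilt on the CPU below: for each component m,
!
!   int2_grad1_u12_ao(i,j,ipoint,m) = sum_{jpoint} int_fct_long_range(jpoint,i,j)
!                                                * grad1_u12(jpoint,ipoint,m)
!
! realised as one dgemm("T", "N") per component over the extra-grid index.
!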
--- + + + allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) + allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + + call wall_time(cpu_time0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) + !$OMP DO + do ipoint = 1, n_points_final_grid + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & + , grad1_u12(1,ipoint,2) & + , grad1_u12(1,ipoint,3) & + , grad1_u12(1,ipoint,4) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + + do m = 1, 4 + call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & + , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) + enddo + + call wall_time(cpu_time1) + print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0 + + ! --- + + acc_thr = 1d-12 + err_tot = 0.d0 + nrm_tot = 0.d0 + + do m = 1, 4 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", i, j, ipoint, m + print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) + print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) + enddo + enddo + enddo + enddo + + print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + deallocate(r1, r2, aos_data2) + deallocate(int_fct_long_range, grad1_u12) + deallocate(int2_grad1_u12_ao) + deallocate(int2_grad1_u12_ao_gpu) + + call wall_time(time1) + print*, ' wall time for deb_int2_grad1_u12_ao_gpu (min) = ', (time1-time0) / 60.d0 + + return +end diff --git a/plugins/local/tc_int/gpu.c b/plugins/local/tc_int/gpu.c deleted file mode 100644 index 139597f9..00000000 --- a/plugins/local/tc_int/gpu.c +++ /dev/null @@ -1,2 +0,0 @@ - - diff --git a/plugins/local/tc_int/gpu_module.F90 b/plugins/local/tc_int/gpu_module.F90 index cf7efad2..f625e57d 100644 --- a/plugins/local/tc_int/gpu_module.F90 +++ b/plugins/local/tc_int/gpu_module.F90 @@ -1,38 +1,111 @@ -! --- - module gpu_module - use iso_c_binding + use, intrinsic :: iso_c_binding implicit none interface - subroutine tc_int_bh(n_grid1, n_grid2, ao_num, n_nuc, & - size_bh, m_bh, n_bh, o_bh, c_bh, & - r1, r2, rn, wr1, wr2, aos_data1, & - aos_data2, int2_grad1_u12, tc_int_2e_ao) bind(C) + ! 
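!
! Conventions shared by all bindings below: scalar dummies carry the value
! attribute and cross the C ABI by value, arrays are passed by reference as
! contiguous column-major buffers, and bind(C, name="...") pins the exact
! symbol expected from the CUDA library (linked via -ltc_int_cu, cf. the LIB
! file added in this patch).
!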
--- - import c_int, c_double + subroutine tc_int_c(nBlocks, blockSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, wr1, r2, wr2, rn, & + aos_data1, aos_data2, & + c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao, int_2e_ao) bind(C, name = "tc_int_c") - integer(c_int), intent(in), value :: n_grid1, n_grid2, ao_num, n_nuc, size_bh + import c_int, c_double, c_ptr + integer(c_int), intent(in), value :: nBlocks, blockSize + integer(c_int), intent(in), value :: n_grid1, n_grid2 + integer(c_int), intent(in), value :: n_ao + integer(c_int), intent(in), value :: n_nuc + integer(c_int), intent(in), value :: size_bh + real(c_double), intent(in) :: r1(n_grid1,3), wr1(n_grid1) + real(c_double), intent(in) :: r2(n_grid2,3), wr2(n_grid2) + real(c_double), intent(in) :: rn(n_nuc,3) + real(c_double), intent(in) :: aos_data1(n_grid1,n_ao,4) + real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) + real(c_double), intent(in) :: c_bh(size_bh,n_nuc) integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(in) :: c_bh(size_bh,n_nuc) - real(c_double), intent(in) :: r1(n_grid1,3), r2(n_grid2,3) - real(c_double), intent(in) :: rn(n_nuc,3) - real(c_double), intent(in) :: wr1(n_grid1), wr2(n_grid2) - real(c_double), intent(in) :: aos_data1(n_grid1,ao_num,4), aos_data2(n_grid2,ao_num,4) - real(c_double), intent(out) :: int2_grad1_u12(n_grid1,ao_num,ao_num,4) - real(c_double), intent(out) :: tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num) + real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) + real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) + + end subroutine tc_int_c + + ! --- + + subroutine deb_int_long_range(nBlocks, blockSize, & + n_grid2, n_ao, wr2, aos_data2, & + int_fct_long_range) bind(C, name = "deb_int_long_range") + + import c_int, c_double + integer(c_int), intent(in), value :: nBlocks, blockSize + integer(c_int), intent(in), value :: n_grid2 + integer(c_int), intent(in), value :: n_ao + real(c_double), intent(in) :: wr2(n_grid2) + real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) + real(c_double), intent(out) :: int_fct_long_range(n_grid2,n_ao,n_ao) + + end subroutine deb_int_long_range + + ! --- + + subroutine deb_int_bh_kernel(nBlocks, blockSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, r2, rn, c_bh, m_bh, n_bh, o_bh, & + grad1_u12) bind(C, name = "deb_int_bh_kernel") + + import c_int, c_double, c_ptr + integer(c_int), intent(in), value :: nBlocks, blockSize + integer(c_int), intent(in), value :: n_grid1, n_grid2 + integer(c_int), intent(in), value :: n_ao + integer(c_int), intent(in), value :: n_nuc + integer(c_int), intent(in), value :: size_bh + real(c_double), intent(in) :: r1(n_grid1,3) + real(c_double), intent(in) :: r2(n_grid2,3) + real(c_double), intent(in) :: rn(n_nuc,3) + real(c_double), intent(in) :: c_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) + real(c_double), intent(out) :: grad1_u12(n_grid2,n_grid1,4) + + end subroutine deb_int_bh_kernel + + ! 
--- + + subroutine deb_int2_grad1_u12_ao(nBlocks, blockSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, r2, wr2, rn, aos_data2, c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao) bind(C, name ="deb_int2_grad1_u12_ao") + + import c_int, c_double, c_ptr + integer(c_int), intent(in), value :: nBlocks, blockSize + integer(c_int), intent(in), value :: n_grid1, n_grid2 + integer(c_int), intent(in), value :: n_ao + integer(c_int), intent(in), value :: n_nuc + integer(c_int), intent(in), value :: size_bh + real(c_double), intent(in) :: r1(n_grid1,3) + real(c_double), intent(in) :: r2(n_grid2,3) + real(c_double), intent(in) :: wr2(n_grid2) + real(c_double), intent(in) :: rn(n_nuc,3) + real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) + real(c_double), intent(in) :: c_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) + real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) + + end subroutine deb_int2_grad1_u12_ao + + ! --- - end subroutine - end interface -end module +end module gpu_module -! --- diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f new file mode 100644 index 00000000..de3be412 --- /dev/null +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -0,0 +1,194 @@ +! --- + +program write_tc_int_cuda + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() + + implicit none + + PROVIDE io_tc_integ + + print*, 'io_tc_integ = ', io_tc_integ + + if(io_tc_integ .ne. "Write") then + print*, 'io_tc_integ != Write' + print*, io_tc_integ + stop + endif + + call do_work_on_gpu() + + call ezfio_set_tc_keywords_io_tc_integ('Read') + +end + +! 
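!
! do_work_on_gpu below marshals everything the kernel needs (grid points,
! weights, AO values and gradients, nuclear coordinates, Jastrow parameters)
! into local arrays before the bind(C) call: several providers are stored
! transposed with respect to the interface declarations in gpu_module.F90
! (e.g. final_grid_points(3,ipoint) versus r1(ipoint,3)), so each one is
! repacked into the declared layout first.
!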
--- + +subroutine do_work_on_gpu() + + use gpu_module + + implicit none + + integer :: k, ipoint + integer :: nBlocks, blockSize + integer :: n_grid1, n_grid2 + integer :: n_ao + integer :: n_nuc + integer :: size_bh + + double precision, allocatable :: r1(:,:), wr1(:), r2(:,:), wr2(:), rn(:,:) + double precision, allocatable :: aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: c_bh(:,:) + integer, allocatable :: m_bh(:,:), n_bh(:,:), o_bh(:,:) + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: int_2e_ao(:,:,:,:) + + double precision :: time0, time1 + double precision :: cuda_time0, cuda_time1 + + call wall_time(time0) + print*, ' start calculation of TC-integrals' + + nBlocks = 100 + blockSize = 32 + + n_grid1 = n_points_final_grid + n_grid2 = n_points_extra_final_grid + + n_ao = ao_num + n_nuc = nucl_num + + size_bh = jBH_size + + print*, " nBlocks =", nBlocks + print*, " blockSize =", blockSize + print*, " n_grid1 =", n_grid1 + print*, " n_grid2 =", n_grid2 + print*, " n_ao =", n_ao + print*, " n_nuc =", n_nuc + print *, " size_bh =", size_bh + + allocate(r1(n_grid1,3), wr1(n_grid1)) + allocate(r2(n_grid2,3), wr2(n_grid2)) + allocate(rn(n_nuc,3)) + allocate(aos_data1(n_grid1,n_ao,4)) + allocate(aos_data2(n_grid2,n_ao,4)) + allocate(c_bh(size_bh,n_nuc), m_bh(size_bh,n_nuc), n_bh(size_bh,n_nuc), o_bh(size_bh,n_nuc)) + allocate(int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4)) + allocate(int_2e_ao(n_ao,n_ao,n_ao,n_ao)) + + do ipoint = 1, n_points_final_grid + r1(ipoint,1) = final_grid_points(1,ipoint) + r1(ipoint,2) = final_grid_points(2,ipoint) + r1(ipoint,3) = final_grid_points(3,ipoint) + wr1(ipoint) = final_weight_at_r_vector(ipoint) + enddo + + do ipoint = 1, n_points_extra_final_grid + r2(ipoint,1) = final_grid_points_extra(1,ipoint) + r2(ipoint,2) = final_grid_points_extra(2,ipoint) + r2(ipoint,3) = final_grid_points_extra(3,ipoint) + wr2(ipoint) = final_weight_at_r_vector_extra(ipoint) + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3) + enddo + + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + rn(:,:) = nucl_coord(:,:) + + c_bh(:,:) = jBH_c(:,:) + m_bh(:,:) = jBH_m(:,:) + n_bh(:,:) = jBH_n(:,:) + o_bh(:,:) = jBH_o(:,:) + + call wall_time(cuda_time0) + print*, ' start CUDA kernel' + + int2_grad1_u12_ao = 0.d0 + int_2e_ao = 0.d0 + + call tc_int_c(nBlocks, blockSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, wr1, r2, wr2, rn, aos_data1, aos_data2, & + c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao, int_2e_ao) + + call wall_time(cuda_time1) + print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 + + deallocate(r1, wr1, r2, wr2, rn) + deallocate(aos_data1, aos_data2) + deallocate(c_bh, m_bh, n_bh, o_bh) + + ! --- + + print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") + call ezfio_set_work_empty(.False.) 
+ write(11) int2_grad1_u12_ao(:,:,:,1:3) + close(11) + deallocate(int2_grad1_u12_ao) + + print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write") + call ezfio_set_work_empty(.False.) + do k = 1, ao_num + write(11) int_2e_ao(:,:,:,k) + enddo + close(11) + deallocate(int_2e_ao) + + ! ---- + + + call wall_time(time1) + print*, ' wall time for TC-integrals (min) = ', (time1-time0) / 60.d0 + + return +end + +! --- From f43ee8cf61e6a59a1e6d8b013cd5fe75b1724b42 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Thu, 1 Aug 2024 18:29:50 +0200 Subject: [PATCH 04/19] int2_grad1_u12_ao is computed correctly on CUDA --- plugins/local/tc_int/EZFIO.cfg | 36 +++ plugins/local/tc_int/deb_tc_int_cuda.irp.f | 283 +++------------------ plugins/local/tc_int/gpu_module.F90 | 58 +---- 3 files changed, 75 insertions(+), 302 deletions(-) create mode 100644 plugins/local/tc_int/EZFIO.cfg diff --git a/plugins/local/tc_int/EZFIO.cfg b/plugins/local/tc_int/EZFIO.cfg new file mode 100644 index 00000000..12366f01 --- /dev/null +++ b/plugins/local/tc_int/EZFIO.cfg @@ -0,0 +1,36 @@ +[nxBlocks] +type: integer +doc: nb of x blocks in the Grid +interface: ezfio,provider,ocaml +default: 10 + +[nyBlocks] +type: integer +doc: nb of y blocks in the Grid +interface: ezfio,provider,ocaml +default: 10 + +[nzBlocks] +type: integer +doc: nb of z blocks in the Grid +interface: ezfio,provider,ocaml +default: 1 + +[blockxSize] +type: integer +doc: size of x blocks +interface: ezfio,provider,ocaml +default: 32 + +[blockySize] +type: integer +doc: size of y blocks +interface: ezfio,provider,ocaml +default: 32 + +[blockzSize] +type: integer +doc: size of z blocks +interface: ezfio,provider,ocaml +default: 1 + diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index 6eb3a8b5..a7b5b125 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -36,8 +36,6 @@ subroutine main() implicit none - !call deb_int_long_range_gpu() - !call deb_int_bh_kernel_gpu() call deb_int2_grad1_u12_ao_gpu() return @@ -45,246 +43,6 @@ end ! --- -subroutine deb_int_long_range_gpu() - - use gpu_module - - implicit none - - integer :: i, j, k - integer :: ipoint, jpoint - - integer :: nBlocks, blockSize - - double precision :: acc_thr, err_tot, nrm_tot, err_loc - - double precision :: time0, time1 - double precision :: cuda_time0, cuda_time1 - double precision :: cpu_time0, cpu_time1 - - double precision, allocatable :: aos_data2(:,:,:) - double precision, allocatable :: int_fct_long_range(:,:,:) - double precision, allocatable :: int_fct_long_range_gpu(:,:,:) - - - - call wall_time(time0) - print*, ' start deb_int_long_range_gpu' - - - ! --- - - nBlocks = 256 - blockSize = 32 - - allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - allocate(int_fct_long_range_gpu(n_points_extra_final_grid,ao_num,ao_num)) - - do k = 1, ao_num - do ipoint = 1, n_points_extra_final_grid - aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) - aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) - aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) - aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) - enddo - enddo - - ! 
--- - - call wall_time(cuda_time0) - - call deb_int_long_range(nBlocks, blockSize, & - n_points_extra_final_grid, ao_num, final_weight_at_r_vector_extra, aos_data2, & - int_fct_long_range_gpu) - - call wall_time(cuda_time1) - print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 - - deallocate(aos_data2) - - ! --- - - allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) - - call wall_time(cpu_time0) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (j, i, jpoint) & - !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) - !$OMP DO SCHEDULE (static) - do j = 1, ao_num - do i = 1, ao_num - do jpoint = 1, n_points_extra_final_grid - int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - call wall_time(cpu_time1) - print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0 - - ! --- - - acc_thr = 1d-12 - err_tot = 0.d0 - nrm_tot = 0.d0 - - do j = 1, ao_num - do i = 1, ao_num - do jpoint = 1, n_points_extra_final_grid - err_loc = dabs(int_fct_long_range(jpoint,i,j) - int_fct_long_range_gpu(jpoint,i,j)) - if(err_loc > acc_thr) then - print*, " error on", jpoint, i, j - print*, " CPU res", int_fct_long_range (jpoint,i,j) - print*, " GPU res", int_fct_long_range_gpu(jpoint,i,j) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(int_fct_long_range(jpoint,i,j)) - enddo - enddo - enddo - - print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - deallocate(int_fct_long_range) - deallocate(int_fct_long_range_gpu) - - call wall_time(time1) - print*, ' wall time for deb_int_long_range_gpu (min) = ', (time1-time0) / 60.d0 - - return -end - -! --- - -subroutine deb_int_bh_kernel_gpu() - - use gpu_module - - implicit none - - integer :: m - integer :: ipoint, jpoint - - integer :: nBlocks, blockSize - - double precision :: acc_thr, err_tot, nrm_tot, err_loc - - double precision :: time0, time1 - double precision :: cuda_time0, cuda_time1 - double precision :: cpu_time0, cpu_time1 - - double precision, allocatable :: r1(:,:), r2(:,:) - double precision, allocatable :: grad1_u12(:,:,:) - double precision, allocatable :: grad1_u12_gpu(:,:,:) - - - - call wall_time(time0) - print*, ' start deb_int_bh_kernel_gpu' - - - ! --- - - allocate(r1(n_points_final_grid,3)) - allocate(r2(n_points_extra_final_grid,3)) - - do ipoint = 1, n_points_final_grid - r1(ipoint,1) = final_grid_points(1,ipoint) - r1(ipoint,2) = final_grid_points(2,ipoint) - r1(ipoint,3) = final_grid_points(3,ipoint) - enddo - - do ipoint = 1, n_points_extra_final_grid - r2(ipoint,1) = final_grid_points_extra(1,ipoint) - r2(ipoint,2) = final_grid_points_extra(2,ipoint) - r2(ipoint,3) = final_grid_points_extra(3,ipoint) - enddo - - ! --- - - nBlocks = 256 - blockSize = 32 - - allocate(grad1_u12_gpu(n_points_extra_final_grid,n_points_final_grid,4)) - - call wall_time(cuda_time0) - - call deb_int_bh_kernel(nBlocks, blockSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - r1, r2, nucl_coord, jBH_c, jBH_m, jBH_n, jBH_o, & - grad1_u12_gpu) - - call wall_time(cuda_time1) - print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 - - ! 
--- - - allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) - - call wall_time(cpu_time0) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) - !$OMP DO - do ipoint = 1, n_points_final_grid - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & - , grad1_u12(1,ipoint,2) & - , grad1_u12(1,ipoint,3) & - , grad1_u12(1,ipoint,4) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - - call wall_time(cpu_time1) - print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0 - - ! --- - - acc_thr = 1d-12 - err_tot = 0.d0 - nrm_tot = 0.d0 - - do m = 1, 4 - do ipoint = 1, n_points_final_grid - do jpoint = 1, n_points_extra_final_grid - err_loc = dabs(grad1_u12(jpoint,ipoint,m) - grad1_u12_gpu(jpoint,ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", jpoint, ipoint, m - print*, " CPU res", grad1_u12 (jpoint,ipoint,m) - print*, " GPU res", grad1_u12_gpu(jpoint,ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(grad1_u12(jpoint,ipoint,m)) - enddo - enddo - enddo - - print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - deallocate(r1, r2) - deallocate(grad1_u12) - deallocate(grad1_u12_gpu) - - call wall_time(time1) - print*, ' wall time for deb_int_bh_kernel_gpu (min) = ', (time1-time0) / 60.d0 - - return -end - -! --- - subroutine deb_int2_grad1_u12_ao_gpu() use gpu_module @@ -295,15 +53,14 @@ subroutine deb_int2_grad1_u12_ao_gpu() integer :: i, j, k integer :: ipoint, jpoint - integer :: nBlocks, blockSize - double precision :: acc_thr, err_tot, nrm_tot, err_loc double precision :: time0, time1 double precision :: cuda_time0, cuda_time1 double precision :: cpu_time0, cpu_time1 + double precision :: cpu_ttime0, cpu_ttime1 - double precision, allocatable :: r1(:,:), r2(:,:), aos_data2(:,:,:) + double precision, allocatable :: r1(:,:), r2(:,:), rn(:,:), aos_data2(:,:,:) double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:) double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) @@ -318,6 +75,7 @@ subroutine deb_int2_grad1_u12_ao_gpu() allocate(r1(n_points_final_grid,3)) allocate(r2(n_points_extra_final_grid,3)) + allocate(rn(3,nucl_num)) allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) do ipoint = 1, n_points_final_grid @@ -332,6 +90,12 @@ subroutine deb_int2_grad1_u12_ao_gpu() r2(ipoint,3) = final_grid_points_extra(3,ipoint) enddo + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + do k = 1, ao_num do ipoint = 1, n_points_extra_final_grid aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) @@ -343,20 +107,21 @@ subroutine deb_int2_grad1_u12_ao_gpu() ! 
--- - nBlocks = 256 - blockSize = 32 + PROVIDE nxBlocks nyBlocks nzBlocks + PROVIDE blockxSize blockySize blockzSize allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) call wall_time(cuda_time0) - call deb_int2_grad1_u12_ao(nBlocks, blockSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - r1, r2, final_weight_at_r_vector_extra, nucl_coord, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + call deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_grid_points_extra, final_weight_at_r_vector_extra, rn, & + aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & int2_grad1_u12_ao_gpu) call wall_time(cuda_time1) - print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 + write(*,"(A,2X,F15.7)") ' wall time for CUDA kernel (sec) = ', (cuda_time1 - cuda_time0) ! --- @@ -367,6 +132,7 @@ subroutine deb_int2_grad1_u12_ao_gpu() call wall_time(cpu_time0) + call wall_time(cpu_ttime0) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (j, i, jpoint) & @@ -381,8 +147,11 @@ subroutine deb_int2_grad1_u12_ao_gpu() enddo !$OMP END DO !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) + call wall_time(cpu_ttime0) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (ipoint) & @@ -396,15 +165,21 @@ subroutine deb_int2_grad1_u12_ao_gpu() enddo !$OMP END DO !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) + + call wall_time(cpu_ttime0) do m = 1, 4 call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) enddo + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) call wall_time(cpu_time1) - print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0 + write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) ! --- @@ -434,13 +209,13 @@ subroutine deb_int2_grad1_u12_ao_gpu() ! --- - deallocate(r1, r2, aos_data2) + deallocate(r1, r2, rn, aos_data2) deallocate(int_fct_long_range, grad1_u12) deallocate(int2_grad1_u12_ao) deallocate(int2_grad1_u12_ao_gpu) call wall_time(time1) - print*, ' wall time for deb_int2_grad1_u12_ao_gpu (min) = ', (time1-time0) / 60.d0 + write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) return end diff --git a/plugins/local/tc_int/gpu_module.F90 b/plugins/local/tc_int/gpu_module.F90 index f625e57d..1f07a2ea 100644 --- a/plugins/local/tc_int/gpu_module.F90 +++ b/plugins/local/tc_int/gpu_module.F90 @@ -38,61 +38,23 @@ module gpu_module ! --- - subroutine deb_int_long_range(nBlocks, blockSize, & - n_grid2, n_ao, wr2, aos_data2, & - int_fct_long_range) bind(C, name = "deb_int_long_range") - - import c_int, c_double - integer(c_int), intent(in), value :: nBlocks, blockSize - integer(c_int), intent(in), value :: n_grid2 - integer(c_int), intent(in), value :: n_ao - real(c_double), intent(in) :: wr2(n_grid2) - real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) - real(c_double), intent(out) :: int_fct_long_range(n_grid2,n_ao,n_ao) - - end subroutine deb_int_long_range - - ! 
--- - - subroutine deb_int_bh_kernel(nBlocks, blockSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, r2, rn, c_bh, m_bh, n_bh, o_bh, & - grad1_u12) bind(C, name = "deb_int_bh_kernel") + subroutine deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, r2, wr2, rn, aos_data2, c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao) bind(C, name ="deb_int2_grad1_u12_ao") import c_int, c_double, c_ptr - integer(c_int), intent(in), value :: nBlocks, blockSize + integer(c_int), intent(in), value :: nxBlocks, blockxSize + integer(c_int), intent(in), value :: nyBlocks, blockySize + integer(c_int), intent(in), value :: nzBlocks, blockzSize integer(c_int), intent(in), value :: n_grid1, n_grid2 integer(c_int), intent(in), value :: n_ao integer(c_int), intent(in), value :: n_nuc integer(c_int), intent(in), value :: size_bh - real(c_double), intent(in) :: r1(n_grid1,3) - real(c_double), intent(in) :: r2(n_grid2,3) - real(c_double), intent(in) :: rn(n_nuc,3) - real(c_double), intent(in) :: c_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(out) :: grad1_u12(n_grid2,n_grid1,4) - - end subroutine deb_int_bh_kernel - - ! --- - - subroutine deb_int2_grad1_u12_ao(nBlocks, blockSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, r2, wr2, rn, aos_data2, c_bh, m_bh, n_bh, o_bh, & - int2_grad1_u12_ao) bind(C, name ="deb_int2_grad1_u12_ao") - - import c_int, c_double, c_ptr - integer(c_int), intent(in), value :: nBlocks, blockSize - integer(c_int), intent(in), value :: n_grid1, n_grid2 - integer(c_int), intent(in), value :: n_ao - integer(c_int), intent(in), value :: n_nuc - integer(c_int), intent(in), value :: size_bh - real(c_double), intent(in) :: r1(n_grid1,3) - real(c_double), intent(in) :: r2(n_grid2,3) + real(c_double), intent(in) :: r1(3,n_grid1) + real(c_double), intent(in) :: r2(3,n_grid2) real(c_double), intent(in) :: wr2(n_grid2) - real(c_double), intent(in) :: rn(n_nuc,3) + real(c_double), intent(in) :: rn(3,n_nuc) real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) real(c_double), intent(in) :: c_bh(size_bh,n_nuc) integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) From 76ec02812e983d3cd04a63511bfe439b707e0082 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Fri, 2 Aug 2024 12:19:05 +0200 Subject: [PATCH 05/19] fix nb of blocks autom --- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 189 +++++++++++---------- 1 file changed, 99 insertions(+), 90 deletions(-) diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index a7b5b125..f888d792 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -107,9 +107,18 @@ subroutine deb_int2_grad1_u12_ao_gpu() ! --- + integer :: nB + integer :: sB + PROVIDE nxBlocks nyBlocks nzBlocks PROVIDE blockxSize blockySize blockzSize + sB = 32 + nB = (n_points_final_grid + sB - 1) / sB + + call ezfio_set_tc_int_blockxSize(sB) + call ezfio_set_tc_int_nxBlocks(nB) + allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) call wall_time(cuda_time0) @@ -126,96 +135,96 @@ subroutine deb_int2_grad1_u12_ao_gpu() ! 
--- - allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) - allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) - - call wall_time(cpu_time0) - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (j, i, jpoint) & - !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) - !$OMP DO SCHEDULE (static) - do j = 1, ao_num - do i = 1, ao_num - do jpoint = 1, n_points_extra_final_grid - int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) - !$OMP DO - do ipoint = 1, n_points_final_grid - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & - , grad1_u12(1,ipoint,2) & - , grad1_u12(1,ipoint,3) & - , grad1_u12(1,ipoint,4) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - do m = 1, 4 - call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & - , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & - , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) - enddo - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) - - call wall_time(cpu_time1) - write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) - - ! --- - - acc_thr = 1d-12 - err_tot = 0.d0 - nrm_tot = 0.d0 - - do m = 1, 4 - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", i, j, ipoint, m - print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) - print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) - enddo - enddo - enddo - enddo - - print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - deallocate(r1, r2, rn, aos_data2) - deallocate(int_fct_long_range, grad1_u12) - deallocate(int2_grad1_u12_ao) - deallocate(int2_grad1_u12_ao_gpu) - - call wall_time(time1) - write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) +! allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) +! allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) +! allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) +! +! call wall_time(cpu_time0) +! +! call wall_time(cpu_ttime0) +! !$OMP PARALLEL & +! !$OMP DEFAULT (NONE) & +! !$OMP PRIVATE (j, i, jpoint) & +! !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) +! !$OMP DO SCHEDULE (static) +! do j = 1, ao_num +! do i = 1, ao_num +! do jpoint = 1, n_points_extra_final_grid +! 
int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) +! enddo +! enddo +! enddo +! !$OMP END DO +! !$OMP END PARALLEL +! call wall_time(cpu_ttime1) +! write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) +! +! +! call wall_time(cpu_ttime0) +! !$OMP PARALLEL & +! !$OMP DEFAULT (NONE) & +! !$OMP PRIVATE (ipoint) & +! !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) +! !$OMP DO +! do ipoint = 1, n_points_final_grid +! call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & +! , grad1_u12(1,ipoint,2) & +! , grad1_u12(1,ipoint,3) & +! , grad1_u12(1,ipoint,4) ) +! enddo +! !$OMP END DO +! !$OMP END PARALLEL +! call wall_time(cpu_ttime1) +! write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) +! +! +! call wall_time(cpu_ttime0) +! do m = 1, 4 +! call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & +! , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & +! , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) +! enddo +! call wall_time(cpu_ttime1) +! write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) +! +! call wall_time(cpu_time1) +! write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) +! +! ! --- +! +! acc_thr = 1d-12 +! err_tot = 0.d0 +! nrm_tot = 0.d0 +! +! do m = 1, 4 +! do ipoint = 1, n_points_final_grid +! do j = 1, ao_num +! do i = 1, ao_num +! err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) +! if(err_loc > acc_thr) then +! print*, " error on", i, j, ipoint, m +! print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) +! print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) +! stop +! endif +! err_tot = err_tot + err_loc +! nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) +! enddo +! enddo +! enddo +! enddo +! +! print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot +! +! ! --- +! +! deallocate(r1, r2, rn, aos_data2) +! deallocate(int_fct_long_range, grad1_u12) +! deallocate(int2_grad1_u12_ao) +! deallocate(int2_grad1_u12_ao_gpu) +! +! call wall_time(time1) +! 
write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) return end From 4bd8b710a548db5efddaf79fcdb555dace58a1c6 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Fri, 2 Aug 2024 21:16:27 +0200 Subject: [PATCH 06/19] CuTC integrals: OK --- plugins/local/tc_int/compute_tc_int.irp.f | 11 +- plugins/local/tc_int/compute_tc_int_gpu.irp.f | 7 +- .../{gpu_module.F90 => cutc_module.F90} | 49 +- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 487 ++++++++++++++---- plugins/local/tc_int/install | 17 + plugins/local/tc_int/uninstall | 13 + plugins/local/tc_int/write_tc_int_cuda.irp.f | 88 +--- 7 files changed, 482 insertions(+), 190 deletions(-) rename plugins/local/tc_int/{gpu_module.F90 => cutc_module.F90} (54%) create mode 100755 plugins/local/tc_int/install create mode 100755 plugins/local/tc_int/uninstall diff --git a/plugins/local/tc_int/compute_tc_int.irp.f b/plugins/local/tc_int/compute_tc_int.irp.f index 35034454..97815904 100644 --- a/plugins/local/tc_int/compute_tc_int.irp.f +++ b/plugins/local/tc_int/compute_tc_int.irp.f @@ -149,6 +149,7 @@ subroutine provide_int2_grad1_u12_ao() call wall_time(time1) print*, ' wall time for int2_grad1_u12_ao (min) = ', (time1-time0) / 60.d0 print*, ' wall time Jastrow derivatives (min) = ', tc / 60.d0 + call print_memory_usage() ! --- @@ -156,11 +157,11 @@ subroutine provide_int2_grad1_u12_ao() ! --- + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num)) call wall_time(time1) - allocate(c_mat(n_points_final_grid,ao_num,ao_num)) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i, k, ipoint) & @@ -178,17 +179,16 @@ subroutine provide_int2_grad1_u12_ao() call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) - deallocate(c_mat) call wall_time(time2) print*, ' wall time of Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + call print_memory_usage() ! --- call wall_time(time1) - allocate(c_mat(n_points_final_grid,ao_num,ao_num)) do m = 1, 3 !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -215,10 +215,12 @@ subroutine provide_int2_grad1_u12_ao() , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) enddo - deallocate(c_mat) call wall_time(time2) print*, ' wall time of non-Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + + deallocate(c_mat) + call print_memory_usage() ! --- @@ -229,6 +231,7 @@ subroutine provide_int2_grad1_u12_ao() call wall_time(time2) print*, ' lower- and upper-triangle of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 + call print_memory_usage() ! --- diff --git a/plugins/local/tc_int/compute_tc_int_gpu.irp.f b/plugins/local/tc_int/compute_tc_int_gpu.irp.f index 5db07dd6..c2653ac6 100644 --- a/plugins/local/tc_int/compute_tc_int_gpu.irp.f +++ b/plugins/local/tc_int/compute_tc_int_gpu.irp.f @@ -178,11 +178,11 @@ stop ! 
--- + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num)) call wall_time(time1) - allocate(c_mat(n_points_final_grid,ao_num,ao_num)) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i, k, ipoint) & @@ -200,7 +200,6 @@ stop call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & , int2_grad1_u12_ao%f(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) - deallocate(c_mat) call wall_time(time2) print*, ' wall time of Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 @@ -210,7 +209,6 @@ stop call wall_time(time1) - allocate(c_mat(n_points_final_grid,ao_num,ao_num)) do m = 1, 3 !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -237,12 +235,13 @@ stop , int2_grad1_u12_ao%f(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) enddo - deallocate(c_mat) call wall_time(time2) print*, ' wall time of non-Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0 call print_memory_usage() + deallocate(c_mat) + ! --- call wall_time(time1) diff --git a/plugins/local/tc_int/gpu_module.F90 b/plugins/local/tc_int/cutc_module.F90 similarity index 54% rename from plugins/local/tc_int/gpu_module.F90 rename to plugins/local/tc_int/cutc_module.F90 index 1f07a2ea..69c2a131 100644 --- a/plugins/local/tc_int/gpu_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -1,5 +1,5 @@ -module gpu_module +module cutc_module use, intrinsic :: iso_c_binding @@ -9,7 +9,8 @@ module gpu_module ! --- - subroutine tc_int_c(nBlocks, blockSize, & + subroutine tc_int_c(nxBlocks, nyBlocks, nzBlocks, & + blockxSize, blockySize, blockzSize, & n_grid1, n_grid2, n_ao, n_nuc, size_bh, & r1, wr1, r2, wr2, rn, & aos_data1, aos_data2, & @@ -17,14 +18,16 @@ module gpu_module int2_grad1_u12_ao, int_2e_ao) bind(C, name = "tc_int_c") import c_int, c_double, c_ptr - integer(c_int), intent(in), value :: nBlocks, blockSize + integer(c_int), intent(in), value :: nxBlocks, blockxSize + integer(c_int), intent(in), value :: nyBlocks, blockySize + integer(c_int), intent(in), value :: nzBlocks, blockzSize integer(c_int), intent(in), value :: n_grid1, n_grid2 integer(c_int), intent(in), value :: n_ao integer(c_int), intent(in), value :: n_nuc integer(c_int), intent(in), value :: size_bh - real(c_double), intent(in) :: r1(n_grid1,3), wr1(n_grid1) - real(c_double), intent(in) :: r2(n_grid2,3), wr2(n_grid2) - real(c_double), intent(in) :: rn(n_nuc,3) + real(c_double), intent(in) :: r1(3,n_grid1), wr1(n_grid1) + real(c_double), intent(in) :: r2(3,n_grid2), wr2(n_grid2) + real(c_double), intent(in) :: rn(3,n_nuc) real(c_double), intent(in) :: aos_data1(n_grid1,n_ao,4) real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) real(c_double), intent(in) :: c_bh(size_bh,n_nuc) @@ -66,8 +69,40 @@ module gpu_module ! 
--- + subroutine deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, & + blockxSize, blockySize, blockzSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, wr1, r2, wr2, rn, & + aos_data1, aos_data2, & + c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao, int_2e_ao) bind(C, name = "deb_int_2e_ao") + + import c_int, c_double, c_ptr + integer(c_int), intent(in), value :: nxBlocks, blockxSize + integer(c_int), intent(in), value :: nyBlocks, blockySize + integer(c_int), intent(in), value :: nzBlocks, blockzSize + integer(c_int), intent(in), value :: n_grid1, n_grid2 + integer(c_int), intent(in), value :: n_ao + integer(c_int), intent(in), value :: n_nuc + integer(c_int), intent(in), value :: size_bh + real(c_double), intent(in) :: r1(3,n_grid1), wr1(n_grid1) + real(c_double), intent(in) :: r2(3,n_grid2), wr2(n_grid2) + real(c_double), intent(in) :: rn(3,n_nuc) + real(c_double), intent(in) :: aos_data1(n_grid1,n_ao,4) + real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) + real(c_double), intent(in) :: c_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) + integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) + real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) + real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) + + end subroutine deb_int_2e_ao + + ! --- + end interface -end module gpu_module +end module cutc_module diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index f888d792..2c4e975b 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -36,7 +36,8 @@ subroutine main() implicit none - call deb_int2_grad1_u12_ao_gpu() + !call deb_int2_grad1_u12_ao_gpu() + call deb_int_2e_ao_gpu() return end @@ -45,7 +46,7 @@ end subroutine deb_int2_grad1_u12_ao_gpu() - use gpu_module + use cutc_module implicit none @@ -56,11 +57,10 @@ subroutine deb_int2_grad1_u12_ao_gpu() double precision :: acc_thr, err_tot, nrm_tot, err_loc double precision :: time0, time1 - double precision :: cuda_time0, cuda_time1 double precision :: cpu_time0, cpu_time1 double precision :: cpu_ttime0, cpu_ttime1 - double precision, allocatable :: r1(:,:), r2(:,:), rn(:,:), aos_data2(:,:,:) + double precision, allocatable :: rn(:,:), aos_data2(:,:,:) double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:) double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) @@ -73,23 +73,9 @@ subroutine deb_int2_grad1_u12_ao_gpu() ! 
--- - allocate(r1(n_points_final_grid,3)) - allocate(r2(n_points_extra_final_grid,3)) allocate(rn(3,nucl_num)) allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - do ipoint = 1, n_points_final_grid - r1(ipoint,1) = final_grid_points(1,ipoint) - r1(ipoint,2) = final_grid_points(2,ipoint) - r1(ipoint,3) = final_grid_points(3,ipoint) - enddo - - do ipoint = 1, n_points_extra_final_grid - r2(ipoint,1) = final_grid_points_extra(1,ipoint) - r2(ipoint,2) = final_grid_points_extra(2,ipoint) - r2(ipoint,3) = final_grid_points_extra(3,ipoint) - enddo - do k = 1, nucl_num rn(1,k) = nucl_coord(k,1) rn(2,k) = nucl_coord(k,2) @@ -121,110 +107,387 @@ subroutine deb_int2_grad1_u12_ao_gpu() allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) - call wall_time(cuda_time0) - call deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & final_grid_points, final_grid_points_extra, final_weight_at_r_vector_extra, rn, & aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & int2_grad1_u12_ao_gpu) - call wall_time(cuda_time1) - write(*,"(A,2X,F15.7)") ' wall time for CUDA kernel (sec) = ', (cuda_time1 - cuda_time0) + ! --- + + allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) + allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + + call wall_time(cpu_time0) + + call wall_time(cpu_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + call wall_time(cpu_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) + !$OMP DO + do ipoint = 1, n_points_final_grid + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & + , grad1_u12(1,ipoint,2) & + , grad1_u12(1,ipoint,3) & + , grad1_u12(1,ipoint,4) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + call wall_time(cpu_ttime0) + do m = 1, 4 + call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & + , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) + enddo + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) + + call wall_time(cpu_time1) + write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) ! --- + acc_thr = 1d-12 + err_tot = 0.d0 + nrm_tot = 0.d0 -! allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) -! allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) -! allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) -! -! 
call wall_time(cpu_time0) -! -! call wall_time(cpu_ttime0) -! !$OMP PARALLEL & -! !$OMP DEFAULT (NONE) & -! !$OMP PRIVATE (j, i, jpoint) & -! !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) -! !$OMP DO SCHEDULE (static) -! do j = 1, ao_num -! do i = 1, ao_num -! do jpoint = 1, n_points_extra_final_grid -! int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) -! enddo -! enddo -! enddo -! !$OMP END DO -! !$OMP END PARALLEL -! call wall_time(cpu_ttime1) -! write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) -! -! -! call wall_time(cpu_ttime0) -! !$OMP PARALLEL & -! !$OMP DEFAULT (NONE) & -! !$OMP PRIVATE (ipoint) & -! !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) -! !$OMP DO -! do ipoint = 1, n_points_final_grid -! call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & -! , grad1_u12(1,ipoint,2) & -! , grad1_u12(1,ipoint,3) & -! , grad1_u12(1,ipoint,4) ) -! enddo -! !$OMP END DO -! !$OMP END PARALLEL -! call wall_time(cpu_ttime1) -! write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) -! -! -! call wall_time(cpu_ttime0) -! do m = 1, 4 -! call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & -! , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & -! , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) -! enddo -! call wall_time(cpu_ttime1) -! write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) -! -! call wall_time(cpu_time1) -! write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) -! -! ! --- -! -! acc_thr = 1d-12 -! err_tot = 0.d0 -! nrm_tot = 0.d0 -! -! do m = 1, 4 -! do ipoint = 1, n_points_final_grid -! do j = 1, ao_num -! do i = 1, ao_num -! err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) -! if(err_loc > acc_thr) then -! print*, " error on", i, j, ipoint, m -! print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) -! print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) -! stop -! endif -! err_tot = err_tot + err_loc -! nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) -! enddo -! enddo -! enddo -! enddo -! -! print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot -! -! ! --- -! -! deallocate(r1, r2, rn, aos_data2) -! deallocate(int_fct_long_range, grad1_u12) -! deallocate(int2_grad1_u12_ao) -! deallocate(int2_grad1_u12_ao_gpu) -! -! call wall_time(time1) -! write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) + do m = 1, 4 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", i, j, ipoint, m + print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) + print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) + enddo + enddo + enddo + enddo + + print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot + + ! 
--- + + deallocate(int_fct_long_range, grad1_u12) + deallocate(int2_grad1_u12_ao) + deallocate(int2_grad1_u12_ao_gpu) + deallocate(rn, aos_data2) + + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) + + return +end + +! --- + +subroutine deb_int_2e_ao_gpu() + + use cutc_module + + implicit none + + integer :: m + integer :: i, j, k, l + integer :: ipoint, jpoint + + double precision :: weight1, ao_i_r, ao_k_r + + double precision :: acc_thr, err_tot, nrm_tot, err_loc + + double precision :: time0, time1 + double precision :: cpu_time0, cpu_time1 + double precision :: cpu_ttime0, cpu_ttime1 + double precision :: tt1, tt2 + + double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:), c_mat(:,:,:) + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) + double precision, allocatable :: int_2e_ao(:,:,:,:) + double precision, allocatable :: int_2e_ao_gpu(:,:,:,:) + + + + call wall_time(time0) + print*, ' start deb_int_2e_ao_gpu' + + + ! --- + + allocate(rn(3,nucl_num)) + allocate(aos_data1(n_points_final_grid,ao_num,4)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3) + enddo + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + ! --- + + integer :: nB + integer :: sB + + PROVIDE nxBlocks nyBlocks nzBlocks + PROVIDE blockxSize blockySize blockzSize + + sB = 32 + nB = (n_points_final_grid + sB - 1) / sB + + call ezfio_set_tc_int_blockxSize(sB) + call ezfio_set_tc_int_nxBlocks(nB) + + allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) + allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num)) + + call deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao_gpu, int_2e_ao_gpu) + + ! 
--- + + allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) + allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + + call wall_time(cpu_time0) + + call wall_time(cpu_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + call wall_time(cpu_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) + !$OMP DO + do ipoint = 1, n_points_final_grid + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & + , grad1_u12(1,ipoint,2) & + , grad1_u12(1,ipoint,3) & + , grad1_u12(1,ipoint,4) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + call wall_time(cpu_ttime0) + do m = 1, 4 + call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & + , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) + enddo + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of integ over r2 (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + call wall_time(cpu_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time of Hermitian part (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + call wall_time(cpu_ttime0) + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 0.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of Hermitian part (sec) = ', (cpu_ttime1 - cpu_ttime0) + + + tt1 = 0.d0 + tt2 = 0.d0 + do m = 1, 3 + + call wall_time(cpu_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, & + !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector, m) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = final_weight_at_r_vector(ipoint) + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r 
= aos_in_r_array_transp(ipoint,k) + + c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(cpu_ttime1) + tt1 += cpu_ttime1 - cpu_ttime0 + + call wall_time(cpu_ttime0) + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -0.5d0 & + , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 1.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(cpu_ttime1) + tt2 += cpu_ttime1 - cpu_ttime0 + enddo + write(*,"(A,2X,F15.7)") ' wall time of non-Hermitian part (sec) = ', tt1 + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of non Hermitian part (sec) = ', tt2 + + + call wall_time(cpu_ttime0) + call sum_A_At(int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(cpu_ttime1) + write(*,"(A,2X,F15.7)") ' wall time of A + A.T (sec) = ', cpu_ttime1 - cpu_ttime0 + + + call wall_time(cpu_time1) + write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) + + ! --- + + acc_thr = 1d-12 + + print *, ' precision on int2_grad1_u12_ao ' + err_tot = 0.d0 + nrm_tot = 0.d0 + do m = 1, 4 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", i, j, ipoint, m + print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) + print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on int2_grad1_u12_ao (%) =', 100.d0 * err_tot / nrm_tot + + + print *, ' precision on int_2e_ao ' + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, ao_num + do j = 1, ao_num + do k = 1, ao_num + do l = 1, ao_num + err_loc = dabs(int_2e_ao(l,k,j,i) - int_2e_ao_gpu(l,k,j,i)) + if(err_loc > acc_thr) then + print*, " error on", l, k, j, i + print*, " CPU res", int_2e_ao (l,k,j,i) + print*, " GPU res", int_2e_ao_gpu(l,k,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int_2e_ao(l,k,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on int_2e_ao (%) =', 100.d0 * err_tot / nrm_tot + + + ! --- + + deallocate(int_fct_long_range, grad1_u12, c_mat) + deallocate(int_2e_ao, int2_grad1_u12_ao) + deallocate(int_2e_ao_gpu, int2_grad1_u12_ao_gpu) + deallocate(rn, aos_data1, aos_data2) + + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for deb_int_2e_ao_gpu (sec) = ', (time1 - time0) return end diff --git a/plugins/local/tc_int/install b/plugins/local/tc_int/install new file mode 100755 index 00000000..9d1886f0 --- /dev/null +++ b/plugins/local/tc_int/install @@ -0,0 +1,17 @@ +#!/bin/bash + +# Check if the QP_ROOT environment variable is set. +if [[ -z ${QP_ROOT} ]] +then + print "The QP_ROOT environment variable is not set." + print "Please reload the quantum_package.rc file." + exit -1 +fi + +git clone https://github.com/AbdAmmar/CuTC +cd CuTC +source config/env.rc +make + +ln -s ${PWD}/CuTC/build/libtc_int_cu.so ${QP_ROOT}/lib + diff --git a/plugins/local/tc_int/uninstall b/plugins/local/tc_int/uninstall new file mode 100755 index 00000000..3dd3612c --- /dev/null +++ b/plugins/local/tc_int/uninstall @@ -0,0 +1,13 @@ +#!/bin/bash + +# Check if the QP_ROOT environment variable is set. +if [[ -z ${QP_ROOT} ]] +then + print "The QP_ROOT environment variable is not set." 
+ print "Please reload the quantum_package.rc file." + exit -1 +fi + +rm -rf ${PWD}/CuTC +rm ${QP_ROOT}/lib/libtc_int_cu.so + diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index de3be412..bc1a118d 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -56,21 +56,13 @@ end subroutine do_work_on_gpu() - use gpu_module + use cutc_module implicit none integer :: k, ipoint - integer :: nBlocks, blockSize - integer :: n_grid1, n_grid2 - integer :: n_ao - integer :: n_nuc - integer :: size_bh - double precision, allocatable :: r1(:,:), wr1(:), r2(:,:), wr2(:), rn(:,:) double precision, allocatable :: aos_data1(:,:,:), aos_data2(:,:,:) - double precision, allocatable :: c_bh(:,:) - integer, allocatable :: m_bh(:,:), n_bh(:,:), o_bh(:,:) double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) double precision, allocatable :: int_2e_ao(:,:,:,:) @@ -80,47 +72,11 @@ subroutine do_work_on_gpu() call wall_time(time0) print*, ' start calculation of TC-integrals' - nBlocks = 100 - blockSize = 32 + allocate(aos_data1(n_points_final_grid,ao_num,4)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) - n_grid1 = n_points_final_grid - n_grid2 = n_points_extra_final_grid - - n_ao = ao_num - n_nuc = nucl_num - - size_bh = jBH_size - - print*, " nBlocks =", nBlocks - print*, " blockSize =", blockSize - print*, " n_grid1 =", n_grid1 - print*, " n_grid2 =", n_grid2 - print*, " n_ao =", n_ao - print*, " n_nuc =", n_nuc - print *, " size_bh =", size_bh - - allocate(r1(n_grid1,3), wr1(n_grid1)) - allocate(r2(n_grid2,3), wr2(n_grid2)) - allocate(rn(n_nuc,3)) - allocate(aos_data1(n_grid1,n_ao,4)) - allocate(aos_data2(n_grid2,n_ao,4)) - allocate(c_bh(size_bh,n_nuc), m_bh(size_bh,n_nuc), n_bh(size_bh,n_nuc), o_bh(size_bh,n_nuc)) - allocate(int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4)) - allocate(int_2e_ao(n_ao,n_ao,n_ao,n_ao)) - - do ipoint = 1, n_points_final_grid - r1(ipoint,1) = final_grid_points(1,ipoint) - r1(ipoint,2) = final_grid_points(2,ipoint) - r1(ipoint,3) = final_grid_points(3,ipoint) - wr1(ipoint) = final_weight_at_r_vector(ipoint) - enddo - - do ipoint = 1, n_points_extra_final_grid - r2(ipoint,1) = final_grid_points_extra(1,ipoint) - r2(ipoint,2) = final_grid_points_extra(2,ipoint) - r2(ipoint,3) = final_grid_points_extra(3,ipoint) - wr2(ipoint) = final_weight_at_r_vector_extra(ipoint) - enddo do k = 1, ao_num do ipoint = 1, n_points_final_grid @@ -138,31 +94,37 @@ subroutine do_work_on_gpu() enddo enddo - rn(:,:) = nucl_coord(:,:) + ! 
--- + + integer :: nB + integer :: sB + + PROVIDE nxBlocks nyBlocks nzBlocks + PROVIDE blockxSize blockySize blockzSize + + sB = 32 + nB = (n_points_final_grid + sB - 1) / sB + + call ezfio_set_tc_int_blockxSize(sB) + call ezfio_set_tc_int_nxBlocks(nB) + - c_bh(:,:) = jBH_c(:,:) - m_bh(:,:) = jBH_m(:,:) - n_bh(:,:) = jBH_n(:,:) - o_bh(:,:) = jBH_o(:,:) call wall_time(cuda_time0) print*, ' start CUDA kernel' - int2_grad1_u12_ao = 0.d0 - int_2e_ao = 0.d0 - - call tc_int_c(nBlocks, blockSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, wr1, r2, wr2, rn, aos_data1, aos_data2, & - c_bh, m_bh, n_bh, o_bh, & + call tc_int_c(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + nucl_coord, aos_data1, aos_data2, & + jBH_c, jBH_m, jBH_n, jBH_o, & int2_grad1_u12_ao, int_2e_ao) call wall_time(cuda_time1) print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 - deallocate(r1, wr1, r2, wr2, rn) deallocate(aos_data1, aos_data2) - deallocate(c_bh, m_bh, n_bh, o_bh) ! --- From 8e0fe19ab4061467533d93d2aba90768eaea23b4 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Sat, 3 Aug 2024 12:32:35 +0200 Subject: [PATCH 07/19] fixed path for install --- plugins/local/tc_int/install | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/local/tc_int/install b/plugins/local/tc_int/install index 9d1886f0..34e56b4a 100755 --- a/plugins/local/tc_int/install +++ b/plugins/local/tc_int/install @@ -12,6 +12,7 @@ git clone https://github.com/AbdAmmar/CuTC cd CuTC source config/env.rc make +cd .. ln -s ${PWD}/CuTC/build/libtc_int_cu.so ${QP_ROOT}/lib From c4b19af2ecfbb049a8b5db5e3cc5546dc109932f Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Sat, 3 Aug 2024 18:08:23 +0200 Subject: [PATCH 08/19] cuda tc-integrals tested --- plugins/local/tc_int/EZFIO.cfg | 4 +- plugins/local/tc_int/cutc_module.F90 | 28 --- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 184 ++----------------- plugins/local/tc_int/write_tc_int_cuda.irp.f | 41 ++++- 4 files changed, 56 insertions(+), 201 deletions(-) diff --git a/plugins/local/tc_int/EZFIO.cfg b/plugins/local/tc_int/EZFIO.cfg index 12366f01..5615ce4b 100644 --- a/plugins/local/tc_int/EZFIO.cfg +++ b/plugins/local/tc_int/EZFIO.cfg @@ -8,7 +8,7 @@ default: 10 type: integer doc: nb of y blocks in the Grid interface: ezfio,provider,ocaml -default: 10 +default: 1 [nzBlocks] type: integer @@ -26,7 +26,7 @@ default: 32 type: integer doc: size of y blocks interface: ezfio,provider,ocaml -default: 32 +default: 1 [blockzSize] type: integer diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index 69c2a131..b96c1bef 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -41,34 +41,6 @@ module cutc_module ! 
--- - subroutine deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, r2, wr2, rn, aos_data2, c_bh, m_bh, n_bh, o_bh, & - int2_grad1_u12_ao) bind(C, name ="deb_int2_grad1_u12_ao") - - import c_int, c_double, c_ptr - integer(c_int), intent(in), value :: nxBlocks, blockxSize - integer(c_int), intent(in), value :: nyBlocks, blockySize - integer(c_int), intent(in), value :: nzBlocks, blockzSize - integer(c_int), intent(in), value :: n_grid1, n_grid2 - integer(c_int), intent(in), value :: n_ao - integer(c_int), intent(in), value :: n_nuc - integer(c_int), intent(in), value :: size_bh - real(c_double), intent(in) :: r1(3,n_grid1) - real(c_double), intent(in) :: r2(3,n_grid2) - real(c_double), intent(in) :: wr2(n_grid2) - real(c_double), intent(in) :: rn(3,n_nuc) - real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) - real(c_double), intent(in) :: c_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) - - end subroutine deb_int2_grad1_u12_ao - - ! --- - subroutine deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, & blockxSize, blockySize, blockzSize, & n_grid1, n_grid2, n_ao, n_nuc, size_bh, & diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index 2c4e975b..75e3b4fe 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -36,7 +36,6 @@ subroutine main() implicit none - !call deb_int2_grad1_u12_ao_gpu() call deb_int_2e_ao_gpu() return @@ -44,173 +43,6 @@ end ! --- -subroutine deb_int2_grad1_u12_ao_gpu() - - use cutc_module - - implicit none - - integer :: m - integer :: i, j, k - integer :: ipoint, jpoint - - double precision :: acc_thr, err_tot, nrm_tot, err_loc - - double precision :: time0, time1 - double precision :: cpu_time0, cpu_time1 - double precision :: cpu_ttime0, cpu_ttime1 - - double precision, allocatable :: rn(:,:), aos_data2(:,:,:) - double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:) - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) - - - - call wall_time(time0) - print*, ' start deb_int2_grad1_u12_ao_gpu' - - - ! --- - - allocate(rn(3,nucl_num)) - allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - - do k = 1, nucl_num - rn(1,k) = nucl_coord(k,1) - rn(2,k) = nucl_coord(k,2) - rn(3,k) = nucl_coord(k,3) - enddo - - do k = 1, ao_num - do ipoint = 1, n_points_extra_final_grid - aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) - aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) - aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) - aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) - enddo - enddo - - ! 
--- - - integer :: nB - integer :: sB - - PROVIDE nxBlocks nyBlocks nzBlocks - PROVIDE blockxSize blockySize blockzSize - - sB = 32 - nB = (n_points_final_grid + sB - 1) / sB - - call ezfio_set_tc_int_blockxSize(sB) - call ezfio_set_tc_int_nxBlocks(nB) - - allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) - - call deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - final_grid_points, final_grid_points_extra, final_weight_at_r_vector_extra, rn, & - aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & - int2_grad1_u12_ao_gpu) - - ! --- - - allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) - allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) - - call wall_time(cpu_time0) - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (j, i, jpoint) & - !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) - !$OMP DO SCHEDULE (static) - do j = 1, ao_num - do i = 1, ao_num - do jpoint = 1, n_points_extra_final_grid - int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) - !$OMP DO - do ipoint = 1, n_points_final_grid - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & - , grad1_u12(1,ipoint,2) & - , grad1_u12(1,ipoint,3) & - , grad1_u12(1,ipoint,4) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - do m = 1, 4 - call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & - , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & - , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) - enddo - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) - - call wall_time(cpu_time1) - write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) - - ! --- - - acc_thr = 1d-12 - err_tot = 0.d0 - nrm_tot = 0.d0 - - do m = 1, 4 - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", i, j, ipoint, m - print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) - print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) - enddo - enddo - enddo - enddo - - print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot - - ! 
--- - - deallocate(int_fct_long_range, grad1_u12) - deallocate(int2_grad1_u12_ao) - deallocate(int2_grad1_u12_ao_gpu) - deallocate(rn, aos_data2) - - call wall_time(time1) - write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) - - return -end - -! --- - subroutine deb_int_2e_ao_gpu() use cutc_module @@ -479,6 +311,22 @@ subroutine deb_int_2e_ao_gpu() print *, ' absolute accuracy on int_2e_ao (%) =', 100.d0 * err_tot / nrm_tot + ! --- + + print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") + call ezfio_set_work_empty(.False.) + write(11) int2_grad1_u12_ao_gpu(:,:,:,1:3) + close(11) + + print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write") + call ezfio_set_work_empty(.False.) + do k = 1, ao_num + write(11) int_2e_ao_gpu(:,:,:,k) + enddo + close(11) + ! --- deallocate(int_fct_long_range, grad1_u12, c_mat) diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index bc1a118d..b74cd0cd 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -62,7 +62,7 @@ subroutine do_work_on_gpu() integer :: k, ipoint - double precision, allocatable :: aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) double precision, allocatable :: int_2e_ao(:,:,:,:) @@ -72,12 +72,19 @@ subroutine do_work_on_gpu() call wall_time(time0) print*, ' start calculation of TC-integrals' + allocate(rn(3,nucl_num)) allocate(aos_data1(n_points_final_grid,ao_num,4)) allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + do k = 1, ao_num do ipoint = 1, n_points_final_grid aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) @@ -117,8 +124,7 @@ subroutine do_work_on_gpu() n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & final_grid_points, final_weight_at_r_vector, & final_grid_points_extra, final_weight_at_r_vector_extra, & - nucl_coord, aos_data1, aos_data2, & - jBH_c, jBH_m, jBH_n, jBH_o, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & int2_grad1_u12_ao, int_2e_ao) call wall_time(cuda_time1) @@ -128,6 +134,35 @@ subroutine do_work_on_gpu() ! --- + integer :: i, j, l + double precision :: t1, t2 + double precision, external :: get_ao_two_e_integral + + call wall_time(t1) + + PROVIDE ao_integrals_map + !$OMP PARALLEL DEFAULT(NONE) & + !$OMP SHARED(ao_num, int_2e_ao, ao_integrals_map) & + !$OMP PRIVATE(i, j, k, l) + !$OMP DO COLLAPSE(3) + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ! < 1:i, 2:j | 1:k, 2:l > + int_2e_ao(k,i,l,j) = int_2e_ao(k,i,l,j) + get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(t2) + print*, ' wall time of Coulomb part of tc_int_2e_ao (min) ', (t2 - t1) / 60.d0 + + ! 
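Note (illustration only, not part of the patch series): the loop added just
above completes the TC tensor by adding the ordinary Coulomb integrals on the
CPU. Following the physicist notation in the comment, < 1:i, 2:j | 1:k, 2:l >
is accumulated into the storage slot int_2e_ao(k,i,l,j), and COLLAPSE(3)
fuses the three outer loops so OpenMP can distribute ao_num**3 independent
iterations. The same pattern in miniature, with a hypothetical coulomb()
standing in for the integral-map lookup:

  program demo_collapse_accumulate
    implicit none
    integer, parameter :: n = 3
    double precision :: w(n,n,n,n)
    integer :: i, j, k, l
    w = 1.d0
    !$omp parallel do collapse(3) private(i, j, k, l)
    do j = 1, n
      do l = 1, n
        do i = 1, n
          do k = 1, n
            ! < 1:i, 2:j | 1:k, 2:l > accumulated into w(k,i,l,j);
            ! each (j,l,i) triple owns a disjoint slice, so no races
            w(k,i,l,j) = w(k,i,l,j) + coulomb(i, j, k, l)
          enddo
        enddo
      enddo
    enddo
    !$omp end parallel do
    print *, w(1,1,1,1)
  contains
    pure function coulomb(i, j, k, l) result(v)   ! hypothetical stand-in
      integer, intent(in) :: i, j, k, l
      double precision :: v
      v = 1.d0 / dble(i + j + k + l)
    end function coulomb
  end program demo_collapse_accumulate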
--- + print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") call ezfio_set_work_empty(.False.) From 672fdfd7f2217831b7d4ec177deef526b7afc45b Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Sat, 3 Aug 2024 18:08:23 +0200 Subject: [PATCH 09/19] cuda tc-integrals tested --- plugins/local/tc_int/EZFIO.cfg | 4 +- plugins/local/tc_int/cutc_module.F90 | 28 ---- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 168 ------------------- plugins/local/tc_int/write_tc_int_cuda.irp.f | 41 ++++- 4 files changed, 40 insertions(+), 201 deletions(-) diff --git a/plugins/local/tc_int/EZFIO.cfg b/plugins/local/tc_int/EZFIO.cfg index 12366f01..5615ce4b 100644 --- a/plugins/local/tc_int/EZFIO.cfg +++ b/plugins/local/tc_int/EZFIO.cfg @@ -8,7 +8,7 @@ default: 10 type: integer doc: nb of y blocks in the Grid interface: ezfio,provider,ocaml -default: 10 +default: 1 [nzBlocks] type: integer @@ -26,7 +26,7 @@ default: 32 type: integer doc: size of y blocks interface: ezfio,provider,ocaml -default: 32 +default: 1 [blockzSize] type: integer diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index 69c2a131..b96c1bef 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -41,34 +41,6 @@ module cutc_module ! --- - subroutine deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, r2, wr2, rn, aos_data2, c_bh, m_bh, n_bh, o_bh, & - int2_grad1_u12_ao) bind(C, name ="deb_int2_grad1_u12_ao") - - import c_int, c_double, c_ptr - integer(c_int), intent(in), value :: nxBlocks, blockxSize - integer(c_int), intent(in), value :: nyBlocks, blockySize - integer(c_int), intent(in), value :: nzBlocks, blockzSize - integer(c_int), intent(in), value :: n_grid1, n_grid2 - integer(c_int), intent(in), value :: n_ao - integer(c_int), intent(in), value :: n_nuc - integer(c_int), intent(in), value :: size_bh - real(c_double), intent(in) :: r1(3,n_grid1) - real(c_double), intent(in) :: r2(3,n_grid2) - real(c_double), intent(in) :: wr2(n_grid2) - real(c_double), intent(in) :: rn(3,n_nuc) - real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) - real(c_double), intent(in) :: c_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) - - end subroutine deb_int2_grad1_u12_ao - - ! --- - subroutine deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, & blockxSize, blockySize, blockzSize, & n_grid1, n_grid2, n_ao, n_nuc, size_bh, & diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index 2c4e975b..a080666e 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -36,7 +36,6 @@ subroutine main() implicit none - !call deb_int2_grad1_u12_ao_gpu() call deb_int_2e_ao_gpu() return @@ -44,173 +43,6 @@ end ! 
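Note (illustration only, not part of the patch series): the hunks above save
the results with Fortran unformatted sequential I/O (one binary record per
write statement) into the EZFIO work/ directory; the two-electron tensor is
written slice by slice so no single record holds the whole ao_num**4 array.
A minimal round-trip of the same pattern (the file name is arbitrary):

  program demo_unformatted_io
    implicit none
    double precision :: a(2,2,2), b(2,2)
    integer :: k
    call random_number(a)
    open(unit=11, form="unformatted", file="demo.bin", action="write")
    do k = 1, 2
      write(11) a(:,:,k)                 ! one record per slice
    enddo
    close(11)
    open(unit=11, form="unformatted", file="demo.bin", action="read")
    do k = 1, 2
      read(11) b
      print *, maxval(abs(a(:,:,k) - b)) ! 0.0 : exact binary round-trip
    enddo
    close(11)
  end program demo_unformatted_io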
--- -subroutine deb_int2_grad1_u12_ao_gpu() - - use cutc_module - - implicit none - - integer :: m - integer :: i, j, k - integer :: ipoint, jpoint - - double precision :: acc_thr, err_tot, nrm_tot, err_loc - - double precision :: time0, time1 - double precision :: cpu_time0, cpu_time1 - double precision :: cpu_ttime0, cpu_ttime1 - - double precision, allocatable :: rn(:,:), aos_data2(:,:,:) - double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:) - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) - - - - call wall_time(time0) - print*, ' start deb_int2_grad1_u12_ao_gpu' - - - ! --- - - allocate(rn(3,nucl_num)) - allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - - do k = 1, nucl_num - rn(1,k) = nucl_coord(k,1) - rn(2,k) = nucl_coord(k,2) - rn(3,k) = nucl_coord(k,3) - enddo - - do k = 1, ao_num - do ipoint = 1, n_points_extra_final_grid - aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) - aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) - aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) - aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) - enddo - enddo - - ! --- - - integer :: nB - integer :: sB - - PROVIDE nxBlocks nyBlocks nzBlocks - PROVIDE blockxSize blockySize blockzSize - - sB = 32 - nB = (n_points_final_grid + sB - 1) / sB - - call ezfio_set_tc_int_blockxSize(sB) - call ezfio_set_tc_int_nxBlocks(nB) - - allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) - - call deb_int2_grad1_u12_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - final_grid_points, final_grid_points_extra, final_weight_at_r_vector_extra, rn, & - aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & - int2_grad1_u12_ao_gpu) - - ! 
--- - - allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) - allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) - - call wall_time(cpu_time0) - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (j, i, jpoint) & - !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) - !$OMP DO SCHEDULE (static) - do j = 1, ao_num - do i = 1, ao_num - do jpoint = 1, n_points_extra_final_grid - int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) - !$OMP DO - do ipoint = 1, n_points_final_grid - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & - , grad1_u12(1,ipoint,2) & - , grad1_u12(1,ipoint,3) & - , grad1_u12(1,ipoint,4) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - do m = 1, 4 - call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & - , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & - , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) - enddo - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for DGEMM (sec) = ', (cpu_ttime1 - cpu_ttime0) - - call wall_time(cpu_time1) - write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) - - ! --- - - acc_thr = 1d-12 - err_tot = 0.d0 - nrm_tot = 0.d0 - - do m = 1, 4 - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", i, j, ipoint, m - print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) - print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) - enddo - enddo - enddo - enddo - - print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - deallocate(int_fct_long_range, grad1_u12) - deallocate(int2_grad1_u12_ao) - deallocate(int2_grad1_u12_ao_gpu) - deallocate(rn, aos_data2) - - call wall_time(time1) - write(*,"(A,2X,F15.7)") ' wall time for deb_int2_grad1_u12_ao_gpu (sec) = ', (time1 - time0) - - return -end - -! 
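Note (illustration only, not part of the patch series): the CPU reference
path deleted above (the same construction survives in deb_int_2e_ao_gpu)
builds int_fct_long_range(r,i,j) = w_r * AO_i(r) * AO_j(r) on the extra grid
and then evaluates int2_grad1_u12_ao(i,j,R,m) =
sum_r int_fct_long_range(r,i,j) * grad1_u12(r,R,m) as one DGEMM per component
m: numerical quadrature recast as a matrix product. The same contraction in
miniature, via DGEMV (needs BLAS):

  program demo_quadrature_gemm
    implicit none
    integer, parameter :: n_in = 4, n_out = 2
    double precision :: w(n_in), f(n_in), g(n_in,n_out), s(n_out)
    integer :: p
    call random_number(w); call random_number(f); call random_number(g)
    ! s(P) = sum_r w(r)*f(r)*g(r,P) : the quadrature is one BLAS call
    call dgemv('T', n_in, n_out, 1.d0, g, n_in, w*f, 1, 0.d0, s, 1)
    do p = 1, n_out
      print *, s(p) - sum(w*f*g(:,p))   ! ~0 up to roundoff
    enddo
  end program demo_quadrature_gemm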
--- - subroutine deb_int_2e_ao_gpu() use cutc_module diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index bc1a118d..b74cd0cd 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -62,7 +62,7 @@ subroutine do_work_on_gpu() integer :: k, ipoint - double precision, allocatable :: aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) double precision, allocatable :: int_2e_ao(:,:,:,:) @@ -72,12 +72,19 @@ subroutine do_work_on_gpu() call wall_time(time0) print*, ' start calculation of TC-integrals' + allocate(rn(3,nucl_num)) allocate(aos_data1(n_points_final_grid,ao_num,4)) allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + do k = 1, ao_num do ipoint = 1, n_points_final_grid aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) @@ -117,8 +124,7 @@ subroutine do_work_on_gpu() n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & final_grid_points, final_weight_at_r_vector, & final_grid_points_extra, final_weight_at_r_vector_extra, & - nucl_coord, aos_data1, aos_data2, & - jBH_c, jBH_m, jBH_n, jBH_o, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & int2_grad1_u12_ao, int_2e_ao) call wall_time(cuda_time1) @@ -128,6 +134,35 @@ subroutine do_work_on_gpu() ! --- + integer :: i, j, l + double precision :: t1, t2 + double precision, external :: get_ao_two_e_integral + + call wall_time(t1) + + PROVIDE ao_integrals_map + !$OMP PARALLEL DEFAULT(NONE) & + !$OMP SHARED(ao_num, int_2e_ao, ao_integrals_map) & + !$OMP PRIVATE(i, j, k, l) + !$OMP DO COLLAPSE(3) + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ! < 1:i, 2:j | 1:k, 2:l > + int_2e_ao(k,i,l,j) = int_2e_ao(k,i,l,j) + get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(t2) + print*, ' wall time of Coulomb part of tc_int_2e_ao (min) ', (t2 - t1) / 60.d0 + + ! --- + print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") call ezfio_set_work_empty(.False.) From dee440747fe438287b4f48f36fdeaa755f3eab95 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Sun, 4 Aug 2024 11:58:19 +0200 Subject: [PATCH 10/19] devide BH jast coeff by 2 direct --- plugins/local/jastrow/bh_param.irp.f | 8 +++++++ .../non_h_ints_mu/jast_deriv_utils_vect.irp.f | 3 --- plugins/local/non_h_ints_mu/tc_integ.irp.f | 2 +- plugins/local/tc_int/compute_tc_int.irp.f | 4 ++-- plugins/local/tc_int/cutc_module.F90 | 4 ++-- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 21 ++----------------- plugins/local/tc_int/jast_grad_full.irp.f | 5 ----- plugins/local/tc_int/write_tc_int_cuda.irp.f | 4 ++-- plugins/local/tc_scf/tc_scf_energy.irp.f | 2 +- 9 files changed, 18 insertions(+), 35 deletions(-) diff --git a/plugins/local/jastrow/bh_param.irp.f b/plugins/local/jastrow/bh_param.irp.f index 1ed871bc..b9d51dd3 100644 --- a/plugins/local/jastrow/bh_param.irp.f +++ b/plugins/local/jastrow/bh_param.irp.f @@ -232,6 +232,14 @@ ! 
--- + do i_nucl = 1, nucl_num + do p = 1, jBH_size + if(jBH_m(p,i_nucl) .eq. jBH_n(p,i_nucl)) then + jBH_c(p,i_nucl) = 0.5d0 * jBH_c(p,i_nucl) + endif + enddo + enddo + print *, ' parameters for Boys-Handy Jastrow' print *, ' nb of terms per nucleus = ', jBH_size diff --git a/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f b/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f index 2c41b535..4fc537c8 100644 --- a/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f +++ b/plugins/local/non_h_ints_mu/jast_deriv_utils_vect.irp.f @@ -335,9 +335,6 @@ subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) npA = jBH_n(p,i_nucl) opA = jBH_o(p,i_nucl) tmp = jBH_c(p,i_nucl) - if(mpA .eq. npA) then - tmp = tmp * 0.5d0 - endif tmp1 = double_p(mpA) * f1A_power(mpA-1) * f2A_power(npA) + double_p(npA) * f1A_power(npA-1) * f2A_power(mpA) tmp1 = tmp1 * g12_power(opA) * tmp diff --git a/plugins/local/non_h_ints_mu/tc_integ.irp.f b/plugins/local/non_h_ints_mu/tc_integ.irp.f index 58e3db48..ce7ab101 100644 --- a/plugins/local/non_h_ints_mu/tc_integ.irp.f +++ b/plugins/local/non_h_ints_mu/tc_integ.irp.f @@ -204,7 +204,7 @@ BEGIN_PROVIDER [double precision, int2_grad1_u12_ao, (ao_num, ao_num, n_points_f print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") - call ezfio_set_work_empty(.False.) + call ezfio_set_work_empty(.False.) write(11) int2_grad1_u12_ao close(11) call ezfio_set_tc_keywords_io_tc_integ('Read') diff --git a/plugins/local/tc_int/compute_tc_int.irp.f b/plugins/local/tc_int/compute_tc_int.irp.f index 97815904..e6881f34 100644 --- a/plugins/local/tc_int/compute_tc_int.irp.f +++ b/plugins/local/tc_int/compute_tc_int.irp.f @@ -200,7 +200,7 @@ subroutine provide_int2_grad1_u12_ao() do k = 1, ao_num do ipoint = 1, n_points_final_grid - weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) + weight1 = final_weight_at_r_vector(ipoint) ao_i_r = aos_in_r_array_transp(ipoint,i) ao_k_r = aos_in_r_array_transp(ipoint,k) @@ -211,7 +211,7 @@ subroutine provide_int2_grad1_u12_ao() !$OMP END DO !$OMP END PARALLEL - call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 & + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -0.5d0 & , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num) enddo diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index b96c1bef..1f55d763 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -34,7 +34,7 @@ module cutc_module integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) + real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,3) real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) end subroutine tc_int_c @@ -66,7 +66,7 @@ module cutc_module integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4) + real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,3) real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) end subroutine deb_int_2e_ao diff --git 
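Note (illustration only, not part of the patch series): each Boys-Handy term
is the symmetrised product c_p * (f1**m * f2**n + f1**n * f2**m) * g12**o,
and for m = n the bracket is simply 2 * f1**m * f2**m, i.e. the symmetric
case is counted twice. Halving those coefficients once when the parameters
are built (bh_param.irp.f above) is therefore equivalent to the
per-grid-point test if(mpA .eq. npA) tmp = tmp * 0.5d0 that the hot loops
used to carry. A standalone equivalence check:

  program demo_bh_halving
    implicit none
    double precision :: c, f1, f2, g12, old, new
    integer :: m, n, o
    c = 0.7d0; f1 = 0.3d0; f2 = 0.5d0; g12 = 0.2d0
    m = 2; n = 2; o = 1                  ! symmetric m = n term
    ! old: halve inside the loop
    old = 0.5d0 * c * (f1**m * f2**n + f1**n * f2**m) * g12**o
    ! new: coefficient pre-halved once at read time
    new = (0.5d0 * c) * (f1**m * f2**n + f1**n * f2**m) * g12**o
    print *, old, new, dabs(old - new)   ! identical, difference 0.0
  end program demo_bh_halving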
a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index 75e3b4fe..1a43141c 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -119,7 +119,7 @@ subroutine deb_int_2e_ao_gpu() call ezfio_set_tc_int_blockxSize(sB) call ezfio_set_tc_int_nxBlocks(nB) - allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4)) + allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3)) allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num)) call deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & @@ -268,7 +268,7 @@ subroutine deb_int_2e_ao_gpu() print *, ' precision on int2_grad1_u12_ao ' err_tot = 0.d0 nrm_tot = 0.d0 - do m = 1, 4 + do m = 1, 3 do ipoint = 1, n_points_final_grid do j = 1, ao_num do i = 1, ao_num @@ -310,23 +310,6 @@ subroutine deb_int_2e_ao_gpu() enddo print *, ' absolute accuracy on int_2e_ao (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") - call ezfio_set_work_empty(.False.) - write(11) int2_grad1_u12_ao_gpu(:,:,:,1:3) - close(11) - - print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot' - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write") - call ezfio_set_work_empty(.False.) - do k = 1, ao_num - write(11) int_2e_ao_gpu(:,:,:,k) - enddo - close(11) - ! --- deallocate(int_fct_long_range, grad1_u12, c_mat) diff --git a/plugins/local/tc_int/jast_grad_full.irp.f b/plugins/local/tc_int/jast_grad_full.irp.f index 2f6abf39..943d8567 100644 --- a/plugins/local/tc_int/jast_grad_full.irp.f +++ b/plugins/local/tc_int/jast_grad_full.irp.f @@ -170,11 +170,6 @@ subroutine grad1_j12_r1_seq(r1, n_grid2, gradx, grady, gradz) npA = jBH_n(p,i_nucl) opA = jBH_o(p,i_nucl) - ! TODO to it when reading the parameters - if(mpA .eq. npA) then - tmp = tmp * 0.5d0 - endif - tmp1 = double_p(mpA) * f1A_power(mpA-1) * f2A_power(npA) + double_p(npA) * f1A_power(npA-1) * f2A_power(mpA) tmp1 = tmp1 * g12_power(opA) * tmp tmp2 = double_p(opA) * g12_power(opA-1) * (f1A_power(mpA) * f2A_power(npA) + f1A_power(npA) * f2A_power(mpA)) * tmp diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index b74cd0cd..7d0a0385 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -75,7 +75,7 @@ subroutine do_work_on_gpu() allocate(rn(3,nucl_num)) allocate(aos_data1(n_points_final_grid,ao_num,4)) allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) @@ -166,7 +166,7 @@ subroutine do_work_on_gpu() print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write") call ezfio_set_work_empty(.False.) 
- write(11) int2_grad1_u12_ao(:,:,:,1:3) + write(11) int2_grad1_u12_ao close(11) deallocate(int2_grad1_u12_ao) diff --git a/plugins/local/tc_scf/tc_scf_energy.irp.f b/plugins/local/tc_scf/tc_scf_energy.irp.f index 74ab9d05..80ef2afb 100644 --- a/plugins/local/tc_scf/tc_scf_energy.irp.f +++ b/plugins/local/tc_scf/tc_scf_energy.irp.f @@ -28,7 +28,7 @@ enddo enddo - if((three_body_h_tc .eq. .False.) .and. (.not. noL_standard)) then + if((three_body_h_tc .eqv. .False.) .and. (.not. noL_standard)) then TC_HF_three_e_energy = 0.d0 else TC_HF_three_e_energy = noL_0e From 67a2f0eb0cf7fe1290987795375ea529b355f575 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Mon, 5 Aug 2024 14:49:35 +0200 Subject: [PATCH 11/19] openmp issue solved --- plugins/local/tc_int/compute_tc_int.irp.f | 4 ++++ plugins/local/tc_int/write_tc_int_cuda.irp.f | 3 +++ 2 files changed, 7 insertions(+) diff --git a/plugins/local/tc_int/compute_tc_int.irp.f b/plugins/local/tc_int/compute_tc_int.irp.f index e6881f34..60c0d53b 100644 --- a/plugins/local/tc_int/compute_tc_int.irp.f +++ b/plugins/local/tc_int/compute_tc_int.irp.f @@ -236,9 +236,13 @@ subroutine provide_int2_grad1_u12_ao() ! --- + double precision :: tmp_omp + call wall_time(time1) PROVIDE ao_integrals_map + tmp_omp = get_ao_two_e_integral(1, 1, 1, 1, ao_integrals_map) + !$OMP PARALLEL DEFAULT(NONE) & !$OMP SHARED(ao_num, tc_int_2e_ao, ao_integrals_map) & !$OMP PRIVATE(i, j, k, l) diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index 7d0a0385..212518ee 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -136,11 +136,14 @@ subroutine do_work_on_gpu() integer :: i, j, l double precision :: t1, t2 + double precision :: tmp double precision, external :: get_ao_two_e_integral call wall_time(t1) PROVIDE ao_integrals_map + tmp = get_ao_two_e_integral(1, 1, 1, 1, ao_integrals_map) + !$OMP PARALLEL DEFAULT(NONE) & !$OMP SHARED(ao_num, int_2e_ao, ao_integrals_map) & !$OMP PRIVATE(i, j, k, l) From 4744fc7360fbb6b0aced8c2da89934a2ba0dd491 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Mon, 5 Aug 2024 14:50:37 +0200 Subject: [PATCH 12/19] openmp issue solved --- plugins/local/tc_bi_ortho/tc_utils.irp.f | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/local/tc_bi_ortho/tc_utils.irp.f b/plugins/local/tc_bi_ortho/tc_utils.irp.f index 2aa148a3..55263be7 100644 --- a/plugins/local/tc_bi_ortho/tc_utils.irp.f +++ b/plugins/local/tc_bi_ortho/tc_utils.irp.f @@ -11,6 +11,8 @@ subroutine write_tc_energy() ! GS ! 
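Note (illustration only, not part of the patch series): both "openmp issue
solved" commits apply the same idiom. ao_integrals_map is initialised lazily
on first access, so get_ao_two_e_integral is called once, serially, before
the !$OMP PARALLEL region; inside the region every thread then only reads the
already-built map. The hunk just below applies the same trick to
write_tc_energy with a first serial call to htilde_mu_mat_opt_bi_ortho.
A generic sketch of the idiom (plain OpenMP, not QP code):

  program demo_omp_warmup
    use omp_lib
    implicit none
    double precision :: tmp
    tmp = lazy_value()     ! serial first call builds the cache
    !$omp parallel
    print *, omp_get_thread_num(), lazy_value()   ! threads only read it
    !$omp end parallel
  contains
    function lazy_value() result(v)
      double precision :: v
      double precision, save :: cache = -1.d0
      if(cache < 0.d0) cache = 42.d0   ! unsafe if first run in parallel
      v = cache
    end function lazy_value
  end program demo_omp_warmup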
--- + call htilde_mu_mat_opt_bi_ortho(psi_det(1,1,1), psi_det(1,1,1), N_int, hmono, htwoe, hthree, htot) + allocate(E_TC_tmp(N_det), E_1e_tmp(N_det), E_2e_tmp(N_det), E_3e_tmp(N_det)) !$OMP PARALLEL & From 84445aa591668092ac3f0a8c80508fb72f099acc Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Thu, 8 Aug 2024 11:39:42 +0200 Subject: [PATCH 13/19] Combine calculation of Left & RIGHT MOs in r --- plugins/local/bi_ort_ints/bi_ort_ints.irp.f | 56 +++++- plugins/local/bi_ort_ints/no_dressing.irp.f | 2 + .../local/bi_ortho_mos/bi_ort_mos_in_r.irp.f | 165 ++++++------------ .../bi_ortho_mos/bi_ort_mos_in_r_old.irp.f | 137 +++++++++++++++ plugins/local/tc_bi_ortho/tc_utils.irp.f | 2 + 5 files changed, 246 insertions(+), 116 deletions(-) create mode 100644 plugins/local/bi_ortho_mos/bi_ort_mos_in_r_old.irp.f diff --git a/plugins/local/bi_ort_ints/bi_ort_ints.irp.f b/plugins/local/bi_ort_ints/bi_ort_ints.irp.f index 0349c731..b691e47e 100644 --- a/plugins/local/bi_ort_ints/bi_ort_ints.irp.f +++ b/plugins/local/bi_ort_ints/bi_ort_ints.irp.f @@ -17,12 +17,14 @@ program bi_ort_ints ! call test_3e ! call test_5idx ! call test_5idx2 - call test_4idx() +! call test_4idx() !call test_4idx_n4() !call test_4idx2() !call test_5idx2 !call test_5idx + call test_mos_in_r() + end subroutine test_5idx2 @@ -472,4 +474,56 @@ subroutine test_4idx() return end +! --- + +subroutine test_mos_in_r() + + implicit none + + integer :: i, j + double precision :: err_tot, nrm_tot, err_loc, acc_thr + + PROVIDE mos_l_in_r_array_transp_old mos_r_in_r_array_transp_old + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + acc_thr = 1d-12 + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, n_points_final_grid + err_loc = dabs(mos_l_in_r_array_transp_old(j,i) - mos_l_in_r_array_transp(j,i)) + if(err_loc > acc_thr) then + print*, " error on", j, i + print*, " old res", mos_l_in_r_array_transp_old(j,i) + print*, " new res", mos_l_in_r_array_transp (j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(mos_l_in_r_array_transp_old(j,i)) + enddo + enddo + print *, ' absolute accuracy on mos_l_in_r_array_transp (%) =', 100.d0 * err_tot / nrm_tot + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, n_points_final_grid + err_loc = dabs(mos_r_in_r_array_transp_old(j,i) - mos_r_in_r_array_transp(j,i)) + if(err_loc > acc_thr) then + print*, " error on", j, i + print*, " old res", mos_r_in_r_array_transp_old(j,i) + print*, " new res", mos_r_in_r_array_transp (j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(mos_r_in_r_array_transp_old(j,i)) + enddo + enddo + print *, ' absolute accuracy on mos_r_in_r_array_transp (%) =', 100.d0 * err_tot / nrm_tot + + return +end + +! --- diff --git a/plugins/local/bi_ort_ints/no_dressing.irp.f b/plugins/local/bi_ort_ints/no_dressing.irp.f index fd2c6285..3bbf31f9 100644 --- a/plugins/local/bi_ort_ints/no_dressing.irp.f +++ b/plugins/local/bi_ort_ints/no_dressing.irp.f @@ -710,6 +710,8 @@ BEGIN_PROVIDER [double precision, noL_0e] endif + print*, " noL_0e =", noL_0e + END_PROVIDER ! --- diff --git a/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f b/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f index 25572854..15ed2ce4 100644 --- a/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f +++ b/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f @@ -1,135 +1,70 @@ -! TODO: left & right MO without duplicate AO calculation - -! 
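Note (illustration only, not part of the patch series): the test driver added
in this commit (and the analogous test_int2_grad1_u12_bimo_t in the next)
stops at the first element whose deviation exceeds acc_thr and otherwise
reports 100 * sum|old - new| / sum|old| : an L1 relative error over the whole
array, printed as a percentage. The same metric on whole arrays:

  program demo_rel_err
    implicit none
    double precision :: old(4), new(4)
    old = [1.d0, -2.d0, 3.d0, -4.d0]
    new = old + 1.d-14
    print *, 'accuracy (%) =', 100.d0 * sum(abs(old - new)) / sum(abs(old))
  end program demo_rel_err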
--- - -BEGIN_PROVIDER[double precision, mos_r_in_r_array, (mo_num, n_points_final_grid)] + BEGIN_PROVIDER[double precision, mos_l_in_r_array_transp, (n_points_final_grid, mo_num)] +&BEGIN_PROVIDER[double precision, mos_r_in_r_array_transp, (n_points_final_grid, mo_num)] BEGIN_DOC - ! mos_in_r_array(i,j) = value of the ith RIGHT mo on the jth grid point + ! + ! mos_l_in_r_array_transp(i,j) = value of the jth left-mo on the ith grid point + ! mos_r_in_r_array_transp(i,j) = value of the jth right-mo on the ith grid point + ! END_DOC implicit none - integer :: i, j - double precision :: mos_array(mo_num), r(3) - !$OMP PARALLEL DO & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, r, mos_array) & - !$OMP SHARED (mos_r_in_r_array, n_points_final_grid, mo_num, final_grid_points) + integer :: i + double precision :: tt0, tt1, tt2, tt3 + double precision :: r(3) + double precision, allocatable :: aos_r(:,:) + + call cpu_time(tt0) + + allocate(aos_r(ao_num,n_points_final_grid)) + + ! provide everything required before OpenMP + r(1) = final_grid_points(1,1) + r(2) = final_grid_points(2,1) + r(3) = final_grid_points(3,1) + call give_all_aos_at_r(r, aos_r(1,1)) + + + call cpu_time(tt2) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, r) & + !$OMP SHARED(n_points_final_grid, final_grid_points, aos_r) + !$OMP DO do i = 1, n_points_final_grid r(1) = final_grid_points(1,i) r(2) = final_grid_points(2,i) r(3) = final_grid_points(3,i) - call give_all_mos_r_at_r(r, mos_array) - do j = 1, mo_num - mos_r_in_r_array(j,i) = mos_array(j) - enddo + call give_all_aos_at_r(r, aos_r(1,i)) enddo - !$OMP END PARALLEL DO - -END_PROVIDER + !$OMP END DO + !$OMP END PARALLEL -! --- + call cpu_time(tt3) + write(*,"(A,2X,F15.7)") ' wall time for AOs on r (sec) = ', (tt3 - tt2) -BEGIN_PROVIDER[double precision, mos_r_in_r_array_transp, (n_points_final_grid, mo_num)] - BEGIN_DOC - ! mos_r_in_r_array_transp(i,j) = value of the jth mo on the ith grid point - END_DOC + call dgemm("T", "N", n_points_final_grid, mo_num, ao_num, & + 1.d0, & + aos_r(1,1), ao_num, & + mo_l_coef(1,1), ao_num, & + 0.d0, & + mos_l_in_r_array_transp(1,1), n_points_final_grid) - implicit none - integer :: i,j + call dgemm("T", "N", n_points_final_grid, mo_num, ao_num, & + 1.d0, & + aos_r(1,1), ao_num, & + mo_r_coef(1,1), ao_num, & + 0.d0, & + mos_r_in_r_array_transp(1,1), n_points_final_grid) - do i = 1, n_points_final_grid - do j = 1, mo_num - mos_r_in_r_array_transp(i,j) = mos_r_in_r_array(j,i) - enddo - enddo - -END_PROVIDER - -! --- - -subroutine give_all_mos_r_at_r(r, mos_r_array) - - BEGIN_DOC - ! mos_r_array(i) = ith RIGHT MO function evaluated at "r" - END_DOC - - implicit none - double precision, intent(in) :: r(3) - double precision, intent(out) :: mos_r_array(mo_num) - double precision :: aos_array(ao_num) - - call give_all_aos_at_r(r, aos_array) - call dgemv('N', mo_num, ao_num, 1.d0, mo_r_coef_transp, mo_num, aos_array, 1, 0.d0, mos_r_array, 1) - -end subroutine give_all_mos_r_at_r - -! --- - -BEGIN_PROVIDER[double precision, mos_l_in_r_array, (mo_num, n_points_final_grid)] - - BEGIN_DOC - ! 
mos_in_r_array(i,j) = value of the ith LEFT mo on the jth grid point - END_DOC - - implicit none - integer :: i, j - double precision :: mos_array(mo_num), r(3) - - !$OMP PARALLEL DO & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,r,mos_array,j) & - !$OMP SHARED(mos_l_in_r_array,n_points_final_grid,mo_num,final_grid_points) - do i = 1, n_points_final_grid - r(1) = final_grid_points(1,i) - r(2) = final_grid_points(2,i) - r(3) = final_grid_points(3,i) - call give_all_mos_l_at_r(r, mos_array) - do j = 1, mo_num - mos_l_in_r_array(j,i) = mos_array(j) - enddo - enddo - !$OMP END PARALLEL DO - -END_PROVIDER - -! --- - -subroutine give_all_mos_l_at_r(r, mos_l_array) - - BEGIN_DOC - ! mos_l_array(i) = ith LEFT MO function evaluated at "r" - END_DOC - - implicit none - double precision, intent(in) :: r(3) - double precision, intent(out) :: mos_l_array(mo_num) - double precision :: aos_array(ao_num) - - call give_all_aos_at_r(r, aos_array) - call dgemv('N', mo_num, ao_num, 1.d0, mo_l_coef_transp, mo_num, aos_array, 1, 0.d0, mos_l_array, 1) - -end subroutine give_all_mos_l_at_r - -! --- - -BEGIN_PROVIDER[double precision, mos_l_in_r_array_transp, (n_points_final_grid,mo_num)] - - BEGIN_DOC - ! mos_l_in_r_array_transp(i,j) = value of the jth mo on the ith grid point - END_DOC - - implicit none - integer :: i, j - - do i = 1, n_points_final_grid - do j = 1, mo_num - mos_l_in_r_array_transp(i,j) = mos_l_in_r_array(j,i) - enddo - enddo + deallocate(aos_r) + + call cpu_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for mos_l_in_r_array_transp & mos_r_in_r_array_transp (sec) = ', (tt1 - tt0) END_PROVIDER diff --git a/plugins/local/bi_ortho_mos/bi_ort_mos_in_r_old.irp.f b/plugins/local/bi_ortho_mos/bi_ort_mos_in_r_old.irp.f new file mode 100644 index 00000000..9fd671f8 --- /dev/null +++ b/plugins/local/bi_ortho_mos/bi_ort_mos_in_r_old.irp.f @@ -0,0 +1,137 @@ + +! TODO: left & right MO without duplicate AO calculation + +! --- + +BEGIN_PROVIDER[double precision, mos_r_in_r_array, (mo_num, n_points_final_grid)] + + BEGIN_DOC + ! mos_in_r_array(i,j) = value of the ith RIGHT mo on the jth grid point + END_DOC + + implicit none + integer :: i, j + double precision :: mos_array(mo_num), r(3) + + !$OMP PARALLEL DO & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, r, mos_array) & + !$OMP SHARED (mos_r_in_r_array, n_points_final_grid, mo_num, final_grid_points) + do i = 1, n_points_final_grid + r(1) = final_grid_points(1,i) + r(2) = final_grid_points(2,i) + r(3) = final_grid_points(3,i) + call give_all_mos_r_at_r(r, mos_array) + do j = 1, mo_num + mos_r_in_r_array(j,i) = mos_array(j) + enddo + enddo + !$OMP END PARALLEL DO + +END_PROVIDER + +! --- + +BEGIN_PROVIDER[double precision, mos_r_in_r_array_transp_old, (n_points_final_grid, mo_num)] + + BEGIN_DOC + ! mos_r_in_r_array_transp_old(i,j) = value of the jth mo on the ith grid point + END_DOC + + implicit none + integer :: i,j + + do i = 1, n_points_final_grid + do j = 1, mo_num + mos_r_in_r_array_transp_old(i,j) = mos_r_in_r_array(j,i) + enddo + enddo + +END_PROVIDER + +! --- + +subroutine give_all_mos_r_at_r(r, mos_r_array) + + BEGIN_DOC + ! mos_r_array(i) = ith RIGHT MO function evaluated at "r" + END_DOC + + implicit none + double precision, intent(in) :: r(3) + double precision, intent(out) :: mos_r_array(mo_num) + double precision :: aos_array(ao_num) + + call give_all_aos_at_r(r, aos_array) + call dgemv('N', mo_num, ao_num, 1.d0, mo_r_coef_transp, mo_num, aos_array, 1, 0.d0, mos_r_array, 1) + +end subroutine give_all_mos_r_at_r + +! 
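Note (illustration only, not part of the patch series): the two dgemm calls
above compute M(p,i) = sum_mu AO(mu,p) * C(mu,i); with op(A) = 'T' the
(ao_num, n_points) AO table is contracted against each coefficient matrix to
yield the (n_points, mo_num) transposed MO tables directly, so the AOs are
evaluated once and reused for both the left and the right set. A minimal
shape check (needs BLAS):

  program demo_dgemm_shapes
    implicit none
    integer, parameter :: nao = 2, npts = 3, nmo = 2
    double precision :: ao(nao,npts), c(nao,nmo), mo_t(npts,nmo)
    ao = reshape([1.d0,0.d0, 0.d0,1.d0, 1.d0,1.d0], [nao,npts])
    c  = reshape([1.d0,2.d0, 3.d0,4.d0], [nao,nmo])
    call dgemm('T', 'N', npts, nmo, nao, 1.d0, ao, nao, c, nao, &
               0.d0, mo_t, npts)
    print *, mo_t(:,1)   ! 1 2 3 : mo_t(p,i) = sum_mu ao(mu,p)*c(mu,i)
    print *, mo_t(:,2)   ! 3 4 7
  end program demo_dgemm_shapes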
--- + +BEGIN_PROVIDER[double precision, mos_l_in_r_array, (mo_num, n_points_final_grid)] + + BEGIN_DOC + ! mos_in_r_array(i,j) = value of the ith LEFT mo on the jth grid point + END_DOC + + implicit none + integer :: i, j + double precision :: mos_array(mo_num), r(3) + + !$OMP PARALLEL DO & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,r,mos_array,j) & + !$OMP SHARED(mos_l_in_r_array,n_points_final_grid,mo_num,final_grid_points) + do i = 1, n_points_final_grid + r(1) = final_grid_points(1,i) + r(2) = final_grid_points(2,i) + r(3) = final_grid_points(3,i) + call give_all_mos_l_at_r(r, mos_array) + do j = 1, mo_num + mos_l_in_r_array(j,i) = mos_array(j) + enddo + enddo + !$OMP END PARALLEL DO + +END_PROVIDER + +! --- + +subroutine give_all_mos_l_at_r(r, mos_l_array) + + BEGIN_DOC + ! mos_l_array(i) = ith LEFT MO function evaluated at "r" + END_DOC + + implicit none + double precision, intent(in) :: r(3) + double precision, intent(out) :: mos_l_array(mo_num) + double precision :: aos_array(ao_num) + + call give_all_aos_at_r(r, aos_array) + call dgemv('N', mo_num, ao_num, 1.d0, mo_l_coef_transp, mo_num, aos_array, 1, 0.d0, mos_l_array, 1) + +end subroutine give_all_mos_l_at_r + +! --- + +BEGIN_PROVIDER[double precision, mos_l_in_r_array_transp_old, (n_points_final_grid,mo_num)] + + BEGIN_DOC + ! mos_l_in_r_array_transp_old(i,j) = value of the jth mo on the ith grid point + END_DOC + + implicit none + integer :: i, j + + do i = 1, n_points_final_grid + do j = 1, mo_num + mos_l_in_r_array_transp_old(i,j) = mos_l_in_r_array(j,i) + enddo + enddo + +END_PROVIDER + +! --- + diff --git a/plugins/local/tc_bi_ortho/tc_utils.irp.f b/plugins/local/tc_bi_ortho/tc_utils.irp.f index 2aa148a3..4dfd4316 100644 --- a/plugins/local/tc_bi_ortho/tc_utils.irp.f +++ b/plugins/local/tc_bi_ortho/tc_utils.irp.f @@ -8,6 +8,8 @@ subroutine write_tc_energy() double precision :: E_1e, E_2e, E_3e double precision, allocatable :: E_TC_tmp(:), E_1e_tmp(:), E_2e_tmp(:), E_3e_tmp(:) + call htilde_mu_mat_opt_bi_ortho(psi_det(1,1,1), psi_det(1,1,1), N_int, hmono, htwoe, hthree, htot) + ! GS ! --- From df1c27c7672a9236ab8b417a766c4b569bcd1735 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Thu, 8 Aug 2024 12:47:47 +0200 Subject: [PATCH 14/19] Optim in int2_grad1_u12_bimo_t --- plugins/local/bi_ort_ints/bi_ort_ints.irp.f | 41 +- .../local/bi_ort_ints/semi_num_ints_mo.irp.f | 374 ++---------------- .../bi_ort_ints/semi_num_ints_mo_old.irp.f | 362 +++++++++++++++++ .../local/bi_ortho_mos/bi_ort_mos_in_r.irp.f | 8 +- 4 files changed, 440 insertions(+), 345 deletions(-) create mode 100644 plugins/local/bi_ort_ints/semi_num_ints_mo_old.irp.f diff --git a/plugins/local/bi_ort_ints/bi_ort_ints.irp.f b/plugins/local/bi_ort_ints/bi_ort_ints.irp.f index b691e47e..0398a18f 100644 --- a/plugins/local/bi_ort_ints/bi_ort_ints.irp.f +++ b/plugins/local/bi_ort_ints/bi_ort_ints.irp.f @@ -24,6 +24,7 @@ program bi_ort_ints !call test_5idx call test_mos_in_r() + call test_int2_grad1_u12_bimo_t() end @@ -486,7 +487,7 @@ subroutine test_mos_in_r() PROVIDE mos_l_in_r_array_transp_old mos_r_in_r_array_transp_old PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp - acc_thr = 1d-12 + acc_thr = 1d-13 err_tot = 0.d0 nrm_tot = 0.d0 @@ -527,3 +528,41 @@ end ! 
--- +subroutine test_int2_grad1_u12_bimo_t() + + implicit none + integer :: i, j, ipoint, m + double precision :: err_tot, nrm_tot, err_loc, acc_thr + + PROVIDE int2_grad1_u12_bimo_t_old + PROVIDE int2_grad1_u12_bimo_t + + acc_thr = 1d-13 + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(int2_grad1_u12_bimo_t_old(ipoint,m,j,i) - int2_grad1_u12_bimo_t(ipoint,m,j,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, j, i + print*, " old res", int2_grad1_u12_bimo_t_old(ipoint,m,j,i) + print*, " new res", int2_grad1_u12_bimo_t (ipoint,m,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int2_grad1_u12_bimo_t_old(ipoint,m,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on int2_grad1_u12_bimo_t (%) =', 100.d0 * err_tot / nrm_tot + + return +end + +! --- + + diff --git a/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f b/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f index 77e4cb9b..1fd5d666 100644 --- a/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f +++ b/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f @@ -1,360 +1,54 @@ ! --- -! TODO :: optimization : transform into a DGEMM - -BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo_num, n_points_final_grid)] - - BEGIN_DOC - ! - ! mo_v_ki_bi_ortho_erf_rk_cst_mu(k,i,ip) = int dr chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1 )/(2|r - R_ip|) on the BI-ORTHO MO basis - ! - ! where phi_k(r) is a LEFT MOs and phi_i(r) is a RIGHT MO - ! - ! R_ip = the "ip"-th point of the DFT Grid - ! - END_DOC - - implicit none - integer :: ipoint - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid,v_ij_erf_rk_cst_mu,mo_v_ki_bi_ortho_erf_rk_cst_mu) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho( v_ij_erf_rk_cst_mu (1,1,ipoint), size(v_ij_erf_rk_cst_mu, 1) & - , mo_v_ki_bi_ortho_erf_rk_cst_mu(1,1,ipoint), size(mo_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - - mo_v_ki_bi_ortho_erf_rk_cst_mu = mo_v_ki_bi_ortho_erf_rk_cst_mu * 0.5d0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, mo_num, mo_num)] - - BEGIN_DOC - ! - ! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/(2|r - R|) on the BI-ORTHO MO basis - ! - END_DOC - - implicit none - integer :: ipoint, i, j - - do i = 1, mo_num - do j = 1, mo_num - do ipoint = 1, n_points_final_grid - mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,j,i) = mo_v_ki_bi_ortho_erf_rk_cst_mu(j,i,ipoint) - enddo - enddo - enddo - - !FREE mo_v_ki_bi_ortho_erf_rk_cst_mu - -END_PROVIDER - -! --- - -! TODO :: optimization : transform into a DGEMM - -BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo_num, 3, n_points_final_grid)] - - BEGIN_DOC - ! - ! mo_x_v_ki_bi_ortho_erf_rk_cst_mu(k,i,m,ip) = int dr x(m) * chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1)/2|r - R_ip| on the BI-ORTHO MO basis - ! - ! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => x(m) = x, m=2 => x(m) = y, m=3 => x(m) = z, - ! - ! R_ip = the "ip"-th point of the DFT Grid - ! 
- END_DOC - - implicit none - integer :: ipoint - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid,x_v_ij_erf_rk_cst_mu_transp,mo_x_v_ki_bi_ortho_erf_rk_cst_mu) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - - call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,1,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & - , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,1,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) - call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,2,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & - , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,2,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) - call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,3,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & - , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,3,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) - - enddo - !$OMP END DO - !$OMP END PARALLEL - - mo_x_v_ki_bi_ortho_erf_rk_cst_mu = 0.5d0 * mo_x_v_ki_bi_ortho_erf_rk_cst_mu - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_transp, (ao_num, ao_num, 3, n_points_final_grid)] - - implicit none - integer :: i, j, ipoint - double precision :: wall0, wall1 - - !print *, ' providing int2_grad1_u12_ao_transp ...' - !call wall_time(wall0) - - if(test_cycle_tc) then - - PROVIDE int2_grad1_u12_ao_test - - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = 1, ao_num - int2_grad1_u12_ao_transp(j,i,1,ipoint) = int2_grad1_u12_ao_test(j,i,ipoint,1) - int2_grad1_u12_ao_transp(j,i,2,ipoint) = int2_grad1_u12_ao_test(j,i,ipoint,2) - int2_grad1_u12_ao_transp(j,i,3,ipoint) = int2_grad1_u12_ao_test(j,i,ipoint,3) - enddo - enddo - enddo - - FREE int2_grad1_u12_ao_test - - else - - PROVIDE int2_grad1_u12_ao - - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = 1, ao_num - int2_grad1_u12_ao_transp(j,i,1,ipoint) = int2_grad1_u12_ao(j,i,ipoint,1) - int2_grad1_u12_ao_transp(j,i,2,ipoint) = int2_grad1_u12_ao(j,i,ipoint,2) - int2_grad1_u12_ao_transp(j,i,3,ipoint) = int2_grad1_u12_ao(j,i,ipoint,3) - enddo - enddo - enddo - - endif - - !call wall_time(wall1) - !print *, ' wall time for int2_grad1_u12_ao_transp (min) = ', (wall1 - wall0) / 60.d0 - !call print_memory_usage() - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, int2_grad1_u12_bimo_transp, (mo_num, mo_num, 3, n_points_final_grid)] - - implicit none - integer :: ipoint - double precision :: wall0, wall1 - - PROVIDE mo_l_coef mo_r_coef - PROVIDE int2_grad1_u12_ao_transp - - !print *, ' providing int2_grad1_u12_bimo_transp ...' 
- !call wall_time(wall0) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid,int2_grad1_u12_ao_transp,int2_grad1_u12_bimo_transp) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,1,ipoint), size(int2_grad1_u12_ao_transp , 1) & - , int2_grad1_u12_bimo_transp(1,1,1,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) - call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,2,ipoint), size(int2_grad1_u12_ao_transp , 1) & - , int2_grad1_u12_bimo_transp(1,1,2,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) - call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,3,ipoint), size(int2_grad1_u12_ao_transp , 1) & - , int2_grad1_u12_bimo_transp(1,1,3,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - - !FREE int2_grad1_u12_ao_transp - - !call wall_time(wall1) - !print *, ' wall time for int2_grad1_u12_bimo_transp (min) =', (wall1 - wall0) / 60.d0 - !call print_memory_usage() - -END_PROVIDER - -! --- - BEGIN_PROVIDER [double precision, int2_grad1_u12_bimo_t, (n_points_final_grid, 3, mo_num, mo_num)] implicit none - integer :: i, j, ipoint - double precision :: wall0, wall1 - - !call wall_time(wall0) - !print *, ' providing int2_grad1_u12_bimo_t ...' + integer :: i, j, ipoint + double precision :: tt1, tt2 + double precision, allocatable :: tmp(:,:,:,:) PROVIDE mo_l_coef mo_r_coef - PROVIDE int2_grad1_u12_bimo_transp + PROVIDE int2_grad1_u12_ao + call wall_time(tt1) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp (1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp (1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp (1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) do ipoint = 1, n_points_final_grid do i = 1, mo_num do j = 1, mo_num - int2_grad1_u12_bimo_t(ipoint,1,j,i) = int2_grad1_u12_bimo_transp(j,i,1,ipoint) - int2_grad1_u12_bimo_t(ipoint,2,j,i) = int2_grad1_u12_bimo_transp(j,i,2,ipoint) - int2_grad1_u12_bimo_t(ipoint,3,j,i) = int2_grad1_u12_bimo_transp(j,i,3,ipoint) + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) enddo enddo enddo + !$OMP END DO + !$OMP END PARALLEL - FREE int2_grad1_u12_bimo_transp + deallocate(tmp) - !call wall_time(wall1) - !print *, ' wall time for int2_grad1_u12_bimo_t (min) =', (wall1 - wall0) / 60.d0 - !call print_memory_usage() - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [double precision, int2_grad1_u12_ao_t, (n_points_final_grid, 3, ao_num, ao_num)] - - implicit none - integer :: i, j, ipoint - double precision :: wall0, wall1 - - !call wall_time(wall0) - !print *, ' providing int2_grad1_u12_ao_t ...' 
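Note (illustration only, not part of the patch series): the rewritten
provider transforms AO -> MO per grid point into a scratch array laid out
(mo_num, mo_num, n_grid, 3) and then reorders it once so the grid index comes
first in int2_grad1_u12_bimo_t; downstream sweeps over grid points at fixed
(m, j, i) then touch contiguous, stride-1 memory. The reordering in
isolation:

  program demo_grid_first_layout
    implicit none
    integer, parameter :: nmo = 3, ngrid = 5
    double precision :: tmp(nmo,nmo,ngrid), t(ngrid,nmo,nmo)
    integer :: i, j, p
    call random_number(tmp)
    do i = 1, nmo
      do j = 1, nmo
        do p = 1, ngrid
          t(p,j,i) = tmp(j,i,p)   ! one-off transpose: grid index first
        enddo
      enddo
    enddo
    ! a sweep over p at fixed (j,i) is now stride-1 in t
    print *, maxval(abs(t(:,1,2) - tmp(1,2,:)))   ! 0.0
  end program demo_grid_first_layout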
- - PROVIDE int2_grad1_u12_ao - - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = 1, ao_num - int2_grad1_u12_ao_t(ipoint,1,j,i) = int2_grad1_u12_ao(j,i,ipoint,1) - int2_grad1_u12_ao_t(ipoint,2,j,i) = int2_grad1_u12_ao(j,i,ipoint,2) - int2_grad1_u12_ao_t(ipoint,3,j,i) = int2_grad1_u12_ao(j,i,ipoint,3) - enddo - enddo - enddo - - !call wall_time(wall1) - !print *, ' wall time for int2_grad1_u12_ao_t (min) =', (wall1 - wall0) / 60.d0 - !call print_memory_usage() - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, 3, mo_num, mo_num)] - - implicit none - integer :: i, j, ipoint - - do i = 1, mo_num - do j = 1, mo_num - do ipoint = 1, n_points_final_grid - mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,1,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,1,ipoint) - mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,2,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,2,ipoint) - mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,3,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,3,ipoint) - enddo - enddo - enddo -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk, (n_points_final_grid, 3, mo_num, mo_num)] - - BEGIN_DOC - ! - ! x_W_ki_bi_ortho_erf_rk(ip,m,k,i) = \int dr chi_k(r) \frac{(1 - erf(mu |r-R_ip|))}{2|r-R_ip|} (x(m)-R_ip(m)) phi_i(r) ON THE BI-ORTHO MO BASIS - ! - ! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => X(m) = x, m=2 => X(m) = y, m=3 => X(m) = z, - ! - ! R_ip = the "ip"-th point of the DFT Grid - END_DOC - - implicit none - include 'constants.include.F' - - integer :: ipoint, m, i, k - double precision :: xyz - double precision :: wall0, wall1 - - !print*, ' providing x_W_ki_bi_ortho_erf_rk ...' - !call wall_time(wall0) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint,m,i,k,xyz) & - !$OMP SHARED (x_W_ki_bi_ortho_erf_rk,n_points_final_grid,mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_num,final_grid_points) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num - do k = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - xyz = final_grid_points(m,ipoint) - x_W_ki_bi_ortho_erf_rk(ipoint,m,k,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,m,k,i) - xyz * mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,k,i) - enddo - enddo - enddo - enddo - - !$OMP END DO - !$OMP END PARALLEL - - ! FREE mo_v_ki_bi_ortho_erf_rk_cst_mu_transp - ! FREE mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp - - !call wall_time(wall1) - !print *, ' time to provide x_W_ki_bi_ortho_erf_rk = ', wall1 - wall0 - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk_diag, (n_points_final_grid, 3, mo_num)] - BEGIN_DOC - ! x_W_ki_bi_ortho_erf_rk_diag(ip,m,i) = \int dr chi_i(r) (1 - erf(mu |r-R_ip|)) (x(m)-X(m)_ip) phi_i(r) ON THE BI-ORTHO MO BASIS -! -! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => X(m) = x, m=2 => X(m) = y, m=3 => X(m) = z, -! -! R_ip = the "ip"-th point of the DFT Grid - END_DOC - - implicit none - include 'constants.include.F' - - integer :: ipoint, m, i - double precision :: xyz - double precision :: wall0, wall1 - - !print*,'providing x_W_ki_bi_ortho_erf_rk_diag ...' 
- !call wall_time(wall0) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint,m,i,xyz) & - !$OMP SHARED (x_W_ki_bi_ortho_erf_rk_diag,n_points_final_grid,mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_num,final_grid_points) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - xyz = final_grid_points(m,ipoint) - x_W_ki_bi_ortho_erf_rk_diag(ipoint,m,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,m,i,i) - xyz * mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,i,i) - enddo - enddo - enddo - - !$OMP END DO - !$OMP END PARALLEL - - !call wall_time(wall1) - !print*,'time to provide x_W_ki_bi_ortho_erf_rk_diag = ',wall1 - wall0 + call wall_time(tt2) + write(*,"(A,2X,F15.7)") ' wall time for int2_grad1_u12_bimo_t (sec) = ', (tt2 - tt1) END_PROVIDER diff --git a/plugins/local/bi_ort_ints/semi_num_ints_mo_old.irp.f b/plugins/local/bi_ort_ints/semi_num_ints_mo_old.irp.f new file mode 100644 index 00000000..c2b9ad6d --- /dev/null +++ b/plugins/local/bi_ort_ints/semi_num_ints_mo_old.irp.f @@ -0,0 +1,362 @@ + +! --- + +! TODO :: optimization : transform into a DGEMM + +BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! mo_v_ki_bi_ortho_erf_rk_cst_mu(k,i,ip) = int dr chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1 )/(2|r - R_ip|) on the BI-ORTHO MO basis + ! + ! where phi_k(r) is a LEFT MOs and phi_i(r) is a RIGHT MO + ! + ! R_ip = the "ip"-th point of the DFT Grid + ! + END_DOC + + implicit none + integer :: ipoint + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid,v_ij_erf_rk_cst_mu,mo_v_ki_bi_ortho_erf_rk_cst_mu) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho( v_ij_erf_rk_cst_mu (1,1,ipoint), size(v_ij_erf_rk_cst_mu, 1) & + , mo_v_ki_bi_ortho_erf_rk_cst_mu(1,1,ipoint), size(mo_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + + mo_v_ki_bi_ortho_erf_rk_cst_mu = mo_v_ki_bi_ortho_erf_rk_cst_mu * 0.5d0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, mo_num, mo_num)] + + BEGIN_DOC + ! + ! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/(2|r - R|) on the BI-ORTHO MO basis + ! + END_DOC + + implicit none + integer :: ipoint, i, j + + do i = 1, mo_num + do j = 1, mo_num + do ipoint = 1, n_points_final_grid + mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,j,i) = mo_v_ki_bi_ortho_erf_rk_cst_mu(j,i,ipoint) + enddo + enddo + enddo + + !FREE mo_v_ki_bi_ortho_erf_rk_cst_mu + +END_PROVIDER + +! --- + +! TODO :: optimization : transform into a DGEMM + +BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo_num, 3, n_points_final_grid)] + + BEGIN_DOC + ! + ! mo_x_v_ki_bi_ortho_erf_rk_cst_mu(k,i,m,ip) = int dr x(m) * chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1)/2|r - R_ip| on the BI-ORTHO MO basis + ! + ! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => x(m) = x, m=2 => x(m) = y, m=3 => x(m) = z, + ! + ! R_ip = the "ip"-th point of the DFT Grid + ! 
+ END_DOC + + implicit none + integer :: ipoint + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid,x_v_ij_erf_rk_cst_mu_transp,mo_x_v_ki_bi_ortho_erf_rk_cst_mu) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + + call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,1,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & + , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,1,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,2,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & + , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,2,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,3,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & + , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,3,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + + enddo + !$OMP END DO + !$OMP END PARALLEL + + mo_x_v_ki_bi_ortho_erf_rk_cst_mu = 0.5d0 * mo_x_v_ki_bi_ortho_erf_rk_cst_mu + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_transp, (ao_num, ao_num, 3, n_points_final_grid)] + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + !print *, ' providing int2_grad1_u12_ao_transp ...' + !call wall_time(wall0) + + if(test_cycle_tc) then + + PROVIDE int2_grad1_u12_ao_test + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + int2_grad1_u12_ao_transp(j,i,1,ipoint) = int2_grad1_u12_ao_test(j,i,ipoint,1) + int2_grad1_u12_ao_transp(j,i,2,ipoint) = int2_grad1_u12_ao_test(j,i,ipoint,2) + int2_grad1_u12_ao_transp(j,i,3,ipoint) = int2_grad1_u12_ao_test(j,i,ipoint,3) + enddo + enddo + enddo + + FREE int2_grad1_u12_ao_test + + else + + PROVIDE int2_grad1_u12_ao + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + int2_grad1_u12_ao_transp(j,i,1,ipoint) = int2_grad1_u12_ao(j,i,ipoint,1) + int2_grad1_u12_ao_transp(j,i,2,ipoint) = int2_grad1_u12_ao(j,i,ipoint,2) + int2_grad1_u12_ao_transp(j,i,3,ipoint) = int2_grad1_u12_ao(j,i,ipoint,3) + enddo + enddo + enddo + + endif + + !call wall_time(wall1) + !print *, ' wall time for int2_grad1_u12_ao_transp (min) = ', (wall1 - wall0) / 60.d0 + !call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, int2_grad1_u12_bimo_transp, (mo_num, mo_num, 3, n_points_final_grid)] + + implicit none + integer :: ipoint + double precision :: wall0, wall1 + + PROVIDE mo_l_coef mo_r_coef + PROVIDE int2_grad1_u12_ao_transp + + !print *, ' providing int2_grad1_u12_bimo_transp ...' 
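+
+  ! AO -> bi-orthogonal MO transformation, point by point: for each grid point
+  ! and each Cartesian component m, ao_to_mo_bi_ortho computes
+  !   int2_grad1_u12_bimo_transp(:,:,m,ipoint) = C_L^T . int2_grad1_u12_ao_transp(:,:,m,ipoint) . C_R
+  ! with C_L/C_R the left/right MO coefficients (mo_l_coef/mo_r_coef).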
+ !call wall_time(wall0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid,int2_grad1_u12_ao_transp,int2_grad1_u12_bimo_transp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,1,ipoint), size(int2_grad1_u12_ao_transp , 1) & + , int2_grad1_u12_bimo_transp(1,1,1,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,2,ipoint), size(int2_grad1_u12_ao_transp , 1) & + , int2_grad1_u12_bimo_transp(1,1,2,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,3,ipoint), size(int2_grad1_u12_ao_transp , 1) & + , int2_grad1_u12_bimo_transp(1,1,3,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + + !FREE int2_grad1_u12_ao_transp + + !call wall_time(wall1) + !print *, ' wall time for int2_grad1_u12_bimo_transp (min) =', (wall1 - wall0) / 60.d0 + !call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, int2_grad1_u12_bimo_t_old, (n_points_final_grid, 3, mo_num, mo_num)] + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + !call wall_time(wall0) + !print *, ' providing int2_grad1_u12_bimo_t_old ...' + + PROVIDE mo_l_coef mo_r_coef + PROVIDE int2_grad1_u12_bimo_transp + + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t_old(ipoint,1,j,i) = int2_grad1_u12_bimo_transp(j,i,1,ipoint) + int2_grad1_u12_bimo_t_old(ipoint,2,j,i) = int2_grad1_u12_bimo_transp(j,i,2,ipoint) + int2_grad1_u12_bimo_t_old(ipoint,3,j,i) = int2_grad1_u12_bimo_transp(j,i,3,ipoint) + enddo + enddo + enddo + + FREE int2_grad1_u12_bimo_transp + + !call wall_time(wall1) + !print *, ' wall time for int2_grad1_u12_bimo_t_old (min) =', (wall1 - wall0) / 60.d0 + !call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, int2_grad1_u12_ao_t, (n_points_final_grid, 3, ao_num, ao_num)] + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + !call wall_time(wall0) + !print *, ' providing int2_grad1_u12_ao_t ...' + + PROVIDE int2_grad1_u12_ao + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + int2_grad1_u12_ao_t(ipoint,1,j,i) = int2_grad1_u12_ao(j,i,ipoint,1) + int2_grad1_u12_ao_t(ipoint,2,j,i) = int2_grad1_u12_ao(j,i,ipoint,2) + int2_grad1_u12_ao_t(ipoint,3,j,i) = int2_grad1_u12_ao(j,i,ipoint,3) + enddo + enddo + enddo + + !call wall_time(wall1) + !print *, ' wall time for int2_grad1_u12_ao_t (min) =', (wall1 - wall0) / 60.d0 + !call print_memory_usage() + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, 3, mo_num, mo_num)] + + implicit none + integer :: i, j, ipoint + + do i = 1, mo_num + do j = 1, mo_num + do ipoint = 1, n_points_final_grid + mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,1,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,1,ipoint) + mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,2,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,2,ipoint) + mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,3,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,3,ipoint) + enddo + enddo + enddo +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk, (n_points_final_grid, 3, mo_num, mo_num)] + + BEGIN_DOC + ! + ! 
x_W_ki_bi_ortho_erf_rk(ip,m,k,i) = \int dr chi_k(r) \frac{(1 - erf(mu |r-R_ip|))}{2|r-R_ip|} (x(m)-R_ip(m)) phi_i(r) ON THE BI-ORTHO MO BASIS
+  !
+  ! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => X(m) = x, m=2 => X(m) = y, m=3 => X(m) = z,
+  !
+  ! R_ip = the "ip"-th point of the DFT Grid
+  END_DOC
+
+  implicit none
+  include 'constants.include.F'
+
+  integer          :: ipoint, m, i, k
+  double precision :: xyz
+  double precision :: wall0, wall1
+
+  !print*, ' providing x_W_ki_bi_ortho_erf_rk ...'
+  !call wall_time(wall0)
+
+ !$OMP PARALLEL                   &
+ !$OMP DEFAULT (NONE)             &
+ !$OMP PRIVATE (ipoint,m,i,k,xyz) &
+ !$OMP SHARED (x_W_ki_bi_ortho_erf_rk,n_points_final_grid,mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_num,final_grid_points)
+ !$OMP DO SCHEDULE (dynamic)
+  do i = 1, mo_num
+    do k = 1, mo_num
+      do m = 1, 3
+        do ipoint = 1, n_points_final_grid
+          xyz = final_grid_points(m,ipoint)
+          x_W_ki_bi_ortho_erf_rk(ipoint,m,k,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,m,k,i) - xyz * mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,k,i)
+        enddo
+      enddo
+    enddo
+  enddo
+
+ !$OMP END DO
+ !$OMP END PARALLEL
+
+  ! FREE mo_v_ki_bi_ortho_erf_rk_cst_mu_transp
+  ! FREE mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp
+
+  !call wall_time(wall1)
+  !print *, ' time to provide x_W_ki_bi_ortho_erf_rk = ', wall1 - wall0
+
+END_PROVIDER
+
+! ---
+
+BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk_diag, (n_points_final_grid, 3, mo_num)]
+
+  BEGIN_DOC
+  !
+  ! x_W_ki_bi_ortho_erf_rk_diag(ip,m,i) = \int dr chi_i(r) \frac{(1 - erf(mu |r-R_ip|))}{2|r-R_ip|} (x(m)-R_ip(m)) phi_i(r) ON THE BI-ORTHO MO BASIS
+  !
+  ! i.e. the diagonal (k = i) of x_W_ki_bi_ortho_erf_rk above, where chi_i(r)/phi_i(r) are left/right MOs,
+  ! m=1 => X(m) = x, m=2 => X(m) = y, m=3 => X(m) = z,
+  !
+  ! R_ip = the "ip"-th point of the DFT Grid
+  !
+  END_DOC
+
+  implicit none
+  include 'constants.include.F'
+
+  integer          :: ipoint, m, i
+  double precision :: xyz
+  double precision :: wall0, wall1
+
+  !print*,'providing x_W_ki_bi_ortho_erf_rk_diag ...'
+  !call wall_time(wall0)
+
+ !$OMP PARALLEL                 &
+ !$OMP DEFAULT (NONE)           &
+ !$OMP PRIVATE (ipoint,m,i,xyz) &
+ !$OMP SHARED (x_W_ki_bi_ortho_erf_rk_diag,n_points_final_grid,mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_v_ki_bi_ortho_erf_rk_cst_mu_transp,mo_num,final_grid_points)
+ !$OMP DO SCHEDULE (dynamic)
+  do i = 1, mo_num
+    do m = 1, 3
+      do ipoint = 1, n_points_final_grid
+        xyz = final_grid_points(m,ipoint)
+        x_W_ki_bi_ortho_erf_rk_diag(ipoint,m,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,m,i,i) - xyz * mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,i,i)
+      enddo
+    enddo
+  enddo
+
+ !$OMP END DO
+ !$OMP END PARALLEL
+
+  !call wall_time(wall1)
+  !print*,'time to provide x_W_ki_bi_ortho_erf_rk_diag = ',wall1 - wall0
+
+END_PROVIDER
+
+! 
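---
+
+! A minimal sketch for the two "transform into a DGEMM" TODOs above, kept as a
+! comment: assuming the leading dimensions are ao_num and keeping the final
+! 0.5d0 scaling of mo_v_ki_bi_ortho_erf_rk_cst_mu, the per-point calls to
+! ao_to_mo_bi_ortho can be replaced by two DGEMMs plus one reordering pass
+! (tmp1/tmp2 and the loop indices are hypothetical, for illustration only):
+!
+!   integer                       :: ip, k, nu
+!   double precision, allocatable :: tmp1(:,:,:), tmp2(:,:,:)
+!
+!   allocate(tmp1(mo_num,ao_num,n_points_final_grid))
+!   allocate(tmp2(n_points_final_grid,mo_num,ao_num))
+!
+!   ! 1) left half-transform in a single DGEMM over all grid points:
+!   !    tmp1(k,nu,ip) = \sum_mu mo_l_coef(mu,k) * v_ij_erf_rk_cst_mu(mu,nu,ip)
+!   call dgemm('T', 'N', mo_num, ao_num*n_points_final_grid, ao_num, 1.d0, &
+!              mo_l_coef, ao_num, v_ij_erf_rk_cst_mu, ao_num, 0.d0, tmp1, mo_num)
+!
+!   ! 2) put the grid index first so that the next DGEMM is contiguous:
+!   do nu = 1, ao_num
+!     do k = 1, mo_num
+!       do ip = 1, n_points_final_grid
+!         tmp2(ip,k,nu) = tmp1(k,nu,ip)
+!       enddo
+!     enddo
+!   enddo
+!
+!   ! 3) right half-transform, directly in the transposed layout:
+!   !    mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ip,k,i) = 0.5d0 * \sum_nu tmp2(ip,k,nu) * mo_r_coef(nu,i)
+!   call dgemm('N', 'N', n_points_final_grid*mo_num, mo_num, ao_num, 0.5d0, &
+!              tmp2, n_points_final_grid*mo_num, mo_r_coef, ao_num,         &
+!              0.d0, mo_v_ki_bi_ortho_erf_rk_cst_mu_transp, n_points_final_grid*mo_num)
+
+! 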
--- + diff --git a/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f b/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f index 15ed2ce4..b1c2dc87 100644 --- a/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f +++ b/plugins/local/bi_ortho_mos/bi_ort_mos_in_r.irp.f @@ -16,7 +16,7 @@ double precision :: r(3) double precision, allocatable :: aos_r(:,:) - call cpu_time(tt0) + call wall_time(tt0) allocate(aos_r(ao_num,n_points_final_grid)) @@ -27,7 +27,7 @@ call give_all_aos_at_r(r, aos_r(1,1)) - call cpu_time(tt2) + call wall_time(tt2) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & @@ -43,7 +43,7 @@ !$OMP END DO !$OMP END PARALLEL - call cpu_time(tt3) + call wall_time(tt3) write(*,"(A,2X,F15.7)") ' wall time for AOs on r (sec) = ', (tt3 - tt2) @@ -63,7 +63,7 @@ deallocate(aos_r) - call cpu_time(tt1) + call wall_time(tt1) write(*,"(A,2X,F15.7)") ' wall time for mos_l_in_r_array_transp & mos_r_in_r_array_transp (sec) = ', (tt1 - tt0) END_PROVIDER From 2b244248911b6777596912075f738dd2fbfb952c Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Thu, 8 Aug 2024 23:41:03 +0200 Subject: [PATCH 15/19] 2e-noL with CUDA --- .../local/bi_ort_ints/semi_num_ints_mo.irp.f | 6 +- plugins/local/tc_int/LIB | 2 +- plugins/local/tc_int/NEED | 1 + plugins/local/tc_int/cutc_module.F90 | 62 +- plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f | 282 ++++++++ plugins/local/tc_int/deb_no_2e_gpu.irp.f | 418 ++++++++++++ plugins/local/tc_int/deb_tc_int_cuda.irp.f | 283 +------- plugins/local/tc_int/install | 2 +- plugins/local/tc_int/no_0e.irp.f | 412 ++++++++++++ plugins/local/tc_int/no_1e.irp.f | 602 +++++++++++++++++ plugins/local/tc_int/no_2e.irp.f | 605 ++++++++++++++++++ plugins/local/tc_int/uninstall | 2 +- plugins/local/tc_int/write_tc_int_cuda.irp.f | 12 +- 13 files changed, 2389 insertions(+), 300 deletions(-) create mode 100644 plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f create mode 100644 plugins/local/tc_int/deb_no_2e_gpu.irp.f create mode 100644 plugins/local/tc_int/no_0e.irp.f create mode 100644 plugins/local/tc_int/no_1e.irp.f create mode 100644 plugins/local/tc_int/no_2e.irp.f diff --git a/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f b/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f index 1fd5d666..dc7642b0 100644 --- a/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f +++ b/plugins/local/bi_ort_ints/semi_num_ints_mo.irp.f @@ -21,9 +21,9 @@ BEGIN_PROVIDER [double precision, int2_grad1_u12_bimo_t, (n_points_final_grid, 3 !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) !$OMP DO SCHEDULE (dynamic) do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp (1,1,ipoint,1), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp (1,1,ipoint,2), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp (1,1,ipoint,3), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) enddo !$OMP END DO !$OMP END PARALLEL diff --git a/plugins/local/tc_int/LIB b/plugins/local/tc_int/LIB index c41ceb9d..9f7e2d9f 100644 --- a/plugins/local/tc_int/LIB +++ b/plugins/local/tc_int/LIB @@ -1 +1 @@ --ltc_int_cu +-lcutcint diff --git a/plugins/local/tc_int/NEED b/plugins/local/tc_int/NEED index 34d2e183..362314da 100644 --- a/plugins/local/tc_int/NEED +++ b/plugins/local/tc_int/NEED @@ -4,3 +4,4 @@ 
jastrow qmckl becke_numerical_grid dft_utils_in_r +bi_ortho_mos diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index 1f55d763..d7b922cd 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -9,13 +9,13 @@ module cutc_module ! --- - subroutine tc_int_c(nxBlocks, nyBlocks, nzBlocks, & - blockxSize, blockySize, blockzSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, wr1, r2, wr2, rn, & - aos_data1, aos_data2, & - c_bh, m_bh, n_bh, o_bh, & - int2_grad1_u12_ao, int_2e_ao) bind(C, name = "tc_int_c") + subroutine cutc_int_c(nxBlocks, nyBlocks, nzBlocks, & + blockxSize, blockySize, blockzSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, wr1, r2, wr2, rn, & + aos_data1, aos_data2, & + c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao, int_2e_ao) bind(C, name = "cutc_int_c") import c_int, c_double, c_ptr integer(c_int), intent(in), value :: nxBlocks, blockxSize @@ -37,7 +37,7 @@ module cutc_module real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,3) real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) - end subroutine tc_int_c + end subroutine cutc_int_c ! --- @@ -50,6 +50,7 @@ module cutc_module int2_grad1_u12_ao, int_2e_ao) bind(C, name = "deb_int_2e_ao") import c_int, c_double, c_ptr + integer(c_int), intent(in), value :: nxBlocks, blockxSize integer(c_int), intent(in), value :: nyBlocks, blockySize integer(c_int), intent(in), value :: nzBlocks, blockzSize @@ -73,6 +74,51 @@ module cutc_module ! --- + subroutine cutc_no_2e(n_grid1, n_mo, ne_a, ne_b, & + wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + no_2e) bind(C, name = "cutc_no_2e") + + import c_int, c_double, c_ptr + + integer(c_int), intent(in), value :: n_grid1 + integer(c_int), intent(in), value :: n_mo + integer(c_int), intent(in), value :: ne_a + integer(c_int), intent(in), value :: ne_b + real(c_double), intent(in) :: wr1(n_grid1) + real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) + real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo) + + end subroutine cutc_no_2e + + ! --- + + subroutine deb_no_2e(n_grid1, n_mo, ne_a, ne_b, & + wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + tmpO, tmpJ, tmpA, tmpB, tmpC, tmpD, tmpE, & + no_2e) bind(C, name = "deb_no_2e") + + import c_int, c_double, c_ptr + + integer(c_int), intent(in), value :: n_grid1 + integer(c_int), intent(in), value :: n_mo + integer(c_int), intent(in), value :: ne_a + integer(c_int), intent(in), value :: ne_b + real(c_double), intent(in) :: wr1(n_grid1) + real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) + real(c_double), intent(out) :: tmpO(n_grid1), tmpJ(n_grid1,3) + real(c_double), intent(out) :: tmpA(n_grid1,3,n_mo), tmpB(n_grid1,3,n_mo) + real(c_double), intent(out) :: tmpC(n_grid1,4,n_mo,n_mo), tmpD(n_grid1,4,n_mo,n_mo) + real(c_double), intent(out) :: tmpE(n_mo,n_mo,n_mo,n_mo) + real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo) + + end subroutine deb_no_2e + + ! --- + end interface end module cutc_module diff --git a/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f b/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f new file mode 100644 index 00000000..3290c149 --- /dev/null +++ b/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f @@ -0,0 +1,282 @@ + +! 
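---
+!
+! Validation driver for the CUDA back-end: int2_grad1_u12_ao and int_2e_ao are
+! rebuilt on the CPU (OpenMP loops + DGEMMs) and compared element by element
+! against the arrays returned by deb_int_2e_ao, stopping at the first
+! deviation larger than acc_thr.
+!
+! 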
--- + +subroutine deb_int_2e_ao_gpu() + + use cutc_module + + implicit none + + integer :: m + integer :: i, j, k, l + integer :: ipoint, jpoint + + double precision :: weight1, ao_i_r, ao_k_r + + double precision :: acc_thr, err_tot, nrm_tot, err_loc + + double precision :: time0, time1 + double precision :: wall_time0, wall_time1 + double precision :: wall_ttime0, wall_ttime1 + double precision :: tt1, tt2 + + double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:), c_mat(:,:,:) + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) + double precision, allocatable :: int_2e_ao(:,:,:,:) + double precision, allocatable :: int_2e_ao_gpu(:,:,:,:) + + + + call wall_time(time0) + print*, ' start deb_int_2e_ao_gpu' + + + ! --- + + allocate(rn(3,nucl_num)) + allocate(aos_data1(n_points_final_grid,ao_num,4)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3) + enddo + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + ! --- + + integer :: nB + integer :: sB + + PROVIDE nxBlocks nyBlocks nzBlocks + PROVIDE blockxSize blockySize blockzSize + + sB = 32 + nB = (n_points_final_grid + sB - 1) / sB + + call ezfio_set_tc_int_blockxSize(sB) + call ezfio_set_tc_int_nxBlocks(nB) + + allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3)) + allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num)) + + call deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao_gpu, int_2e_ao_gpu) + + ! 
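---
+
+  ! CPU reference, following the same steps as the GPU kernel:
+  !   1) int_fct_long_range(j2,i,j) = w(r2) * chi_i(r2) * chi_j(r2) on the extra grid
+  !   2) grad1_u12(j2,j1,1:4)       = grad_1 u(r1,r2) (components 1-3) and the
+  !                                   scalar term entering the Hermitian part (component 4)
+  !   3) one DGEMM per component to integrate over r2
+  !   4) Hermitian + non-Hermitian assembly of int_2e_ao, then symmetrization A + A.T
+
+  ! 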
--- + + allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) + allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + + call wall_time(wall_time0) + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) + !$OMP DO + do ipoint = 1, n_points_final_grid + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & + , grad1_u12(1,ipoint,2) & + , grad1_u12(1,ipoint,3) & + , grad1_u12(1,ipoint,4) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + do m = 1, 4 + call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & + , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) + enddo + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of integ over r2 (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time of Hermitian part (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 0.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of Hermitian part (sec) = ', (wall_ttime1 - wall_ttime0) + + + tt1 = 0.d0 + tt2 = 0.d0 + do m = 1, 3 + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, & + !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector, m) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = final_weight_at_r_vector(ipoint) + ao_i_r = 
aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + tt1 += wall_ttime1 - wall_ttime0 + + call wall_time(wall_ttime0) + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -0.5d0 & + , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 1.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(wall_ttime1) + tt2 += wall_ttime1 - wall_ttime0 + enddo + write(*,"(A,2X,F15.7)") ' wall time of non-Hermitian part (sec) = ', tt1 + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of non Hermitian part (sec) = ', tt2 + + + call wall_time(wall_ttime0) + call sum_A_At(int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time of A + A.T (sec) = ', wall_ttime1 - wall_ttime0 + + + call wall_time(wall_time1) + write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (wall_time1 - wall_time0) + + ! --- + + acc_thr = 1d-12 + + print *, ' precision on int2_grad1_u12_ao ' + err_tot = 0.d0 + nrm_tot = 0.d0 + do m = 1, 3 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", i, j, ipoint, m + print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) + print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on int2_grad1_u12_ao (%) =', 100.d0 * err_tot / nrm_tot + + + print *, ' precision on int_2e_ao ' + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, ao_num + do j = 1, ao_num + do k = 1, ao_num + do l = 1, ao_num + err_loc = dabs(int_2e_ao(l,k,j,i) - int_2e_ao_gpu(l,k,j,i)) + if(err_loc > acc_thr) then + print*, " error on", l, k, j, i + print*, " CPU res", int_2e_ao (l,k,j,i) + print*, " GPU res", int_2e_ao_gpu(l,k,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(int_2e_ao(l,k,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on int_2e_ao (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + deallocate(int_fct_long_range, grad1_u12, c_mat) + deallocate(int_2e_ao, int2_grad1_u12_ao) + deallocate(int_2e_ao_gpu, int2_grad1_u12_ao_gpu) + deallocate(rn, aos_data1, aos_data2) + + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for deb_int_2e_ao_gpu (sec) = ', (time1 - time0) + + return +end diff --git a/plugins/local/tc_int/deb_no_2e_gpu.irp.f b/plugins/local/tc_int/deb_no_2e_gpu.irp.f new file mode 100644 index 00000000..2be53ddd --- /dev/null +++ b/plugins/local/tc_int/deb_no_2e_gpu.irp.f @@ -0,0 +1,418 @@ + +! 
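---
+!
+! Validation of the normal-ordering two-electron term: the CPU reference
+! (provide_no_2e) and the CUDA kernel (cutc_no_2e) are given the same
+! left/right MOs and the same int2_grad1_u12 in the bi-orthogonal MO basis,
+! and the resulting noL_2e tensors are compared element by element.
+!
+! 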
--- + +subroutine deb_no_2e_gpu() + + use cutc_module + + implicit none + + integer :: i, j, k, l, ipoint + double precision :: acc_thr, err_tot, nrm_tot, err_loc + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) + double precision, allocatable :: noL_2e(:,:,:,:) + double precision, allocatable :: noL_2e_gpu(:,:,:,:) + + + PROVIDE mo_l_coef mo_r_coef + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(int2_grad1_u12_ao) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmp) + + ! --- + + allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num)) + + call cutc_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_2e_gpu(1,1,1,1)) + + ! --- + + allocate(noL_2e(mo_num,mo_num,mo_num,mo_num)) + + call provide_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_2e(1,1,1,1)) + + ! --- + + deallocate(int2_grad1_u12_bimo_t) + + acc_thr = 1d-12 + + print *, ' precision on noL_2e ' + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do k = 1, mo_num + do l = 1, mo_num + err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i)) + if(err_loc > acc_thr) then + print*, " error on", l, k, j, i + print*, " CPU res", noL_2e (l,k,j,i) + print*, " GPU res", noL_2e_gpu(l,k,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on noL_2e (%) =', 100.d0 * err_tot / nrm_tot + + deallocate(noL_2e) + deallocate(noL_2e_gpu) + + + return + +end + +! 
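---
+!
+! Same comparison as deb_no_2e_gpu, but the intermediates tmpO, tmpJ, tmpA,
+! tmpB, tmpC, tmpD and tmpE are also returned by both implementations and
+! checked one by one, which helps localize a CPU/GPU discrepancy.
+!
+! 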
--- + +subroutine deb_no_2e_gpu_tmp() + + use cutc_module + + implicit none + + integer :: i, j, k, l, m, ipoint + double precision :: acc_thr, err_tot, nrm_tot, err_loc + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) + double precision, allocatable :: tmpO(:), tmpO_gpu(:) + double precision, allocatable :: tmpJ(:,:), tmpJ_gpu(:,:) + double precision, allocatable :: tmpA(:,:,:), tmpA_gpu(:,:,:) + double precision, allocatable :: tmpB(:,:,:), tmpB_gpu(:,:,:) + double precision, allocatable :: tmpC(:,:,:,:), tmpC_gpu(:,:,:,:) + double precision, allocatable :: tmpD(:,:,:,:), tmpD_gpu(:,:,:,:) + double precision, allocatable :: tmpE(:,:,:,:), tmpE_gpu(:,:,:,:) + double precision, allocatable :: noL_2e(:,:,:,:), noL_2e_gpu(:,:,:,:) + + + PROVIDE mo_l_coef mo_r_coef + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(int2_grad1_u12_ao) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmp) + + ! --- + + allocate(tmpO_gpu(n_points_final_grid)) + allocate(tmpJ_gpu(n_points_final_grid,3)) + allocate(tmpA_gpu(n_points_final_grid,3,mo_num)) + allocate(tmpB_gpu(n_points_final_grid,3,mo_num)) + allocate(tmpC_gpu(n_points_final_grid,4,mo_num,mo_num)) + allocate(tmpD_gpu(n_points_final_grid,4,mo_num,mo_num)) + allocate(tmpE_gpu(mo_num,mo_num,mo_num,mo_num)) + allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num)) + + call deb_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), & + tmpO_gpu(1), tmpJ_gpu(1,1), tmpA_gpu(1,1,1), tmpB_gpu(1,1,1), & + tmpC_gpu(1,1,1,1), tmpD_gpu(1,1,1,1), tmpE_gpu(1,1,1,1), & + noL_2e_gpu(1,1,1,1)) + + ! 
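---
+
+  ! CPU reference for the same intermediates: provide_no_2e_tmp (no_2e.irp.f)
+  ! is expected to fill tmpO ... tmpE and noL_2e exactly as the CUDA kernel does.
+
+  ! 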
--- + + allocate(tmpO(n_points_final_grid)) + allocate(tmpJ(n_points_final_grid,3)) + allocate(tmpA(n_points_final_grid,3,mo_num)) + allocate(tmpB(n_points_final_grid,3,mo_num)) + allocate(tmpC(n_points_final_grid,4,mo_num,mo_num)) + allocate(tmpD(n_points_final_grid,4,mo_num,mo_num)) + allocate(tmpE(mo_num,mo_num,mo_num,mo_num)) + allocate(noL_2e(mo_num,mo_num,mo_num,mo_num)) + + call provide_no_2e_tmp(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), & + tmpO(1), tmpJ(1,1), tmpA(1,1,1), tmpB(1,1,1), & + tmpC(1,1,1,1), tmpD(1,1,1,1), tmpE(1,1,1,1), & + noL_2e(1,1,1,1)) + + ! --- + + deallocate(int2_grad1_u12_bimo_t) + + acc_thr = 1d-12 + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpO(ipoint) - tmpO_gpu(ipoint)) + if(err_loc > acc_thr) then + print*, " error on", ipoint + print*, " CPU res", tmpO (ipoint) + print*, " GPU res", tmpO_gpu(ipoint) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpO(ipoint)) + enddo + print *, ' absolute accuracy on tmpO (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpJ(ipoint,m) - tmpJ_gpu(ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m + print*, " CPU res", tmpJ (ipoint,m) + print*, " GPU res", tmpJ_gpu(ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpJ(ipoint,m)) + enddo + enddo + print *, ' absolute accuracy on tmpJ (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpA(ipoint,m,i) - tmpA_gpu(ipoint,m,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i + print*, " CPU res", tmpA (ipoint,m,i) + print*, " GPU res", tmpA_gpu(ipoint,m,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpA(ipoint,m,i)) + enddo + enddo + enddo + print *, ' absolute accuracy on tmpA (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpB(ipoint,m,i) - tmpB_gpu(ipoint,m,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i + print*, " CPU res", tmpB (ipoint,m,i) + print*, " GPU res", tmpB_gpu(ipoint,m,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpB(ipoint,m,i)) + enddo + enddo + enddo + print *, ' absolute accuracy on tmpB (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpC(ipoint,m,i,j) - tmpC_gpu(ipoint,m,i,j)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i, j + print*, " CPU res", tmpC (ipoint,m,i,j) + print*, " GPU res", tmpC_gpu(ipoint,m,i,j) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpC(ipoint,m,i,j)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on tmpC (%) =', 100.d0 * err_tot / nrm_tot + + ! 
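---
+
+  ! NB: tmpC and tmpD carry 4 components per grid point, but only components
+  ! 1 to 3 are compared here and below; the 4th component is left unchecked.
+
+  ! 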
--- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpD(ipoint,m,i,j) - tmpD_gpu(ipoint,m,i,j)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i, j + print*, " CPU res", tmpD (ipoint,m,i,j) + print*, " GPU res", tmpD_gpu(ipoint,m,i,j) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpD(ipoint,m,i,j)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on tmpD (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do k = 1, mo_num + do l = 1, mo_num + err_loc = dabs(tmpE(l,k,j,i) - tmpE_gpu(l,k,j,i)) + if(err_loc > acc_thr) then + print*, " error on", l, k, j, i + print*, " CPU res", tmpE (l,k,j,i) + print*, " GPU res", tmpE_gpu(l,k,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpE(l,k,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on tmpE (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do k = 1, mo_num + do l = 1, mo_num + err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i)) + if(err_loc > acc_thr) then + print*, " error on", l, k, j, i + print*, " CPU res", noL_2e (l,k,j,i) + print*, " GPU res", noL_2e_gpu(l,k,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on noL_2e (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + deallocate(tmpO, tmpO_gpu) + deallocate(tmpJ, tmpJ_gpu) + deallocate(tmpA, tmpA_gpu) + deallocate(tmpB, tmpB_gpu) + deallocate(tmpC, tmpC_gpu) + deallocate(tmpD, tmpD_gpu) + deallocate(tmpE, tmpE_gpu) + deallocate(noL_2e, noL_2e_gpu) + + return +end + + diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index 1a43141c..9da9ac95 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -36,289 +36,12 @@ subroutine main() implicit none - call deb_int_2e_ao_gpu() + !call deb_int_2e_ao_gpu() + !call deb_no_2e_gpu_tmp() + call deb_no_2e_gpu() return end ! --- -subroutine deb_int_2e_ao_gpu() - - use cutc_module - - implicit none - - integer :: m - integer :: i, j, k, l - integer :: ipoint, jpoint - - double precision :: weight1, ao_i_r, ao_k_r - - double precision :: acc_thr, err_tot, nrm_tot, err_loc - - double precision :: time0, time1 - double precision :: cpu_time0, cpu_time1 - double precision :: cpu_ttime0, cpu_ttime1 - double precision :: tt1, tt2 - - double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) - double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:), c_mat(:,:,:) - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) - double precision, allocatable :: int_2e_ao(:,:,:,:) - double precision, allocatable :: int_2e_ao_gpu(:,:,:,:) - - - - call wall_time(time0) - print*, ' start deb_int_2e_ao_gpu' - - - ! 
--- - - allocate(rn(3,nucl_num)) - allocate(aos_data1(n_points_final_grid,ao_num,4)) - allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) - - do k = 1, nucl_num - rn(1,k) = nucl_coord(k,1) - rn(2,k) = nucl_coord(k,2) - rn(3,k) = nucl_coord(k,3) - enddo - - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) - aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1) - aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2) - aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3) - enddo - enddo - - do k = 1, ao_num - do ipoint = 1, n_points_extra_final_grid - aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) - aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) - aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) - aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) - enddo - enddo - - ! --- - - integer :: nB - integer :: sB - - PROVIDE nxBlocks nyBlocks nzBlocks - PROVIDE blockxSize blockySize blockzSize - - sB = 32 - nB = (n_points_final_grid + sB - 1) / sB - - call ezfio_set_tc_int_blockxSize(sB) - call ezfio_set_tc_int_nxBlocks(nB) - - allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3)) - allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num)) - - call deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - final_grid_points, final_weight_at_r_vector, & - final_grid_points_extra, final_weight_at_r_vector_extra, & - rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & - int2_grad1_u12_ao_gpu, int_2e_ao_gpu) - - ! --- - - allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) - allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) - allocate(c_mat(n_points_final_grid,ao_num,ao_num)) - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) - allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) - - call wall_time(cpu_time0) - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (j, i, jpoint) & - !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) - !$OMP DO SCHEDULE (static) - do j = 1, ao_num - do i = 1, ao_num - do jpoint = 1, n_points_extra_final_grid - int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) - !$OMP DO - do ipoint = 1, n_points_final_grid - call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & - , grad1_u12(1,ipoint,2) & - , grad1_u12(1,ipoint,3) & - , grad1_u12(1,ipoint,4) ) - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - do m = 1, 4 - call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & - , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & - , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) - 
enddo - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for DGEMM of integ over r2 (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, k, ipoint) & - !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) - !$OMP DO SCHEDULE (static) - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time of Hermitian part (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - call wall_time(cpu_ttime0) - call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & - , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & - , 0.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time for DGEMM of Hermitian part (sec) = ', (cpu_ttime1 - cpu_ttime0) - - - tt1 = 0.d0 - tt2 = 0.d0 - do m = 1, 3 - - call wall_time(cpu_ttime0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & - !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, & - !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector, m) - !$OMP DO SCHEDULE (static) - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - - weight1 = final_weight_at_r_vector(ipoint) - ao_i_r = aos_in_r_array_transp(ipoint,i) - ao_k_r = aos_in_r_array_transp(ipoint,k) - - c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m)) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(cpu_ttime1) - tt1 += cpu_ttime1 - cpu_ttime0 - - call wall_time(cpu_ttime0) - call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -0.5d0 & - , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & - , 1.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) - call wall_time(cpu_ttime1) - tt2 += cpu_ttime1 - cpu_ttime0 - enddo - write(*,"(A,2X,F15.7)") ' wall time of non-Hermitian part (sec) = ', tt1 - write(*,"(A,2X,F15.7)") ' wall time for DGEMM of non Hermitian part (sec) = ', tt2 - - - call wall_time(cpu_ttime0) - call sum_A_At(int_2e_ao(1,1,1,1), ao_num*ao_num) - call wall_time(cpu_ttime1) - write(*,"(A,2X,F15.7)") ' wall time of A + A.T (sec) = ', cpu_ttime1 - cpu_ttime0 - - - call wall_time(cpu_time1) - write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (cpu_time1 - cpu_time0) - - ! 
--- - - acc_thr = 1d-12 - - print *, ' precision on int2_grad1_u12_ao ' - err_tot = 0.d0 - nrm_tot = 0.d0 - do m = 1, 3 - do ipoint = 1, n_points_final_grid - do j = 1, ao_num - do i = 1, ao_num - err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", i, j, ipoint, m - print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m) - print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on int2_grad1_u12_ao (%) =', 100.d0 * err_tot / nrm_tot - - - print *, ' precision on int_2e_ao ' - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, ao_num - do j = 1, ao_num - do k = 1, ao_num - do l = 1, ao_num - err_loc = dabs(int_2e_ao(l,k,j,i) - int_2e_ao_gpu(l,k,j,i)) - if(err_loc > acc_thr) then - print*, " error on", l, k, j, i - print*, " CPU res", int_2e_ao (l,k,j,i) - print*, " GPU res", int_2e_ao_gpu(l,k,j,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(int_2e_ao(l,k,j,i)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on int_2e_ao (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - deallocate(int_fct_long_range, grad1_u12, c_mat) - deallocate(int_2e_ao, int2_grad1_u12_ao) - deallocate(int_2e_ao_gpu, int2_grad1_u12_ao_gpu) - deallocate(rn, aos_data1, aos_data2) - - call wall_time(time1) - write(*,"(A,2X,F15.7)") ' wall time for deb_int_2e_ao_gpu (sec) = ', (time1 - time0) - - return -end diff --git a/plugins/local/tc_int/install b/plugins/local/tc_int/install index 34e56b4a..be779c3a 100755 --- a/plugins/local/tc_int/install +++ b/plugins/local/tc_int/install @@ -14,5 +14,5 @@ source config/env.rc make cd .. -ln -s ${PWD}/CuTC/build/libtc_int_cu.so ${QP_ROOT}/lib +ln -s ${PWD}/CuTC/build/libcutcint.so ${QP_ROOT}/lib diff --git a/plugins/local/tc_int/no_0e.irp.f b/plugins/local/tc_int/no_0e.irp.f new file mode 100644 index 00000000..b945e0dd --- /dev/null +++ b/plugins/local/tc_int/no_0e.irp.f @@ -0,0 +1,412 @@ + +! --- + +subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, noL_0e) + + BEGIN_DOC + ! + ! < Phi_left | L | Phi_right > + ! + END_DOC + + implicit none + + integer, intent(in) :: n_grid, n_mo + integer, intent(in) :: ne_a, ne_b + double precision, intent(in) :: wr1(n_grid) + double precision, intent(in) :: mos_l_in_r(n_grid,n_mo) + double precision, intent(in) :: mos_r_in_r(n_grid,n_mo) + double precision, intent(in) :: int2_grad1_u12(n_grid,3,n_mo,n_mo) + double precision, intent(out) :: noL_0e + + integer :: i, j, k, ipoint + double precision :: t0, t1 + double precision, allocatable :: tmp(:) + double precision, allocatable :: tmp_L(:,:), tmp_R(:,:) + double precision, allocatable :: tmp_M(:,:), tmp_S(:), tmp_O(:), tmp_J(:,:) + double precision, allocatable :: tmp_M_priv(:,:), tmp_S_priv(:), tmp_O_priv(:), tmp_J_priv(:,:) + + + if(ne_a .eq. 
ne_b) then
+
+    call wall_time(t0)
+
+    allocate(tmp(ne_b))
+    allocate(tmp_L(n_grid,3), tmp_R(n_grid,3))
+
+    !$OMP PARALLEL                                    &
+    !$OMP DEFAULT(NONE)                               &
+    !$OMP PRIVATE(j, i, ipoint, tmp_L, tmp_R)         &
+    !$OMP SHARED(ne_b, n_grid,                        &
+    !$OMP        mos_l_in_r, mos_r_in_r, wr1,         &
+    !$OMP        int2_grad1_u12, tmp)
+
+    !$OMP DO
+    do j = 1, ne_b
+
+      tmp_L = 0.d0
+      tmp_R = 0.d0
+      do i = 1, ne_b
+        do ipoint = 1, n_grid
+
+          tmp_L(ipoint,1) = tmp_L(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,2) = tmp_L(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,3) = tmp_L(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i)
+
+          tmp_R(ipoint,1) = tmp_R(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,2) = tmp_R(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,3) = tmp_R(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i)
+        enddo
+      enddo
+
+      tmp(j) = 0.d0
+      do ipoint = 1, n_grid
+        tmp(j) = tmp(j) + wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3))
+      enddo
+    enddo ! j
+    !$OMP END DO
+    !$OMP END PARALLEL
+
+    noL_0e = -2.d0 * sum(tmp)
+
+    deallocate(tmp)
+    deallocate(tmp_L, tmp_R)
+
+    ! ---
+
+    allocate(tmp_O(n_grid), tmp_J(n_grid,3))
+    tmp_O = 0.d0
+    tmp_J = 0.d0
+
+    !$OMP PARALLEL                                    &
+    !$OMP DEFAULT(NONE)                               &
+    !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv)  &
+    !$OMP SHARED(ne_b, n_grid,                        &
+    !$OMP        mos_l_in_r, mos_r_in_r,              &
+    !$OMP        int2_grad1_u12, tmp_O, tmp_J)
+
+    allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3))
+    tmp_O_priv = 0.d0
+    tmp_J_priv = 0.d0
+
+    !$OMP DO
+    do i = 1, ne_b
+      do ipoint = 1, n_grid
+        tmp_O_priv(ipoint)   = tmp_O_priv(ipoint)   + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i)
+        tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i)
+        tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i)
+        tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i)
+      enddo
+    enddo
+    !$OMP END DO NOWAIT
+
+    !$OMP CRITICAL
+    tmp_O = tmp_O + tmp_O_priv
+    tmp_J = tmp_J + tmp_J_priv
+    !$OMP END CRITICAL
+
+    deallocate(tmp_O_priv, tmp_J_priv)
+    !$OMP END PARALLEL
+
+    allocate(tmp_M(n_grid,3), tmp_S(n_grid))
+    tmp_M = 0.d0
+    tmp_S = 0.d0
+
+    !$OMP PARALLEL                                       &
+    !$OMP DEFAULT(NONE)                                  &
+    !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv)  &
+    !$OMP SHARED(ne_b, n_grid,                           &
+    !$OMP        mos_l_in_r, mos_r_in_r,                 &
+    !$OMP        int2_grad1_u12, tmp_M, tmp_S)
+
+    allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid))
+    tmp_M_priv = 0.d0
+    tmp_S_priv = 0.d0
+
+    !$OMP DO COLLAPSE(2)
+    do i = 1, ne_b
+      do j = 1, ne_b
+        do ipoint = 1, n_grid
+
+          tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j)
+          tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j)
+          tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j)
+
+          tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) &
+                                                  + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) &
+                                                  + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i)
+        enddo
+      enddo
+    enddo
+    !$OMP END DO NOWAIT
+
+    !$OMP CRITICAL
+    tmp_M = tmp_M + tmp_M_priv
+    tmp_S = tmp_S + tmp_S_priv
+    !$OMP END CRITICAL
+
+    deallocate(tmp_M_priv, tmp_S_priv)
+    !$OMP END PARALLEL
+
+    allocate(tmp(n_grid))
+
+    do ipoint = 1, n_grid
+
+      tmp_S(ipoint) = 2.d0 * 
(tmp_J(ipoint,1)*tmp_J(ipoint,1) + tmp_J(ipoint,2)*tmp_J(ipoint,2) + tmp_J(ipoint,3)*tmp_J(ipoint,3)) - tmp_S(ipoint)
+
+      tmp(ipoint) = wr1(ipoint) * ( tmp_O(ipoint) * tmp_S(ipoint) &
+                  - 2.d0 * ( tmp_J(ipoint,1) * tmp_M(ipoint,1)    &
+                           + tmp_J(ipoint,2) * tmp_M(ipoint,2)    &
+                           + tmp_J(ipoint,3) * tmp_M(ipoint,3)))
+    enddo
+
+    noL_0e = noL_0e - 2.d0 * sum(tmp)
+
+    deallocate(tmp)
+
+  else
+
+    call wall_time(t0)
+
+    allocate(tmp(ne_a))
+    allocate(tmp_L(n_grid,3), tmp_R(n_grid,3))
+
+    !$OMP PARALLEL                                    &
+    !$OMP DEFAULT(NONE)                               &
+    !$OMP PRIVATE(j, i, ipoint, tmp_L, tmp_R)         &
+    !$OMP SHARED(ne_b, ne_a, n_grid,                  &
+    !$OMP        mos_l_in_r, mos_r_in_r,              &
+    !$OMP        int2_grad1_u12, tmp, wr1)
+
+    !$OMP DO
+    do j = 1, ne_b
+
+      tmp_L = 0.d0
+      tmp_R = 0.d0
+      do i = ne_b+1, ne_a
+        do ipoint = 1, n_grid
+
+          tmp_L(ipoint,1) = tmp_L(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,2) = tmp_L(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,3) = tmp_L(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i)
+
+          tmp_R(ipoint,1) = tmp_R(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,2) = tmp_R(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,3) = tmp_R(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i)
+        enddo
+      enddo
+
+      tmp(j) = 0.d0
+      do ipoint = 1, n_grid
+        tmp(j) = tmp(j) + wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3))
+      enddo
+
+      do i = 1, ne_b
+        do ipoint = 1, n_grid
+
+          tmp_L(ipoint,1) = tmp_L(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,2) = tmp_L(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,3) = tmp_L(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i)
+
+          tmp_R(ipoint,1) = tmp_R(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,2) = tmp_R(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,3) = tmp_R(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i)
+        enddo
+      enddo
+
+      do ipoint = 1, n_grid
+        tmp(j) = tmp(j) + wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3))
+      enddo
+    enddo ! j
+    !$OMP END DO
+    !$OMP END PARALLEL
+
+    ! ---
+
+    !$OMP PARALLEL                                    &
+    !$OMP DEFAULT(NONE)                               &
+    !$OMP PRIVATE(j, i, ipoint, tmp_L, tmp_R)         &
+    !$OMP SHARED(ne_b, ne_a, n_grid,                  &
+    !$OMP        mos_l_in_r, mos_r_in_r,              &
+    !$OMP        int2_grad1_u12, tmp, wr1)
+
+    !$OMP DO
+    do j = ne_b+1, ne_a
+
+      tmp_L = 0.d0
+      tmp_R = 0.d0
+      do i = 1, ne_a
+        do ipoint = 1, n_grid
+          tmp_L(ipoint,1) = tmp_L(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,2) = tmp_L(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i)
+          tmp_L(ipoint,3) = tmp_L(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i)
+
+          tmp_R(ipoint,1) = tmp_R(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,2) = tmp_R(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i)
+          tmp_R(ipoint,3) = tmp_R(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i)
+        enddo
+      enddo
+
+      tmp(j) = 0.d0
+      do ipoint = 1, n_grid
+        tmp(j) = tmp(j) + 0.5d0 * wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3))
+      enddo
+    enddo ! 
j + !$OMP END DO + !$OMP END PARALLEL + + noL_0e = -2.d0 * sum(tmp) + + deallocate(tmp) + deallocate(tmp_L, tmp_R) + + ! --- + + allocate(tmp_O(n_grid), tmp_J(n_grid,3)) + tmp_O = 0.d0 + tmp_J = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_O, tmp_J) + + allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) + tmp_O_priv = 0.d0 + tmp_J_priv = 0.d0 + + !$OMP DO + do i = 1, ne_b + do ipoint = 1, n_grid + tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) + tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) + tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmp_O = tmp_O + tmp_O_priv + tmp_J = tmp_J + tmp_J_priv + !$OMP END CRITICAL + + deallocate(tmp_O_priv, tmp_J_priv) + !$OMP END PARALLEL + + ! --- + + allocate(tmp_M(n_grid,3), tmp_S(n_grid)) + tmp_M = 0.d0 + tmp_S = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_M, tmp_S) + + allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) + tmp_M_priv = 0.d0 + tmp_S_priv = 0.d0 + + !$OMP DO COLLAPSE(2) + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO COLLAPSE(2) + do i = ne_b+1, ne_a + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + + tmp_S_priv(ipoint) = 
tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO COLLAPSE(2) + do i = ne_b+1, ne_a + do j = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmp_M = tmp_M + tmp_M_priv + tmp_S = tmp_S + tmp_S_priv + !$OMP END CRITICAL + + deallocate(tmp_M_priv, tmp_S_priv) + !$OMP END PARALLEL + + allocate(tmp(n_grid)) + + do ipoint = 1, n_grid + + tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1)*tmp_J(ipoint,1) + tmp_J(ipoint,2)*tmp_J(ipoint,2) + tmp_J(ipoint,3)*tmp_J(ipoint,3)) - tmp_S(ipoint) + + tmp(ipoint) = wr1(ipoint) * ( tmp_O(ipoint) * tmp_S(ipoint) & + - 2.d0 * ( tmp_J(ipoint,1) * tmp_M(ipoint,1) & + + tmp_J(ipoint,2) * tmp_M(ipoint,2) & + + tmp_J(ipoint,3) * tmp_M(ipoint,3))) + enddo + + noL_0e = noL_0e -2.d0 * (sum(tmp)) + + deallocate(tmp) + + endif + + + call wall_time(t1) + write(*,"(A,2X,F15.7)") ' wall time for noL_0e (sec) = ', (t1 - t0) + + return +end + +! --- + diff --git a/plugins/local/tc_int/no_1e.irp.f b/plugins/local/tc_int/no_1e.irp.f new file mode 100644 index 00000000..3a990276 --- /dev/null +++ b/plugins/local/tc_int/no_1e.irp.f @@ -0,0 +1,602 @@ + +! --- + +subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, noL_1e) + + implicit none + + integer, intent(in) :: n_grid, n_mo + integer, intent(in) :: ne_a, ne_b + double precision, intent(in) :: wr1(n_grid) + double precision, intent(in) :: mos_l_in_r(n_grid,n_mo) + double precision, intent(in) :: mos_r_in_r(n_grid,n_mo) + double precision, intent(in) :: int2_grad1_u12(n_grid,3,n_mo,n_mo) + double precision, intent(out) :: noL_1e(n_mo,n_mo) + + integer :: p, s, i, j, ipoint + double precision :: t0, t1 + double precision, allocatable :: tmp1(:,:,:,:), tmp2(:,:), tmp3(:,:,:), tmp4(:,:,:) + double precision, allocatable :: tmp_L(:,:,:), tmp_R(:,:,:), tmp_M(:,:), tmp_S(:), tmp_O(:), tmp_J(:,:) + double precision, allocatable :: tmp_L0(:,:,:), tmp_R0(:,:,:) + double precision, allocatable :: tmp_M_priv(:,:), tmp_S_priv(:), tmp_O_priv(:), tmp_J_priv(:,:) + + + call wall_time(t0) + + + if(ne_a .eq. 
ne_b) then + + allocate(tmp_O(n_grid), tmp_J(n_grid,3)) + tmp_O = 0.d0 + tmp_J = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_O, tmp_J) + + allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) + tmp_O_priv = 0.d0 + tmp_J_priv = 0.d0 + + !$OMP DO + do i = 1, ne_b + do ipoint = 1, n_grid + tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmp_O = tmp_O + tmp_O_priv + tmp_J = tmp_J + tmp_J_priv + !$OMP END CRITICAL + + deallocate(tmp_O_priv, tmp_J_priv) + !$OMP END PARALLEL + + ! --- + + allocate(tmp_M(n_grid,3), tmp_S(n_grid)) + tmp_M = 0.d0 + tmp_S = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_M, tmp_S) + + allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) + tmp_M_priv = 0.d0 + tmp_S_priv = 0.d0 + + !$OMP DO COLLAPSE(2) + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmp_M = tmp_M + tmp_M_priv + tmp_S = tmp_S + tmp_S_priv + !$OMP END CRITICAL + + deallocate(tmp_M_priv, tmp_S_priv) + !$OMP END PARALLEL + + ! 
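---

    ! Packing step: the scalar weight and the three Cartesian components are
    ! stored side by side so that the whole quadrature sum collapses into one
    ! BLAS-2 call below,
    !   noL_1e(p,s) = 2 * sum_{ipoint, m=1..4} tmp1(ipoint,m,p,s) * tmp2(ipoint,m),
    ! i.e. a dgemv('T', ...) with tmp1 viewed as a (4*n_grid) x (mo_num*mo_num)
    ! matrix. Minimal commented-out sketch of the same idiom (illustrative
    ! names, assumes a linked BLAS):
    !
    !   integer, parameter :: n = 10, m = 3
    !   double precision   :: A(n,m*m), x(n), y(m*m)
    !   call random_number(A)
    !   call random_number(x)
    !   call dgemv('T', n, m*m, 2.d0, A, n, x, 1, 0.d0, y, 1)  ! y = 2 * A**T x

    !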
--- + + allocate(tmp2(n_grid,4)) + allocate(tmp1(n_grid,4,mo_num,mo_num)) + + do ipoint = 1, n_grid + + tmp2(ipoint,1) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,1) - tmp_M(ipoint,1)) + tmp2(ipoint,2) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,2) - tmp_M(ipoint,2)) + tmp2(ipoint,3) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,3) - tmp_M(ipoint,3)) + tmp2(ipoint,4) = -wr1(ipoint) * tmp_O(ipoint) + + tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1) * tmp_J(ipoint,1) + tmp_J(ipoint,2) * tmp_J(ipoint,2) + tmp_J(ipoint,3) * tmp_J(ipoint,3)) - tmp_S(ipoint) + enddo + + deallocate(tmp_O, tmp_M) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(mo_num, ne_b, n_grid, & + !$OMP int2_grad1_u12, tmp1) + + !$OMP DO COLLAPSE(2) + do s = 1, mo_num + do p = 1, mo_num + + do ipoint = 1, n_grid + tmp1(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmp1(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmp1(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + enddo + + tmp1(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmp1(ipoint,4,p,s) = tmp1(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo + enddo + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + call dgemv( 'T', 4*n_grid, mo_num*mo_num, 2.d0 & + , tmp1(1,1,1,1), size(tmp1, 1) * size(tmp1, 2) & + , tmp2(1,1), 1 & + , 0.d0, noL_1e(1,1), 1) + + deallocate(tmp1, tmp2) + + ! --- + + allocate(tmp_L(n_grid,3,mo_num)) + allocate(tmp_R(n_grid,3,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(ne_b, n_grid, mo_num, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_L, tmp_R) + + !$OMP DO + do p = 1, mo_num + + tmp_L(:,1:3,p) = 0.d0 + tmp_R(:,1:3,p) = 0.d0 + + do i = 1, ne_b + do ipoint = 1, n_grid + + tmp_L(ipoint,1,p) = tmp_L(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmp_L(ipoint,2,p) = tmp_L(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmp_L(ipoint,3,p) = tmp_L(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + + tmp_R(ipoint,1,p) = tmp_R(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmp_R(ipoint,2,p) = tmp_R(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmp_R(ipoint,3,p) = tmp_R(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + enddo + enddo + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + ! 
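---

    ! Second contraction: tmp3/tmp4 pack five grid functions per orbital so
    ! that the remaining trace is one dgemm with beta = 1.d0, accumulating on
    ! top of the dgemv result above:
    !   noL_1e(p,s) += sum_{ipoint, m=1..5} tmp3(ipoint,m,p) * tmp4(ipoint,m,s)
    ! The pairing is: (1) w * phi_l(p) against the J/S-dressed right vector,
    ! (2) the J-dressed left vector against w * phi_r(s), and (3-5) the
    ! Cartesian components of w * tmp_L(p) against tmp_R(s).

    !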
--- + + allocate(tmp3(n_grid,5,mo_num)) + allocate(tmp4(n_grid,5,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, j, ipoint) & + !$OMP SHARED(ne_b, n_grid, mo_num, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, wr1, & + !$OMP tmp_L, tmp_R, tmp_J, tmp_S, tmp3, tmp4) + + !$OMP DO + do p = 1, mo_num + + do ipoint = 1, n_grid + + tmp3(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) + tmp3(ipoint,2,p) = -2.d0 * (tmp_L(ipoint,1,p) * tmp_J(ipoint,1) + tmp_L(ipoint,2,p) * tmp_J(ipoint,2) + tmp_L(ipoint,3,p) * tmp_J(ipoint,3)) + tmp3(ipoint,3,p) = wr1(ipoint) * tmp_L(ipoint,1,p) + tmp3(ipoint,4,p) = wr1(ipoint) * tmp_L(ipoint,2,p) + tmp3(ipoint,5,p) = wr1(ipoint) * tmp_L(ipoint,3,p) + + tmp4(ipoint,1,p) = -2.d0 * (tmp_R(ipoint,1,p) * tmp_J(ipoint,1) + tmp_R(ipoint,2,p) * tmp_J(ipoint,2) + tmp_R(ipoint,3,p) * tmp_J(ipoint,3)) & + + mos_r_in_r(ipoint,p) * tmp_S(ipoint) + tmp4(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) + tmp4(ipoint,3,p) = tmp_R(ipoint,1,p) + tmp4(ipoint,4,p) = tmp_R(ipoint,2,p) + tmp4(ipoint,5,p) = tmp_R(ipoint,3,p) + enddo + + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + + tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + enddo ! ipoint + enddo ! j + enddo ! i + + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmp_L, tmp_R, tmp_J, tmp_S) + + call dgemm( 'T', 'N', mo_num, mo_num, 5*n_grid, 1.d0 & + , tmp3(1,1,1), 5*n_grid, tmp4(1,1,1), 5*n_grid & + , 1.d0, noL_1e(1,1), mo_num) + + deallocate(tmp3, tmp4) + + ! --- + + else + + allocate(tmp_O(n_grid), tmp_J(n_grid,3)) + tmp_O = 0.d0 + tmp_J = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_O, tmp_J) + + allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) + tmp_O_priv = 0.d0 + tmp_J_priv = 0.d0 + + !$OMP DO + do i = 1, ne_b + do ipoint = 1, n_grid + tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) + tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) + tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmp_O = tmp_O + tmp_O_priv + tmp_J = tmp_J + tmp_J_priv + !$OMP END CRITICAL + + deallocate(tmp_O_priv, tmp_J_priv) + !$OMP END PARALLEL + + ! 
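---

    ! Same thread-private reduction pattern as above: each thread accumulates
    ! grid-resolved intermediates into a private copy, and the copies are
    ! merged once inside a CRITICAL section, which is far cheaper than an
    ! atomic update per grid point (an array REDUCTION clause would be an
    ! OpenMP >= 4.5 alternative). Commented-out sketch of the pattern, with
    ! illustrative names:
    !
    !   acc = 0.d0
    !   !$OMP PARALLEL PRIVATE(i, ipoint, acc_priv) SHARED(acc, ne, n_grid, phi_l, phi_r)
    !   acc_priv = 0.d0
    !   !$OMP DO
    !   do i = 1, ne
    !     do ipoint = 1, n_grid
    !       acc_priv(ipoint) = acc_priv(ipoint) + phi_l(ipoint,i) * phi_r(ipoint,i)
    !     enddo
    !   enddo
    !   !$OMP END DO NOWAIT
    !   !$OMP CRITICAL
    !   acc = acc + acc_priv
    !   !$OMP END CRITICAL
    !   !$OMP END PARALLEL

    !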
--- + + allocate(tmp_M(n_grid,3), tmp_S(n_grid)) + tmp_M = 0.d0 + tmp_S = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_M, tmp_S) + + allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) + tmp_M_priv = 0.d0 + tmp_S_priv = 0.d0 + + !$OMP DO COLLAPSE(2) + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO COLLAPSE(2) + do i = ne_b+1, ne_a + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + + tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO COLLAPSE(2) + do i = ne_b+1, ne_a + do j = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmp_M = tmp_M + tmp_M_priv + tmp_S = tmp_S + tmp_S_priv + !$OMP END CRITICAL + + deallocate(tmp_M_priv, tmp_S_priv) + !$OMP END PARALLEL + + ! 
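---

    ! Open-shell counting: the singly-occupied orbitals (i = ne_b+1 .. ne_a)
    ! enter every intermediate with weight 0.5, and the mixed closed/open
    ! blocks pick up both (i,j) and (j,i) orderings. Note that tmp_S is
    ! overwritten in the loop below as 2*|tmp_J|^2 - tmp_S before being used.

    !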
--- + + allocate(tmp2(n_grid,4)) + allocate(tmp1(n_grid,4,mo_num,mo_num)) + + do ipoint = 1, n_grid + + tmp2(ipoint,1) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,1) - tmp_M(ipoint,1)) + tmp2(ipoint,2) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,2) - tmp_M(ipoint,2)) + tmp2(ipoint,3) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,3) - tmp_M(ipoint,3)) + tmp2(ipoint,4) = -wr1(ipoint) * tmp_O(ipoint) + + tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1) * tmp_J(ipoint,1) + tmp_J(ipoint,2) * tmp_J(ipoint,2) + tmp_J(ipoint,3) * tmp_J(ipoint,3)) - tmp_S(ipoint) + enddo + + deallocate(tmp_O, tmp_M) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(mo_num, ne_b, n_grid, & + !$OMP ne_a, int2_grad1_u12, tmp1) + + !$OMP DO COLLAPSE(2) + do s = 1, mo_num + do p = 1, mo_num + + do ipoint = 1, n_grid + tmp1(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmp1(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmp1(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + enddo + + tmp1(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmp1(ipoint,4,p,s) = tmp1(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo + enddo + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmp1(ipoint,4,p,s) = tmp1(ipoint,4,p,s) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo + enddo + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + call dgemv( 'T', 4*n_grid, mo_num*mo_num, 2.d0 & + , tmp1(1,1,1,1), size(tmp1, 1) * size(tmp1, 2) & + , tmp2(1,1), 1 & + , 0.d0, noL_1e(1,1), 1) + + deallocate(tmp1, tmp2) + + ! 
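---

    ! tmp_L0/tmp_R0 keep the open-shell-only halves (weight 0.5) separately,
    ! because they enter the final trace with their own weight (components
    ! 6-8 of tmp3/tmp4 below); tmp_L/tmp_R then add the closed-shell part on
    ! top of them.

    !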
--- + + allocate(tmp_L(n_grid,3,mo_num), tmp_L0(n_grid,3,mo_num)) + allocate(tmp_R(n_grid,3,mo_num), tmp_R0(n_grid,3,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(ne_b, ne_a, n_grid, mo_num, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmp_L0, tmp_R0, tmp_L, tmp_R) + + !$OMP DO + do p = 1, mo_num + + tmp_L0(:,1:3,p) = 0.d0 + tmp_R0(:,1:3,p) = 0.d0 + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmp_L0(ipoint,1,p) = tmp_L0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmp_L0(ipoint,2,p) = tmp_L0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmp_L0(ipoint,3,p) = tmp_L0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + + tmp_R0(ipoint,1,p) = tmp_R0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmp_R0(ipoint,2,p) = tmp_R0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmp_R0(ipoint,3,p) = tmp_R0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + enddo + enddo + + tmp_L(:,1:3,p) = tmp_L0(:,1:3,p) + tmp_R(:,1:3,p) = tmp_R0(:,1:3,p) + do i = 1, ne_b + do ipoint = 1, n_grid + + tmp_L(ipoint,1,p) = tmp_L(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmp_L(ipoint,2,p) = tmp_L(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmp_L(ipoint,3,p) = tmp_L(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + + tmp_R(ipoint,1,p) = tmp_R(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmp_R(ipoint,2,p) = tmp_R(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmp_R(ipoint,3,p) = tmp_R(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + enddo + enddo + + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + ! 
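---

    ! Final contraction for the open-shell case: eight packed grid functions
    ! per orbital instead of five, contracted in one dgemm with beta = 1.d0 so
    ! that the dgemv result above is kept:
    !   noL_1e(p,s) += sum_{ipoint, m=1..8} tmp3(ipoint,m,p) * tmp4(ipoint,m,s)

    !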
--- + + allocate(tmp3(n_grid,8,mo_num)) + allocate(tmp4(n_grid,8,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, j, ipoint) & + !$OMP SHARED(ne_b, ne_a, n_grid, mo_num, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, wr1, & + !$OMP tmp_L, tmp_L0, tmp_R, tmp_R0, tmp_J, tmp_S, tmp3, tmp4) + + !$OMP DO + do p = 1, mo_num + + do ipoint = 1, n_grid + + tmp3(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) + tmp3(ipoint,2,p) = -2.d0 * (tmp_L(ipoint,1,p) * tmp_J(ipoint,1) + tmp_L(ipoint,2,p) * tmp_J(ipoint,2) + tmp_L(ipoint,3,p) * tmp_J(ipoint,3)) + tmp3(ipoint,3,p) = wr1(ipoint) * tmp_L(ipoint,1,p) + tmp3(ipoint,4,p) = wr1(ipoint) * tmp_L(ipoint,2,p) + tmp3(ipoint,5,p) = wr1(ipoint) * tmp_L(ipoint,3,p) + tmp3(ipoint,6,p) = wr1(ipoint) * tmp_L0(ipoint,1,p) + tmp3(ipoint,7,p) = wr1(ipoint) * tmp_L0(ipoint,2,p) + tmp3(ipoint,8,p) = wr1(ipoint) * tmp_L0(ipoint,3,p) + + tmp4(ipoint,1,p) = -2.d0 * (tmp_R(ipoint,1,p) * tmp_J(ipoint,1) + tmp_R(ipoint,2,p) * tmp_J(ipoint,2) + tmp_R(ipoint,3,p) * tmp_J(ipoint,3)) & + + mos_r_in_r(ipoint,p) * tmp_S(ipoint) + tmp4(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) + tmp4(ipoint,3,p) = tmp_R(ipoint,1,p) + tmp4(ipoint,4,p) = tmp_R(ipoint,2,p) + tmp4(ipoint,5,p) = tmp_R(ipoint,3,p) + tmp4(ipoint,6,p) = tmp_R0(ipoint,1,p) + tmp4(ipoint,7,p) = tmp_R0(ipoint,2,p) + tmp4(ipoint,8,p) = tmp_R0(ipoint,3,p) + enddo + + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + + tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + enddo ! ipoint + enddo ! j + enddo ! i + + do i = ne_b+1, ne_a + do j = 1, ne_b + do ipoint = 1, n_grid + + tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,p,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,p,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,p,j) * int2_grad1_u12(ipoint,3,j,i) ) + + tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,j,i) * int2_grad1_u12(ipoint,1,i,p) & + + int2_grad1_u12(ipoint,2,j,i) * int2_grad1_u12(ipoint,2,i,p) & + + int2_grad1_u12(ipoint,3,j,i) * int2_grad1_u12(ipoint,3,i,p) ) + enddo ! ipoint + enddo ! j + enddo ! 
i + + do i = ne_b+1, ne_a + do j = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + + tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + enddo ! ipoint + enddo ! j + enddo ! i + + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmp_L0, tmp_L, tmp_R0, tmp_R, tmp_J, tmp_S) + + call dgemm( 'T', 'N', mo_num, mo_num, 8*n_grid, 1.d0 & + , tmp3(1,1,1), 8*n_grid, tmp4(1,1,1), 8*n_grid & + , 1.d0, noL_1e(1,1), mo_num) + + deallocate(tmp3, tmp4) + + endif + + + call wall_time(t1) + write(*,"(A,2X,F15.7)") ' wall time for noL_1e (sec) = ', (t1 - t0) + + return +end + +! --- + diff --git a/plugins/local/tc_int/no_2e.irp.f b/plugins/local/tc_int/no_2e.irp.f new file mode 100644 index 00000000..6d31d6a0 --- /dev/null +++ b/plugins/local/tc_int/no_2e.irp.f @@ -0,0 +1,605 @@ + +! --- + +subroutine provide_no_2e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, noL_2e) + + implicit none + + integer, intent(in) :: n_grid, n_mo + integer, intent(in) :: ne_a, ne_b + double precision, intent(in) :: wr1(n_grid) + double precision, intent(in) :: mos_l_in_r(n_grid,n_mo) + double precision, intent(in) :: mos_r_in_r(n_grid,n_mo) + double precision, intent(in) :: int2_grad1_u12(n_grid,3,n_mo,n_mo) + double precision, intent(out) :: noL_2e(n_mo,n_mo,n_mo,n_mo) + + integer :: p, q, s, t, i, ipoint + double precision :: t0, t1 + double precision, allocatable :: tmpO(:), tmpJ(:,:) + double precision, allocatable :: tmpA(:,:,:), tmpB(:,:,:) + double precision, allocatable :: tmpC(:,:,:,:), tmpD(:,:,:,:) + double precision, allocatable :: tmpE(:,:,:,:) + + + call wall_time(t0) + + if(ne_a .eq. 
ne_b) then + + allocate(tmpO(n_grid), tmpJ(n_grid,3)) + allocate(tmpA(n_grid,3,n_mo), tmpB(n_grid,3,n_mo)) + allocate(tmpC(n_grid,4,n_mo,n_mo), tmpD(n_grid,4,n_mo,n_mo)) + allocate(tmpE(n_mo,n_mo,n_mo,n_mo)) + + tmpO = 0.d0 + tmpJ = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpO(ipoint) = tmpO(ipoint) + wr1(ipoint) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ(ipoint,1) = tmpJ(ipoint,1) + wr1(ipoint) * int2_grad1_u12(ipoint,1,i,i) + tmpJ(ipoint,2) = tmpJ(ipoint,2) + wr1(ipoint) * int2_grad1_u12(ipoint,2,i,i) + tmpJ(ipoint,3) = tmpJ(ipoint,3) + wr1(ipoint) * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB) + + !$OMP DO + do p = 1, n_mo + + tmpA(:,:,p) = 0.d0 + tmpB(:,:,p) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpA(ipoint,1,p) = tmpA(ipoint,1,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,p,i) + tmpA(ipoint,2,p) = tmpA(ipoint,2,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,p,i) + tmpA(ipoint,3,p) = tmpA(ipoint,3,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,p,i) + tmpB(ipoint,1,p) = tmpB(ipoint,1,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,i,p) + tmpB(ipoint,2,p) = tmpB(ipoint,2,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,i,p) + tmpB(ipoint,3,p) = tmpB(ipoint,3,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,i,p) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB, tmpO, tmpJ, tmpC, tmpD) + + !$OMP DO COLLAPSE(2) + do s = 1, n_mo + do p = 1, n_mo + + do ipoint = 1, n_grid + + tmpC(ipoint,1,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,1,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,1,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,1,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,1) + tmpC(ipoint,2,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,2,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,2,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,2,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,2) + tmpC(ipoint,3,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,3,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,3,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,3,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,3) + + tmpD(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpD(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpD(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + tmpD(ipoint,4,p,s) = wr1(ipoint) * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) + + enddo ! ipoint + + tmpC(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) += int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo ! ipoint + enddo ! i + + enddo ! p + enddo ! 
s + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmpO, tmpJ, tmpA, tmpB) + + + call dgemm( 'T', 'N', n_mo*n_mo, n_mo*n_mo, 4*n_grid, 0.5d0 & + , tmpC(1,1,1,1), 4*n_grid, tmpD(1,1,1,1), 4*n_grid & + , 0.d0, tmpE(1,1,1,1), n_mo*n_mo) + + deallocate(tmpC, tmpD) + + call sum_a_at(tmpE, n_mo*n_mo) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(t, s, q, p) & + !$OMP SHARED(n_mo, tmpE, noL_2e) + + !$OMP DO COLLAPSE(3) + do t = 1, n_mo + do s = 1, n_mo + do q = 1, n_mo + do p = 1, n_mo + noL_2e(p,q,s,t) = tmpE(p,s,q,t) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmpE) + + else + + allocate(tmpO(n_grid), tmpJ(n_grid,3)) + allocate(tmpA(n_grid,3,n_mo), tmpB(n_grid,3,n_mo)) + allocate(tmpC(n_grid,4,n_mo,n_mo), tmpD(n_grid,4,n_mo,n_mo)) + allocate(tmpE(n_mo,n_mo,n_mo,n_mo)) + + tmpO = 0.d0 + tmpJ = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpO(ipoint) = tmpO(ipoint) + wr1(ipoint) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ(ipoint,1) = tmpJ(ipoint,1) + wr1(ipoint) * int2_grad1_u12(ipoint,1,i,i) + tmpJ(ipoint,2) = tmpJ(ipoint,2) + wr1(ipoint) * int2_grad1_u12(ipoint,2,i,i) + tmpJ(ipoint,3) = tmpJ(ipoint,3) + wr1(ipoint) * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpO(ipoint) = tmpO(ipoint) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ(ipoint,1) = tmpJ(ipoint,1) + 0.5d0 * wr1(ipoint) * int2_grad1_u12(ipoint,1,i,i) + tmpJ(ipoint,2) = tmpJ(ipoint,2) + 0.5d0 * wr1(ipoint) * int2_grad1_u12(ipoint,2,i,i) + tmpJ(ipoint,3) = tmpJ(ipoint,3) + 0.5d0 * wr1(ipoint) * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(n_mo, ne_a, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB) + + !$OMP DO + do p = 1, n_mo + + tmpA(:,:,p) = 0.d0 + tmpB(:,:,p) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpA(ipoint,1,p) = tmpA(ipoint,1,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,p,i) + tmpA(ipoint,2,p) = tmpA(ipoint,2,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,p,i) + tmpA(ipoint,3,p) = tmpA(ipoint,3,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,p,i) + tmpB(ipoint,1,p) = tmpB(ipoint,1,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,i,p) + tmpB(ipoint,2,p) = tmpB(ipoint,2,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,i,p) + tmpB(ipoint,3,p) = tmpB(ipoint,3,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,i,p) + enddo + enddo + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpA(ipoint,1,p) = tmpA(ipoint,1,p) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,p,i) + tmpA(ipoint,2,p) = tmpA(ipoint,2,p) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,p,i) + tmpA(ipoint,3,p) = tmpA(ipoint,3,p) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,p,i) + tmpB(ipoint,1,p) = tmpB(ipoint,1,p) + 0.5d0 * wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,i,p) + tmpB(ipoint,2,p) = tmpB(ipoint,2,p) + 0.5d0 * wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,i,p) + tmpB(ipoint,3,p) = tmpB(ipoint,3,p) + 0.5d0 * wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,i,p) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_a, ne_b, 
n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB, tmpO, tmpJ, tmpC, tmpD) + + !$OMP DO COLLAPSE(2) + do s = 1, n_mo + do p = 1, n_mo + + do ipoint = 1, n_grid + + tmpC(ipoint,1,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,1,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,1,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,1,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,1) + tmpC(ipoint,2,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,2,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,2,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,2,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,2) + tmpC(ipoint,3,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,3,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,3,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,3,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,3) + + tmpD(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpD(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpD(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + tmpD(ipoint,4,p,s) = wr1(ipoint) * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) + + enddo ! ipoint + + tmpC(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) += int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo ! ipoint + enddo ! i + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) += 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo ! ipoint + enddo ! i + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmpO, tmpJ, tmpA, tmpB) + + + call dgemm( 'T', 'N', n_mo*n_mo, n_mo*n_mo, 4*n_grid, 0.5d0 & + , tmpC(1,1,1,1), 4*n_grid, tmpD(1,1,1,1), 4*n_grid & + , 0.d0, tmpE(1,1,1,1), n_mo*n_mo) + + deallocate(tmpC, tmpD) + + call sum_a_at(tmpE, n_mo*n_mo) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(t, s, q, p) & + !$OMP SHARED(n_mo, tmpE, noL_2e) + + !$OMP DO COLLAPSE(3) + do t = 1, n_mo + do s = 1, n_mo + do q = 1, n_mo + do p = 1, n_mo + noL_2e(p,q,s,t) = tmpE(p,s,q,t) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmpE) + + endif + + call wall_time(t1) + write(*,"(A,2X,F15.7)") ' wall time for noL_2e (sec) = ', (t1 - t0) + + return +end + +! --- + +subroutine provide_no_2e_tmp(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + tmpO, tmpJ, tmpA, tmpB, tmpC, tmpD, tmpE, noL_2e) + + implicit none + + integer, intent(in) :: n_grid, n_mo + integer, intent(in) :: ne_a, ne_b + double precision, intent(in) :: wr1(n_grid) + double precision, intent(in) :: mos_l_in_r(n_grid,n_mo) + double precision, intent(in) :: mos_r_in_r(n_grid,n_mo) + double precision, intent(in) :: int2_grad1_u12(n_grid,3,n_mo,n_mo) + double precision, intent(out) :: tmpO(n_grid), tmpJ(n_grid,3) + double precision, intent(out) :: tmpA(n_grid,3,n_mo), tmpB(n_grid,3,n_mo) + double precision, intent(out) :: tmpC(n_grid,4,n_mo,n_mo), tmpD(n_grid,4,n_mo,n_mo) + double precision, intent(out) :: tmpE(n_mo,n_mo,n_mo,n_mo) + double precision, intent(out) :: noL_2e(n_mo,n_mo,n_mo,n_mo) + + integer :: p, q, s, t, i, ipoint + double precision :: t0, t1 + + + call wall_time(t0) + + if(ne_a .eq. 
ne_b) then + + tmpO = 0.d0 + tmpJ = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpO(ipoint) = tmpO(ipoint) + wr1(ipoint) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ(ipoint,1) = tmpJ(ipoint,1) + wr1(ipoint) * int2_grad1_u12(ipoint,1,i,i) + tmpJ(ipoint,2) = tmpJ(ipoint,2) + wr1(ipoint) * int2_grad1_u12(ipoint,2,i,i) + tmpJ(ipoint,3) = tmpJ(ipoint,3) + wr1(ipoint) * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB) + + !$OMP DO + do p = 1, n_mo + + tmpA(:,:,p) = 0.d0 + tmpB(:,:,p) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpA(ipoint,1,p) = tmpA(ipoint,1,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,p,i) + tmpA(ipoint,2,p) = tmpA(ipoint,2,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,p,i) + tmpA(ipoint,3,p) = tmpA(ipoint,3,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,p,i) + tmpB(ipoint,1,p) = tmpB(ipoint,1,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,i,p) + tmpB(ipoint,2,p) = tmpB(ipoint,2,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,i,p) + tmpB(ipoint,3,p) = tmpB(ipoint,3,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,i,p) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB, tmpO, tmpJ, tmpC, tmpD) + + !$OMP DO COLLAPSE(2) + do s = 1, n_mo + do p = 1, n_mo + + do ipoint = 1, n_grid + + tmpC(ipoint,1,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,1,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,1,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,1,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,1) + tmpC(ipoint,2,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,2,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,2,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,2,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,2) + tmpC(ipoint,3,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,3,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,3,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,3,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,3) + + tmpD(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpD(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpD(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + tmpD(ipoint,4,p,s) = wr1(ipoint) * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) + + enddo ! ipoint + + tmpC(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) += int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo ! ipoint + enddo ! i + + enddo ! p + enddo ! 
s + !$OMP END DO + !$OMP END PARALLEL + + + call dgemm( 'T', 'N', n_mo*n_mo, n_mo*n_mo, 4*n_grid, 0.5d0 & + , tmpC(1,1,1,1), 4*n_grid, tmpD(1,1,1,1), 4*n_grid & + , 0.d0, tmpE(1,1,1,1), n_mo*n_mo) + + call sum_a_at(tmpE, n_mo*n_mo) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(t, s, q, p) & + !$OMP SHARED(n_mo, tmpE, noL_2e) + + !$OMP DO COLLAPSE(3) + do t = 1, n_mo + do s = 1, n_mo + do q = 1, n_mo + do p = 1, n_mo + noL_2e(p,q,s,t) = tmpE(p,s,q,t) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + else + + tmpO = 0.d0 + tmpJ = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpO(ipoint) = tmpO(ipoint) + wr1(ipoint) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ(ipoint,1) = tmpJ(ipoint,1) + wr1(ipoint) * int2_grad1_u12(ipoint,1,i,i) + tmpJ(ipoint,2) = tmpJ(ipoint,2) + wr1(ipoint) * int2_grad1_u12(ipoint,2,i,i) + tmpJ(ipoint,3) = tmpJ(ipoint,3) + wr1(ipoint) * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpO(ipoint) = tmpO(ipoint) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ(ipoint,1) = tmpJ(ipoint,1) + 0.5d0 * wr1(ipoint) * int2_grad1_u12(ipoint,1,i,i) + tmpJ(ipoint,2) = tmpJ(ipoint,2) + 0.5d0 * wr1(ipoint) * int2_grad1_u12(ipoint,2,i,i) + tmpJ(ipoint,3) = tmpJ(ipoint,3) + 0.5d0 * wr1(ipoint) * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(n_mo, ne_a, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB) + + !$OMP DO + do p = 1, n_mo + + tmpA(:,:,p) = 0.d0 + tmpB(:,:,p) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpA(ipoint,1,p) = tmpA(ipoint,1,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,p,i) + tmpA(ipoint,2,p) = tmpA(ipoint,2,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,p,i) + tmpA(ipoint,3,p) = tmpA(ipoint,3,p) + wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,p,i) + tmpB(ipoint,1,p) = tmpB(ipoint,1,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,i,p) + tmpB(ipoint,2,p) = tmpB(ipoint,2,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,i,p) + tmpB(ipoint,3,p) = tmpB(ipoint,3,p) + wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,i,p) + enddo + enddo + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpA(ipoint,1,p) = tmpA(ipoint,1,p) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,p,i) + tmpA(ipoint,2,p) = tmpA(ipoint,2,p) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,p,i) + tmpA(ipoint,3,p) = tmpA(ipoint,3,p) + 0.5d0 * wr1(ipoint) * mos_l_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,p,i) + tmpB(ipoint,1,p) = tmpB(ipoint,1,p) + 0.5d0 * wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,1,i,p) + tmpB(ipoint,2,p) = tmpB(ipoint,2,p) + 0.5d0 * wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,2,i,p) + tmpB(ipoint,3,p) = tmpB(ipoint,3,p) + 0.5d0 * wr1(ipoint) * mos_r_in_r(ipoint,i) * int2_grad1_u12(ipoint,3,i,p) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_a, ne_b, n_grid, & + !$OMP wr1, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, & + !$OMP tmpA, tmpB, tmpO, tmpJ, tmpC, tmpD) + + !$OMP DO COLLAPSE(2) + do s = 1, n_mo + do p = 1, n_mo + + do ipoint = 1, n_grid + + tmpC(ipoint,1,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,1,p) & + + 
mos_l_in_r(ipoint,p) * tmpB(ipoint,1,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,1,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,1) + tmpC(ipoint,2,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,2,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,2,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,2,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,2) + tmpC(ipoint,3,p,s) = mos_r_in_r(ipoint,s) * tmpA(ipoint,3,p) & + + mos_l_in_r(ipoint,p) * tmpB(ipoint,3,s) & + - tmpO(ipoint) * int2_grad1_u12(ipoint,3,p,s) & + - 2.d0 * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) * tmpJ(ipoint,3) + + tmpD(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpD(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpD(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + tmpD(ipoint,4,p,s) = wr1(ipoint) * mos_l_in_r(ipoint,p) * mos_r_in_r(ipoint,s) + + enddo ! ipoint + + tmpC(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) += int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo ! ipoint + enddo ! i + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) += 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo ! ipoint + enddo ! i + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + + call dgemm( 'T', 'N', n_mo*n_mo, n_mo*n_mo, 4*n_grid, 0.5d0 & + , tmpC(1,1,1,1), 4*n_grid, tmpD(1,1,1,1), 4*n_grid & + , 0.d0, tmpE(1,1,1,1), n_mo*n_mo) + + call sum_a_at(tmpE, n_mo*n_mo) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(t, s, q, p) & + !$OMP SHARED(n_mo, tmpE, noL_2e) + + !$OMP DO COLLAPSE(3) + do t = 1, n_mo + do s = 1, n_mo + do q = 1, n_mo + do p = 1, n_mo + noL_2e(p,q,s,t) = tmpE(p,s,q,t) + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + endif + + call wall_time(t1) + write(*,"(A,2X,F15.7)") ' wall time for noL_2e & tmp tensors (sec) = ', (t1 - t0) + + return +end + +! 
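---

! sum_a_at (defined elsewhere in the tree) is assumed here to symmetrize its
! argument in place, A := A + A**T, with tmpE viewed as an
! (n_mo*n_mo) x (n_mo*n_mo) matrix; together with the 0.5 factor in the dgemm
! above this builds the full symmetrized contraction. A self-contained sketch
! of that assumed behaviour (illustrative name, not an existing routine):

subroutine sum_a_at_sketch(A, n)

  implicit none
  integer,          intent(in)    :: n
  double precision, intent(inout) :: A(n,n)
  integer                         :: i, j
  double precision                :: tmp

  do j = 1, n
    do i = j+1, n
      tmp    = A(i,j) + A(j,i)
      A(i,j) = tmp
      A(j,i) = tmp
    enddo
    A(j,j) = 2.d0 * A(j,j) ! diagonal doubles under A + A**T
  enddo

  return
end

!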
--- + + diff --git a/plugins/local/tc_int/uninstall b/plugins/local/tc_int/uninstall index 3dd3612c..e37a5491 100755 --- a/plugins/local/tc_int/uninstall +++ b/plugins/local/tc_int/uninstall @@ -9,5 +9,5 @@ then fi rm -rf ${PWD}/CuTC -rm ${QP_ROOT}/lib/libtc_int_cu.so +rm ${QP_ROOT}/lib/libcutcint.so diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index 212518ee..2a3dc4d1 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -120,12 +120,12 @@ subroutine do_work_on_gpu() call wall_time(cuda_time0) print*, ' start CUDA kernel' - call tc_int_c(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - final_grid_points, final_weight_at_r_vector, & - final_grid_points_extra, final_weight_at_r_vector_extra, & - rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & - int2_grad1_u12_ao, int_2e_ao) + call cutc_int_c(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao, int_2e_ao) call wall_time(cuda_time1) print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 From 4e9143133ac19cbfbc8dbda1f358331da6ef5277 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Sat, 10 Aug 2024 18:13:23 +0200 Subject: [PATCH 16/19] added 1e-noL with CUDA --- plugins/local/tc_int/cutc_module.F90 | 51 + plugins/local/tc_int/deb_no_1e_gpu.irp.f | 499 +++++++++ plugins/local/tc_int/deb_no_2e_gpu.irp.f | 1 - plugins/local/tc_int/deb_tc_int_cuda.irp.f | 6 +- plugins/local/tc_int/no_1e.irp.f | 1101 +++++++++++++++----- 5 files changed, 1394 insertions(+), 264 deletions(-) create mode 100644 plugins/local/tc_int/deb_no_1e_gpu.irp.f diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index d7b922cd..d1466697 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -119,6 +119,57 @@ module cutc_module ! --- + subroutine cutc_no_1e(n_grid1, n_mo, ne_a, ne_b, & + wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + no_1e) bind(C, name = "cutc_no_1e") + + import c_int, c_double, c_ptr + + integer(c_int), intent(in), value :: n_grid1 + integer(c_int), intent(in), value :: n_mo + integer(c_int), intent(in), value :: ne_a + integer(c_int), intent(in), value :: ne_b + real(c_double), intent(in) :: wr1(n_grid1) + real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) + real(c_double), intent(out) :: no_1e(n_mo,n_mo) + + end subroutine cutc_no_1e + + ! 
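---

    ! Binding convention used by every routine in this interface block:
    ! scalar dummies carry the VALUE attribute so they match C ints passed by
    ! value, while arrays are passed by reference and map to plain pointers on
    ! the C/CUDA side. Commented-out minimal example of the same pattern
    ! (hypothetical kernel name, not exported by the library):
    !
    !   subroutine demo_kernel(n, x) bind(C, name = "demo_kernel")
    !     import c_int, c_double
    !     integer(c_int), intent(in), value :: n    ! C prototype: int n
    !     real(c_double), intent(inout)     :: x(n) ! C prototype: double *x
    !   end subroutine demo_kernel

    !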
--- + + subroutine deb_no_1e(n_grid1, n_mo, ne_a, ne_b, & + wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + tmpO, tmpJ, tmpM, tmpS, tmpC, tmpD, tmpL, tmpR, tmpE, tmpF, & + no_1e) bind(C, name = "deb_no_1e") + + import c_int, c_double, c_ptr + + integer(c_int), intent(in), value :: n_grid1 + integer(c_int), intent(in), value :: n_mo + integer(c_int), intent(in), value :: ne_a + integer(c_int), intent(in), value :: ne_b + real(c_double), intent(in) :: wr1(n_grid1) + real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) + real(c_double), intent(out) :: tmpO(n_grid1) + real(c_double), intent(out) :: tmpJ(n_grid1,3) + real(c_double), intent(out) :: tmpM(n_grid1,3) + real(c_double), intent(out) :: tmpS(n_grid1) + real(c_double), intent(out) :: tmpC(n_grid1,4,n_mo,n_mo) + real(c_double), intent(out) :: tmpD(n_grid1,4) + real(c_double), intent(out) :: tmpL(n_grid1,3,n_mo) + real(c_double), intent(out) :: tmpR(n_grid1,3,n_mo) + real(c_double), intent(out) :: tmpE(n_grid1,5,n_mo) + real(c_double), intent(out) :: tmpF(n_grid1,5,n_mo) + real(c_double), intent(out) :: no_1e(n_mo,n_mo) + + end subroutine deb_no_1e + + ! --- + end interface end module cutc_module diff --git a/plugins/local/tc_int/deb_no_1e_gpu.irp.f b/plugins/local/tc_int/deb_no_1e_gpu.irp.f new file mode 100644 index 00000000..1efbb913 --- /dev/null +++ b/plugins/local/tc_int/deb_no_1e_gpu.irp.f @@ -0,0 +1,499 @@ + +! --- + +subroutine deb_no_1e_gpu() + + use cutc_module + + implicit none + + integer :: i, j, k, l, ipoint + double precision :: acc_thr, err_tot, nrm_tot, err_loc + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) + double precision, allocatable :: noL_1e(:,:) + double precision, allocatable :: noL_1e_gpu(:,:) + + + PROVIDE mo_l_coef mo_r_coef + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(int2_grad1_u12_ao) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP 
END PARALLEL + + deallocate(tmp) + + ! --- + + allocate(noL_1e_gpu(mo_num,mo_num)) + + call cutc_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_1e_gpu(1,1)) + + ! --- + + allocate(noL_1e(mo_num,mo_num)) + + call provide_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_1e(1,1)) + + ! --- + + deallocate(int2_grad1_u12_bimo_t) + + acc_thr = 1d-12 + + err_tot = 0.d0 + nrm_tot = 0.d0 + do k = 1, mo_num + do l = 1, mo_num + err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k)) + if(err_loc > acc_thr) then + print*, " error on", l, k + print*, " CPU res", noL_1e (l,k) + print*, " GPU res", noL_1e_gpu(l,k) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(noL_1e(l,k)) + enddo + enddo + print *, ' absolute accuracy on noL_1e (%) =', 100.d0 * err_tot / nrm_tot + + deallocate(noL_1e) + deallocate(noL_1e_gpu) + + + return + +end + +! --- + +subroutine deb_no_1e_gpu_tmp() + + use cutc_module + + implicit none + + integer :: i, j, k, l, m, ipoint + double precision :: acc_thr, err_tot, nrm_tot, err_loc + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) + double precision, allocatable :: tmpO(:), tmpO_gpu(:) + double precision, allocatable :: tmpJ(:,:), tmpJ_gpu(:,:) + double precision, allocatable :: tmpM(:,:), tmpM_gpu(:,:) + double precision, allocatable :: tmpS(:), tmpS_gpu(:) + double precision, allocatable :: tmpC(:,:,:,:), tmpC_gpu(:,:,:,:) + double precision, allocatable :: tmpD(:,:), tmpD_gpu(:,:) + double precision, allocatable :: tmpL(:,:,:), tmpL_gpu(:,:,:) + double precision, allocatable :: tmpR(:,:,:), tmpR_gpu(:,:,:) + double precision, allocatable :: tmpE(:,:,:), tmpE_gpu(:,:,:) + double precision, allocatable :: tmpF(:,:,:), tmpF_gpu(:,:,:) + double precision, allocatable :: noL_1e(:,:), noL_1e_gpu(:,:) + + + ! 
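---

  ! The CPU/GPU checks below all repeat one element-wise pattern: stop at the
  ! first absolute deviation above acc_thr, otherwise report the accumulated
  ! error normalized by the L1 norm of the CPU result. Commented-out sketch of
  ! a helper that would factor this out (hypothetical name, the loops are kept
  ! inline below):
  !
  !   subroutine check_arrays(label, a, b, n, thr)
  !     implicit none
  !     character(*),     intent(in) :: label
  !     integer,          intent(in) :: n
  !     double precision, intent(in) :: a(n), b(n), thr
  !     integer          :: i
  !     double precision :: err, err_tot, nrm_tot
  !     err_tot = 0.d0
  !     nrm_tot = 0.d0
  !     do i = 1, n
  !       err = dabs(a(i) - b(i))
  !       if(err > thr) then
  !         print*, ' error on ', label, i, a(i), b(i)
  !         stop
  !       endif
  !       err_tot = err_tot + err
  !       nrm_tot = nrm_tot + dabs(a(i))
  !     enddo
  !     print*, ' accuracy on ', label, ' (%) =', 100.d0 * err_tot / nrm_tot
  !   end

  !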
--- + + + PROVIDE mo_l_coef mo_r_coef + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(int2_grad1_u12_ao) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmp) + + ! --- + + allocate(tmpO_gpu(n_points_final_grid)) + allocate(tmpJ_gpu(n_points_final_grid,3)) + allocate(tmpM_gpu(n_points_final_grid,3)) + allocate(tmpS_gpu(n_points_final_grid)) + allocate(tmpC_gpu(n_points_final_grid,4,mo_num,mo_num)) + allocate(tmpD_gpu(n_points_final_grid,4)) + allocate(tmpL_gpu(n_points_final_grid,3,mo_num)) + allocate(tmpR_gpu(n_points_final_grid,3,mo_num)) + allocate(tmpE_gpu(n_points_final_grid,5,mo_num)) + allocate(tmpF_gpu(n_points_final_grid,5,mo_num)) + allocate(noL_1e_gpu(mo_num,mo_num)) + + call deb_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), int2_grad1_u12_bimo_t(1,1,1,1), & + tmpO_gpu(1), tmpJ_gpu(1,1), tmpM_gpu(1,1), tmpS_gpu(1), tmpC_gpu(1,1,1,1), tmpD_gpu(1,1), & + tmpL_gpu(1,1,1), tmpR_gpu(1,1,1), tmpE_gpu(1,1,1), tmpF_gpu(1,1,1), noL_1e_gpu(1,1)) + + ! --- + + allocate(tmpO(n_points_final_grid)) + allocate(tmpJ(n_points_final_grid,3)) + allocate(tmpM(n_points_final_grid,3)) + allocate(tmpS(n_points_final_grid)) + allocate(tmpC(n_points_final_grid,4,mo_num,mo_num)) + allocate(tmpD(n_points_final_grid,4)) + allocate(tmpL(n_points_final_grid,3,mo_num)) + allocate(tmpR(n_points_final_grid,3,mo_num)) + allocate(tmpE(n_points_final_grid,5,mo_num)) + allocate(tmpF(n_points_final_grid,5,mo_num)) + allocate(noL_1e(mo_num,mo_num)) + + call provide_no_1e_tmp(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), int2_grad1_u12_bimo_t(1,1,1,1), & + tmpO(1), tmpJ(1,1), tmpM(1,1), tmpS(1), tmpC(1,1,1,1), tmpD(1,1), tmpL(1,1,1), tmpR(1,1,1), & + tmpE(1,1,1), tmpF(1,1,1), noL_1e(1,1)) + + ! --- + + deallocate(int2_grad1_u12_bimo_t) + + + acc_thr = 1d-12 + + ! --- + + ! 
tmpO(n_points_final_grid)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpO(ipoint) - tmpO_gpu(ipoint)) + if(err_loc > acc_thr) then + print*, " error on", ipoint + print*, " CPU res", tmpO (ipoint) + print*, " GPU res", tmpO_gpu(ipoint) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpO(ipoint)) + enddo + print *, ' absolute accuracy on tmpO (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpJ(n_points_final_grid,3)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpJ(ipoint,m) - tmpJ_gpu(ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m + print*, " CPU res", tmpJ (ipoint,m) + print*, " GPU res", tmpJ_gpu(ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpJ(ipoint,m)) + enddo + enddo + print *, ' absolute accuracy on tmpJ (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpM(n_points_final_grid,3)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpM(ipoint,m) - tmpM_gpu(ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m + print*, " CPU res", tmpM (ipoint,m) + print*, " GPU res", tmpM_gpu(ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpM(ipoint,m)) + enddo + enddo + print *, ' absolute accuracy on tmpM (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpS(n_points_final_grid)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpS(ipoint) - tmpS_gpu(ipoint)) + if(err_loc > acc_thr) then + print*, " error on", ipoint + print*, " CPU res", tmpS (ipoint) + print*, " GPU res", tmpS_gpu(ipoint) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpS(ipoint)) + enddo + print *, ' absolute accuracy on tmpS (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpC(n_points_final_grid,4,mo_num,mo_num)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do j = 1, mo_num + do m = 1, 4 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpC(ipoint,m,j,i) - tmpC_gpu(ipoint,m,j,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, j, i + print*, " CPU res", tmpC (ipoint,m,j,i) + print*, " GPU res", tmpC_gpu(ipoint,m,j,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpC(ipoint,m,j,i)) + enddo + enddo + enddo + enddo + print *, ' absolute accuracy on tmpC (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpD(n_points_final_grid,4)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do m = 1, 4 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpD(ipoint,m) - tmpD_gpu(ipoint,m)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m + print*, " CPU res", tmpD (ipoint,m) + print*, " GPU res", tmpD_gpu(ipoint,m) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpD(ipoint,m)) + enddo + enddo + print *, ' absolute accuracy on tmpD (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! 
tmpL(n_points_final_grid,3,mo_num)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpL(ipoint,m,i) - tmpL_gpu(ipoint,m,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i + print*, " CPU res", tmpL (ipoint,m,i) + print*, " GPU res", tmpL_gpu(ipoint,m,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpL(ipoint,m,i)) + enddo + enddo + enddo + print *, ' absolute accuracy on tmpL (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpR(n_points_final_grid,3,mo_num)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do m = 1, 3 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpR(ipoint,m,i) - tmpR_gpu(ipoint,m,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i + print*, " CPU res", tmpR (ipoint,m,i) + print*, " GPU res", tmpR_gpu(ipoint,m,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpR(ipoint,m,i)) + enddo + enddo + enddo + print *, ' absolute accuracy on tmpR (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpE(n_points_final_grid,5,mo_num)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do m = 1, 5 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpE(ipoint,m,i) - tmpE_gpu(ipoint,m,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i + print*, " CPU res", tmpE (ipoint,m,i) + print*, " GPU res", tmpE_gpu(ipoint,m,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpE(ipoint,m,i)) + enddo + enddo + enddo + print *, ' absolute accuracy on tmpE (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! tmpF(n_points_final_grid,5,mo_num)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do i = 1, mo_num + do m = 1, 5 + do ipoint = 1, n_points_final_grid + err_loc = dabs(tmpF(ipoint,m,i) - tmpF_gpu(ipoint,m,i)) + if(err_loc > acc_thr) then + print*, " error on", ipoint, m, i + print*, " CPU res", tmpF (ipoint,m,i) + print*, " GPU res", tmpF_gpu(ipoint,m,i) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(tmpF(ipoint,m,i)) + enddo + enddo + enddo + print *, ' absolute accuracy on tmpF (%) =', 100.d0 * err_tot / nrm_tot + + ! --- + + ! noL_1e(mo_num,mo_num)) + err_tot = 0.d0 + nrm_tot = 0.d0 + do k = 1, mo_num + do l = 1, mo_num + err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k)) + if(err_loc > acc_thr) then + print*, " error on", l, k + print*, " CPU res", noL_1e (l,k) + print*, " GPU res", noL_1e_gpu(l,k) + stop + endif + err_tot = err_tot + err_loc + nrm_tot = nrm_tot + dabs(noL_1e(l,k)) + enddo + enddo + print *, ' absolute accuracy on noL_1e (%) =', 100.d0 * err_tot / nrm_tot + + ! 
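A note on the checks above: each block compares the CPU and GPU arrays element by element against acc_thr, then reports 100.d0 * err_tot / nrm_tot. Despite the "absolute accuracy" label in the print statements, that figure is an aggregate relative deviation in percent, 100 * sum|CPU-GPU| / sum|CPU|. The eleven near-identical blocks could be factored through a single helper operating on the flattened arrays; a minimal sketch (the name check_against_gpu and its flat interface are illustrative, not part of the patch):

  subroutine check_against_gpu(label, n, ref, res, acc_thr)
    implicit none
    character(*),     intent(in) :: label
    integer,          intent(in) :: n
    double precision, intent(in) :: ref(n), res(n)  ! CPU reference and GPU result, flattened
    double precision, intent(in) :: acc_thr
    integer          :: k
    double precision :: err_loc, err_tot, nrm_tot
    err_tot = 0.d0
    nrm_tot = 0.d0
    do k = 1, n
      err_loc = dabs(ref(k) - res(k))
      if(err_loc > acc_thr) then
        print*, " error on flat index", k
        print*, " CPU res", ref(k)
        print*, " GPU res", res(k)
        stop
      endif
      err_tot = err_tot + err_loc
      nrm_tot = nrm_tot + dabs(ref(k))
    enddo
    print *, ' relative deviation on '//trim(label)//' (%) =', 100.d0 * err_tot / nrm_tot
  end

Fortran sequence association makes each call site a one-liner, e.g. call check_against_gpu('tmpC', 4*n_points_final_grid*mo_num*mo_num, tmpC, tmpC_gpu, acc_thr); the trade-off is that on failure only the flat index is reported, not the (ipoint,m,j,i) tuple.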
--- + + + deallocate(tmpO) + deallocate(tmpJ) + deallocate(tmpM) + deallocate(tmpS) + deallocate(tmpC) + deallocate(tmpD) + deallocate(tmpL) + deallocate(tmpR) + deallocate(tmpE) + deallocate(tmpF) + deallocate(noL_1e) + + deallocate(tmpO_gpu) + deallocate(tmpJ_gpu) + deallocate(tmpM_gpu) + deallocate(tmpS_gpu) + deallocate(tmpC_gpu) + deallocate(tmpD_gpu) + deallocate(tmpL_gpu) + deallocate(tmpR_gpu) + deallocate(tmpE_gpu) + deallocate(tmpF_gpu) + deallocate(noL_1e_gpu) + + + return + +end + + + diff --git a/plugins/local/tc_int/deb_no_2e_gpu.irp.f b/plugins/local/tc_int/deb_no_2e_gpu.irp.f index 2be53ddd..16f58cca 100644 --- a/plugins/local/tc_int/deb_no_2e_gpu.irp.f +++ b/plugins/local/tc_int/deb_no_2e_gpu.irp.f @@ -87,7 +87,6 @@ subroutine deb_no_2e_gpu() acc_thr = 1d-12 - print *, ' precision on noL_2e ' err_tot = 0.d0 nrm_tot = 0.d0 do i = 1, mo_num diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f index 9da9ac95..8d0cc4f3 100644 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f @@ -37,8 +37,12 @@ subroutine main() implicit none !call deb_int_2e_ao_gpu() + !call deb_no_2e_gpu_tmp() - call deb_no_2e_gpu() + !call deb_no_2e_gpu() + + call deb_no_1e_gpu_tmp() + !call deb_no_1e_gpu() return end diff --git a/plugins/local/tc_int/no_1e.irp.f b/plugins/local/tc_int/no_1e.irp.f index 3a990276..5a9798f0 100644 --- a/plugins/local/tc_int/no_1e.irp.f +++ b/plugins/local/tc_int/no_1e.irp.f @@ -15,10 +15,10 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, integer :: p, s, i, j, ipoint double precision :: t0, t1 - double precision, allocatable :: tmp1(:,:,:,:), tmp2(:,:), tmp3(:,:,:), tmp4(:,:,:) - double precision, allocatable :: tmp_L(:,:,:), tmp_R(:,:,:), tmp_M(:,:), tmp_S(:), tmp_O(:), tmp_J(:,:) - double precision, allocatable :: tmp_L0(:,:,:), tmp_R0(:,:,:) - double precision, allocatable :: tmp_M_priv(:,:), tmp_S_priv(:), tmp_O_priv(:), tmp_J_priv(:,:) + double precision, allocatable :: tmpC(:,:,:,:), tmpD(:,:), tmpE(:,:,:), tmpF(:,:,:) + double precision, allocatable :: tmpL(:,:,:), tmpR(:,:,:), tmpM(:,:), tmpS(:), tmpO(:), tmpJ(:,:) + double precision, allocatable :: tmpL0(:,:,:), tmpR0(:,:,:) + double precision, allocatable :: tmpM_priv(:,:), tmpS_priv(:), tmpO_priv(:), tmpJ_priv(:,:) call wall_time(t0) @@ -26,119 +26,119 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, if(ne_a .eq. 
ne_b) then - allocate(tmp_O(n_grid), tmp_J(n_grid,3)) - tmp_O = 0.d0 - tmp_J = 0.d0 + allocate(tmpO(n_grid), tmpJ(n_grid,3)) + tmpO = 0.d0 + tmpJ = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & - !$OMP SHARED(ne_b, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_O, tmp_J) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmpO_priv, tmpJ_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpO, tmpJ) - allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) - tmp_O_priv = 0.d0 - tmp_J_priv = 0.d0 + allocate(tmpO_priv(n_grid), tmpJ_priv(n_grid,3)) + tmpO_priv = 0.d0 + tmpJ_priv = 0.d0 !$OMP DO do i = 1, ne_b do ipoint = 1, n_grid - tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) - tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) - tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) - tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + tmpO_priv(ipoint) = tmpO_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_O = tmp_O + tmp_O_priv - tmp_J = tmp_J + tmp_J_priv + tmpO = tmpO + tmpO_priv + tmpJ = tmpJ + tmpJ_priv !$OMP END CRITICAL - deallocate(tmp_O_priv, tmp_J_priv) + deallocate(tmpO_priv, tmpJ_priv) !$OMP END PARALLEL ! --- - allocate(tmp_M(n_grid,3), tmp_S(n_grid)) - tmp_M = 0.d0 - tmp_S = 0.d0 + allocate(tmpM(n_grid,3), tmpS(n_grid)) + tmpM = 0.d0 + tmpS = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & - !$OMP SHARED(ne_b, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_M, tmp_S) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmpM_priv, tmpS_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpM, tmpS) - allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) - tmp_M_priv = 0.d0 - tmp_S_priv = 0.d0 + allocate(tmpM_priv(n_grid,3), tmpS_priv(n_grid)) + tmpM_priv = 0.d0 + tmpS_priv = 0.d0 !$OMP DO COLLAPSE(2) do i = 1, ne_b do j = 1, ne_b do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 
int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_M = tmp_M + tmp_M_priv - tmp_S = tmp_S + tmp_S_priv + tmpM = tmpM + tmpM_priv + tmpS = tmpS + tmpS_priv !$OMP END CRITICAL - deallocate(tmp_M_priv, tmp_S_priv) + deallocate(tmpM_priv, tmpS_priv) !$OMP END PARALLEL ! --- - allocate(tmp2(n_grid,4)) - allocate(tmp1(n_grid,4,mo_num,mo_num)) + allocate(tmpC(n_grid,4,n_mo,n_mo)) + allocate(tmpD(n_grid,4)) do ipoint = 1, n_grid - tmp2(ipoint,1) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,1) - tmp_M(ipoint,1)) - tmp2(ipoint,2) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,2) - tmp_M(ipoint,2)) - tmp2(ipoint,3) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,3) - tmp_M(ipoint,3)) - tmp2(ipoint,4) = -wr1(ipoint) * tmp_O(ipoint) + tmpD(ipoint,1) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,1) - tmpM(ipoint,1)) + tmpD(ipoint,2) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,2) - tmpM(ipoint,2)) + tmpD(ipoint,3) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,3) - tmpM(ipoint,3)) + tmpD(ipoint,4) = -wr1(ipoint) * tmpO(ipoint) - tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1) * tmp_J(ipoint,1) + tmp_J(ipoint,2) * tmp_J(ipoint,2) + tmp_J(ipoint,3) * tmp_J(ipoint,3)) - tmp_S(ipoint) + tmpS(ipoint) = 2.d0 * (tmpJ(ipoint,1) * tmpJ(ipoint,1) + tmpJ(ipoint,2) * tmpJ(ipoint,2) + tmpJ(ipoint,3) * tmpJ(ipoint,3)) - tmpS(ipoint) enddo - deallocate(tmp_O, tmp_M) + deallocate(tmpO, tmpM) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(p, s, i, ipoint) & - !$OMP SHARED(mo_num, ne_b, n_grid, & - !$OMP int2_grad1_u12, tmp1) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP int2_grad1_u12, tmpC) !$OMP DO COLLAPSE(2) - do s = 1, mo_num - do p = 1, mo_num + do s = 1, n_mo + do p = 1, n_mo do ipoint = 1, n_grid - tmp1(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) - tmp1(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) - tmp1(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + tmpC(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpC(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpC(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) enddo - tmp1(:,4,p,s) = 0.d0 + tmpC(:,4,p,s) = 0.d0 do i = 1, ne_b do ipoint = 1, n_grid - tmp1(ipoint,4,p,s) = tmp1(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + tmpC(ipoint,4,p,s) = tmpC(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) enddo @@ -149,41 +149,41 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, !$OMP END DO !$OMP END PARALLEL - call dgemv( 'T', 4*n_grid, mo_num*mo_num, 2.d0 & - , tmp1(1,1,1,1), size(tmp1, 1) * size(tmp1, 2) & - , tmp2(1,1), 1 & + call dgemv( 'T', 4*n_grid, n_mo*n_mo, 2.d0 & + , tmpC(1,1,1,1), size(tmpC, 1) * size(tmpC, 2) & + , tmpD(1,1), 1 & , 0.d0, noL_1e(1,1), 1) - deallocate(tmp1, tmp2) + deallocate(tmpC, tmpD) ! 
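The dgemv call just above is a tensor contraction in disguise: with tmpC stored as (n_grid,4,n_mo,n_mo) and tmpD as (n_grid,4), the leading two dimensions are contiguous, so passing size(tmpC,1)*size(tmpC,2) = 4*n_grid as the leading dimension lets BLAS treat tmpC as a (4*n_grid) x (n_mo*n_mo) matrix and tmpD as a vector of length 4*n_grid; with 'T', each output element is a dot product over the fused grid/component index. An explicit-loop reference of what it computes (useful for testing only; the BLAS path is what the code actually runs, and the loop variable m is reused here as the component index, which is not declared in the surrounding subroutine):

  ! naive reference for the dgemv above, assuming the same array layouts
  noL_1e = 0.d0
  do s = 1, n_mo
    do p = 1, n_mo
      do m = 1, 4
        do ipoint = 1, n_grid
          noL_1e(p,s) = noL_1e(p,s) + 2.d0 * tmpC(ipoint,m,p,s) * tmpD(ipoint,m)
        enddo
      enddo
    enddo
  enddo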
--- - allocate(tmp_L(n_grid,3,mo_num)) - allocate(tmp_R(n_grid,3,mo_num)) + allocate(tmpL(n_grid,3,n_mo)) + allocate(tmpR(n_grid,3,n_mo)) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(p, i, ipoint) & - !$OMP SHARED(ne_b, n_grid, mo_num, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_L, tmp_R) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(ne_b, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpL, tmpR) !$OMP DO - do p = 1, mo_num + do p = 1, n_mo - tmp_L(:,1:3,p) = 0.d0 - tmp_R(:,1:3,p) = 0.d0 + tmpL(:,1:3,p) = 0.d0 + tmpR(:,1:3,p) = 0.d0 do i = 1, ne_b do ipoint = 1, n_grid - tmp_L(ipoint,1,p) = tmp_L(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,2,p) = tmp_L(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,3,p) = tmp_L(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,1,p) = tmpL(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2,p) = tmpL(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3,p) = tmpL(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) - tmp_R(ipoint,1,p) = tmp_R(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,2,p) = tmp_R(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,3,p) = tmp_R(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,1,p) = tmpR(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2,p) = tmpR(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3,p) = tmpR(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) enddo enddo enddo ! p @@ -192,45 +192,45 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, ! 
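In equations, the loops above build half-contracted gradient intermediates. Writing the array element int2_grad1_u12(ipoint,m,p,i) as $[\nabla^m u]_{pi}(r_g)$ and the left/right bi-orthogonal MOs on the grid as $\phi^L_i(r_g)$, $\phi^R_i(r_g)$, the closed-shell tmpL and tmpR read

$$ \texttt{tmpL}(g,m,p) = \sum_{i=1}^{N_\beta} [\nabla^m u]_{pi}(r_g)\,\phi^L_i(r_g), \qquad \texttt{tmpR}(g,m,p) = \sum_{i=1}^{N_\beta} [\nabla^m u]_{ip}(r_g)\,\phi^R_i(r_g), $$

i.e. the occupied index of the gradient integrals is traced against the corresponding MO, once from the left and once from the right.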
--- - allocate(tmp3(n_grid,5,mo_num)) - allocate(tmp4(n_grid,5,mo_num)) + allocate(tmpE(n_grid,5,n_mo)) + allocate(tmpF(n_grid,5,n_mo)) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(p, i, j, ipoint) & - !$OMP SHARED(ne_b, n_grid, mo_num, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, wr1, & - !$OMP tmp_L, tmp_R, tmp_J, tmp_S, tmp3, tmp4) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, j, ipoint) & + !$OMP SHARED(ne_b, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, wr1, & + !$OMP tmpL, tmpR, tmpJ, tmpS, tmpE, tmpF) !$OMP DO - do p = 1, mo_num + do p = 1, n_mo do ipoint = 1, n_grid - tmp3(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) - tmp3(ipoint,2,p) = -2.d0 * (tmp_L(ipoint,1,p) * tmp_J(ipoint,1) + tmp_L(ipoint,2,p) * tmp_J(ipoint,2) + tmp_L(ipoint,3,p) * tmp_J(ipoint,3)) - tmp3(ipoint,3,p) = wr1(ipoint) * tmp_L(ipoint,1,p) - tmp3(ipoint,4,p) = wr1(ipoint) * tmp_L(ipoint,2,p) - tmp3(ipoint,5,p) = wr1(ipoint) * tmp_L(ipoint,3,p) + tmpE(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) + tmpE(ipoint,2,p) = -2.d0 * (tmpL(ipoint,1,p) * tmpJ(ipoint,1) + tmpL(ipoint,2,p) * tmpJ(ipoint,2) + tmpL(ipoint,3,p) * tmpJ(ipoint,3)) + tmpE(ipoint,3,p) = wr1(ipoint) * tmpL(ipoint,1,p) + tmpE(ipoint,4,p) = wr1(ipoint) * tmpL(ipoint,2,p) + tmpE(ipoint,5,p) = wr1(ipoint) * tmpL(ipoint,3,p) - tmp4(ipoint,1,p) = -2.d0 * (tmp_R(ipoint,1,p) * tmp_J(ipoint,1) + tmp_R(ipoint,2,p) * tmp_J(ipoint,2) + tmp_R(ipoint,3,p) * tmp_J(ipoint,3)) & - + mos_r_in_r(ipoint,p) * tmp_S(ipoint) - tmp4(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) - tmp4(ipoint,3,p) = tmp_R(ipoint,1,p) - tmp4(ipoint,4,p) = tmp_R(ipoint,2,p) - tmp4(ipoint,5,p) = tmp_R(ipoint,3,p) + tmpF(ipoint,1,p) = -2.d0 * (tmpR(ipoint,1,p) * tmpJ(ipoint,1) + tmpR(ipoint,2,p) * tmpJ(ipoint,2) + tmpR(ipoint,3,p) * tmpJ(ipoint,3)) & + + mos_r_in_r(ipoint,p) * tmpS(ipoint) + tmpF(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) + tmpF(ipoint,3,p) = tmpR(ipoint,1,p) + tmpF(ipoint,4,p) = tmpR(ipoint,2,p) + tmpF(ipoint,5,p) = tmpR(ipoint,3,p) enddo do i = 1, ne_b do j = 1, ne_b do ipoint = 1, n_grid - tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + tmpE(ipoint,2,p) = tmpE(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) - tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + tmpF(ipoint,1,p) = tmpF(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) enddo ! ipoint @@ -241,40 +241,40 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, !$OMP END DO !$OMP END PARALLEL - deallocate(tmp_L, tmp_R, tmp_J, tmp_S) + deallocate(tmpL, tmpR, tmpJ, tmpS) - call dgemm( 'T', 'N', mo_num, mo_num, 5*n_grid, 1.d0 & - , tmp3(1,1,1), 5*n_grid, tmp4(1,1,1), 5*n_grid & - , 1.d0, noL_1e(1,1), mo_num) + call dgemm( 'T', 'N', n_mo, n_mo, 5*n_grid, 1.d0 & + , tmpE(1,1,1), 5*n_grid, tmpF(1,1,1), 5*n_grid & + , 1.d0, noL_1e(1,1), n_mo) - deallocate(tmp3, tmp4) + deallocate(tmpE, tmpF) ! 
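The dgemm above then finishes the contraction as a single BLAS-3 product: with the 5 components per grid point packed contiguously, tmpE and tmpF are viewed as (5*n_grid) x n_mo matrices, and the call accumulates

$$ \texttt{noL\_1e}(p,s) \;\mathrel{+}=\; \sum_{g=1}^{n_\mathrm{grid}} \sum_{k=1}^{5} \texttt{tmpE}(g,k,p)\,\texttt{tmpF}(g,k,s). $$

Packing the weight, the Jastrow-gradient dot products, and the three Cartesian components into one fused dimension is what lets the remaining one-electron contribution collapse into one matrix-matrix multiplication instead of an explicit p,s loop nest; the open-shell branch below plays the same game with 8 components, the extra three carrying the half-weighted tmpL0/tmpR0 terms.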
--- else - allocate(tmp_O(n_grid), tmp_J(n_grid,3)) - tmp_O = 0.d0 - tmp_J = 0.d0 + allocate(tmpO(n_grid), tmpJ(n_grid,3)) + tmpO = 0.d0 + tmpJ = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & - !$OMP SHARED(ne_b, ne_a, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_O, tmp_J) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmpO_priv, tmpJ_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpO, tmpJ) - allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) - tmp_O_priv = 0.d0 - tmp_J_priv = 0.d0 + allocate(tmpO_priv(n_grid), tmpJ_priv(n_grid,3)) + tmpO_priv = 0.d0 + tmpJ_priv = 0.d0 !$OMP DO do i = 1, ne_b do ipoint = 1, n_grid - tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) - tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) - tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) - tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + tmpO_priv(ipoint) = tmpO_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) enddo enddo !$OMP END DO NOWAIT @@ -282,51 +282,51 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, !$OMP DO do i = ne_b+1, ne_a do ipoint = 1, n_grid - tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) - tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) - tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) - tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) + tmpO_priv(ipoint) = tmpO_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_O = tmp_O + tmp_O_priv - tmp_J = tmp_J + tmp_J_priv + tmpO = tmpO + tmpO_priv + tmpJ = tmpJ + tmpJ_priv !$OMP END CRITICAL - deallocate(tmp_O_priv, tmp_J_priv) + deallocate(tmpO_priv, tmpJ_priv) !$OMP END PARALLEL ! 
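In the open-shell branch the occupied space splits into doubly occupied orbitals (i = 1..ne_b, weight 1) and singly occupied ones (i = ne_b+1..ne_a, weight 1/2), and every accumulation is written out twice, once per range. A more compact alternative, at the cost of one extra multiply per term, is a per-orbital weight array; a sketch for the tmpO/tmpJ accumulation (occ_w is not in the patch, purely illustrative):

  double precision, allocatable :: occ_w(:)

  allocate(occ_w(ne_a))
  occ_w(1:ne_b)      = 1.0d0   ! doubly occupied: full weight
  occ_w(ne_b+1:ne_a) = 0.5d0   ! singly occupied: spin-averaged half weight

  do i = 1, ne_a
    do ipoint = 1, n_grid
      tmpO_priv(ipoint)   = tmpO_priv(ipoint)   + occ_w(i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i)
      tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + occ_w(i) * int2_grad1_u12(ipoint,1,i,i)
      tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + occ_w(i) * int2_grad1_u12(ipoint,2,i,i)
      tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + occ_w(i) * int2_grad1_u12(ipoint,3,i,i)
    enddo
  enddo

The split-loop form in the patch avoids the multiply and keeps the closed-shell branch free of any weight logic, which is presumably why it is written out longhand.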
--- - allocate(tmp_M(n_grid,3), tmp_S(n_grid)) - tmp_M = 0.d0 - tmp_S = 0.d0 + allocate(tmpM(n_grid,3), tmpS(n_grid)) + tmpM = 0.d0 + tmpS = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & - !$OMP SHARED(ne_b, ne_a, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_M, tmp_S) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmpM_priv, tmpS_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpM, tmpS) - allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) - tmp_M_priv = 0.d0 - tmp_S_priv = 0.d0 + allocate(tmpM_priv(n_grid,3), tmpS_priv(n_grid)) + tmpM_priv = 0.d0 + tmpS_priv = 0.d0 !$OMP DO COLLAPSE(2) do i = 1, ne_b do j = 1, ne_b do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo @@ -337,17 +337,17 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, do j = 1, ne_b do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * 
int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo @@ -358,70 +358,70 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, do j = ne_b+1, ne_a do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_M = tmp_M + tmp_M_priv - tmp_S = tmp_S + tmp_S_priv + tmpM = tmpM + tmpM_priv + tmpS = tmpS + tmpS_priv !$OMP END CRITICAL - deallocate(tmp_M_priv, tmp_S_priv) + deallocate(tmpM_priv, tmpS_priv) !$OMP END PARALLEL ! 
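All of these parallel regions use the same hand-rolled reduction: each thread allocates private accumulators, fills them without synchronization, and merges them into the shared arrays inside a single CRITICAL section (NOWAIT on the DO lets a thread merge as soon as its iterations finish). With a runtime that supports reductions over allocatable arrays (OpenMP 4.5 and later), the same pattern can be written declaratively; a sketch for the closed-shell tmpM/tmpS loop, assuming tmpM and tmpS are already allocated and zeroed as in the patch:

  !$OMP PARALLEL DO DEFAULT(NONE) COLLAPSE(2)                        &
  !$OMP PRIVATE(i, j, ipoint)                                        &
  !$OMP SHARED(ne_b, n_grid, mos_l_in_r, mos_r_in_r, int2_grad1_u12) &
  !$OMP REDUCTION(+:tmpM, tmpS)
  do i = 1, ne_b
    do j = 1, ne_b
      do ipoint = 1, n_grid
        tmpM(ipoint,1) = tmpM(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j)
        tmpM(ipoint,2) = tmpM(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j)
        tmpM(ipoint,3) = tmpM(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j)
        tmpS(ipoint)   = tmpS(ipoint)   + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) &
                                        + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) &
                                        + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i)
      enddo
    enddo
  enddo
  !$OMP END PARALLEL DO

The explicit version in the patch trades verbosity for portability to older runtimes and for control over when the per-thread copies are allocated.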
--- - allocate(tmp2(n_grid,4)) - allocate(tmp1(n_grid,4,mo_num,mo_num)) + allocate(tmpC(n_grid,4,n_mo,n_mo)) + allocate(tmpD(n_grid,4)) do ipoint = 1, n_grid - tmp2(ipoint,1) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,1) - tmp_M(ipoint,1)) - tmp2(ipoint,2) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,2) - tmp_M(ipoint,2)) - tmp2(ipoint,3) = wr1(ipoint) * (2.d0 * tmp_O(ipoint) * tmp_J(ipoint,3) - tmp_M(ipoint,3)) - tmp2(ipoint,4) = -wr1(ipoint) * tmp_O(ipoint) + tmpD(ipoint,1) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,1) - tmpM(ipoint,1)) + tmpD(ipoint,2) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,2) - tmpM(ipoint,2)) + tmpD(ipoint,3) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,3) - tmpM(ipoint,3)) + tmpD(ipoint,4) = -wr1(ipoint) * tmpO(ipoint) - tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1) * tmp_J(ipoint,1) + tmp_J(ipoint,2) * tmp_J(ipoint,2) + tmp_J(ipoint,3) * tmp_J(ipoint,3)) - tmp_S(ipoint) + tmpS(ipoint) = 2.d0 * (tmpJ(ipoint,1) * tmpJ(ipoint,1) + tmpJ(ipoint,2) * tmpJ(ipoint,2) + tmpJ(ipoint,3) * tmpJ(ipoint,3)) - tmpS(ipoint) enddo - deallocate(tmp_O, tmp_M) + deallocate(tmpO, tmpM) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(p, s, i, ipoint) & - !$OMP SHARED(mo_num, ne_b, n_grid, & - !$OMP ne_a, int2_grad1_u12, tmp1) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP ne_a, int2_grad1_u12, tmpC) !$OMP DO COLLAPSE(2) - do s = 1, mo_num - do p = 1, mo_num + do s = 1, n_mo + do p = 1, n_mo do ipoint = 1, n_grid - tmp1(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) - tmp1(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) - tmp1(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + tmpC(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpC(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpC(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) enddo - tmp1(:,4,p,s) = 0.d0 + tmpC(:,4,p,s) = 0.d0 do i = 1, ne_b do ipoint = 1, n_grid - tmp1(ipoint,4,p,s) = tmp1(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + tmpC(ipoint,4,p,s) = tmpC(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) enddo enddo do i = ne_b+1, ne_a do ipoint = 1, n_grid - tmp1(ipoint,4,p,s) = tmp1(ipoint,4,p,s) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + tmpC(ipoint,4,p,s) = tmpC(ipoint,4,p,s) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) enddo @@ -432,55 +432,55 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, !$OMP END DO !$OMP END PARALLEL - call dgemv( 'T', 4*n_grid, mo_num*mo_num, 2.d0 & - , tmp1(1,1,1,1), size(tmp1, 1) * size(tmp1, 2) & - , tmp2(1,1), 1 & + call dgemv( 'T', 4*n_grid, n_mo*n_mo, 2.d0 & + , tmpC(1,1,1,1), size(tmpC, 1) * size(tmpC, 2) & + , tmpD(1,1), 1 & , 0.d0, noL_1e(1,1), 1) - deallocate(tmp1, tmp2) + deallocate(tmpC, tmpD) ! 
--- - allocate(tmp_L(n_grid,3,mo_num), tmp_L0(n_grid,3,mo_num)) - allocate(tmp_R(n_grid,3,mo_num), tmp_R0(n_grid,3,mo_num)) + allocate(tmpL(n_grid,3,n_mo), tmpL0(n_grid,3,n_mo)) + allocate(tmpR(n_grid,3,n_mo), tmpR0(n_grid,3,n_mo)) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(p, i, ipoint) & - !$OMP SHARED(ne_b, ne_a, n_grid, mo_num, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_L0, tmp_R0, tmp_L, tmp_R) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(ne_b, ne_a, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpL0, tmpR0, tmpL, tmpR) !$OMP DO - do p = 1, mo_num + do p = 1, n_mo - tmp_L0(:,1:3,p) = 0.d0 - tmp_R0(:,1:3,p) = 0.d0 + tmpL0(:,1:3,p) = 0.d0 + tmpR0(:,1:3,p) = 0.d0 do i = ne_b+1, ne_a do ipoint = 1, n_grid - tmp_L0(ipoint,1,p) = tmp_L0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) - tmp_L0(ipoint,2,p) = tmp_L0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) - tmp_L0(ipoint,3,p) = tmp_L0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + tmpL0(ipoint,1,p) = tmpL0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmpL0(ipoint,2,p) = tmpL0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmpL0(ipoint,3,p) = tmpL0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) - tmp_R0(ipoint,1,p) = tmp_R0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) - tmp_R0(ipoint,2,p) = tmp_R0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) - tmp_R0(ipoint,3,p) = tmp_R0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + tmpR0(ipoint,1,p) = tmpR0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmpR0(ipoint,2,p) = tmpR0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmpR0(ipoint,3,p) = tmpR0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) enddo enddo - tmp_L(:,1:3,p) = tmp_L0(:,1:3,p) - tmp_R(:,1:3,p) = tmp_R0(:,1:3,p) + tmpL(:,1:3,p) = tmpL0(:,1:3,p) + tmpR(:,1:3,p) = tmpR0(:,1:3,p) do i = 1, ne_b do ipoint = 1, n_grid - tmp_L(ipoint,1,p) = tmp_L(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,2,p) = tmp_L(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,3,p) = tmp_L(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,1,p) = tmpL(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2,p) = tmpL(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3,p) = tmpL(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) - tmp_R(ipoint,1,p) = tmp_R(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,2,p) = tmp_R(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,3,p) = tmp_R(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,1,p) = tmpR(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2,p) = tmpR(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3,p) = tmpR(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) enddo enddo @@ -490,51 +490,51 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, ! 
--- - allocate(tmp3(n_grid,8,mo_num)) - allocate(tmp4(n_grid,8,mo_num)) + allocate(tmpE(n_grid,8,n_mo)) + allocate(tmpF(n_grid,8,n_mo)) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(p, i, j, ipoint) & - !$OMP SHARED(ne_b, ne_a, n_grid, mo_num, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, wr1, & - !$OMP tmp_L, tmp_L0, tmp_R, tmp_R0, tmp_J, tmp_S, tmp3, tmp4) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, j, ipoint) & + !$OMP SHARED(ne_b, ne_a, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, wr1, & + !$OMP tmpL, tmpL0, tmpR, tmpR0, tmpJ, tmpS, tmpE, tmpF) !$OMP DO - do p = 1, mo_num + do p = 1, n_mo do ipoint = 1, n_grid - tmp3(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) - tmp3(ipoint,2,p) = -2.d0 * (tmp_L(ipoint,1,p) * tmp_J(ipoint,1) + tmp_L(ipoint,2,p) * tmp_J(ipoint,2) + tmp_L(ipoint,3,p) * tmp_J(ipoint,3)) - tmp3(ipoint,3,p) = wr1(ipoint) * tmp_L(ipoint,1,p) - tmp3(ipoint,4,p) = wr1(ipoint) * tmp_L(ipoint,2,p) - tmp3(ipoint,5,p) = wr1(ipoint) * tmp_L(ipoint,3,p) - tmp3(ipoint,6,p) = wr1(ipoint) * tmp_L0(ipoint,1,p) - tmp3(ipoint,7,p) = wr1(ipoint) * tmp_L0(ipoint,2,p) - tmp3(ipoint,8,p) = wr1(ipoint) * tmp_L0(ipoint,3,p) + tmpE(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) + tmpE(ipoint,2,p) = -2.d0 * (tmpL(ipoint,1,p) * tmpJ(ipoint,1) + tmpL(ipoint,2,p) * tmpJ(ipoint,2) + tmpL(ipoint,3,p) * tmpJ(ipoint,3)) + tmpE(ipoint,3,p) = wr1(ipoint) * tmpL(ipoint,1,p) + tmpE(ipoint,4,p) = wr1(ipoint) * tmpL(ipoint,2,p) + tmpE(ipoint,5,p) = wr1(ipoint) * tmpL(ipoint,3,p) + tmpE(ipoint,6,p) = wr1(ipoint) * tmpL0(ipoint,1,p) + tmpE(ipoint,7,p) = wr1(ipoint) * tmpL0(ipoint,2,p) + tmpE(ipoint,8,p) = wr1(ipoint) * tmpL0(ipoint,3,p) - tmp4(ipoint,1,p) = -2.d0 * (tmp_R(ipoint,1,p) * tmp_J(ipoint,1) + tmp_R(ipoint,2,p) * tmp_J(ipoint,2) + tmp_R(ipoint,3,p) * tmp_J(ipoint,3)) & - + mos_r_in_r(ipoint,p) * tmp_S(ipoint) - tmp4(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) - tmp4(ipoint,3,p) = tmp_R(ipoint,1,p) - tmp4(ipoint,4,p) = tmp_R(ipoint,2,p) - tmp4(ipoint,5,p) = tmp_R(ipoint,3,p) - tmp4(ipoint,6,p) = tmp_R0(ipoint,1,p) - tmp4(ipoint,7,p) = tmp_R0(ipoint,2,p) - tmp4(ipoint,8,p) = tmp_R0(ipoint,3,p) + tmpF(ipoint,1,p) = -2.d0 * (tmpR(ipoint,1,p) * tmpJ(ipoint,1) + tmpR(ipoint,2,p) * tmpJ(ipoint,2) + tmpR(ipoint,3,p) * tmpJ(ipoint,3)) & + + mos_r_in_r(ipoint,p) * tmpS(ipoint) + tmpF(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) + tmpF(ipoint,3,p) = tmpR(ipoint,1,p) + tmpF(ipoint,4,p) = tmpR(ipoint,2,p) + tmpF(ipoint,5,p) = tmpR(ipoint,3,p) + tmpF(ipoint,6,p) = tmpR0(ipoint,1,p) + tmpF(ipoint,7,p) = tmpR0(ipoint,2,p) + tmpF(ipoint,8,p) = tmpR0(ipoint,3,p) enddo do i = 1, ne_b do j = 1, ne_b do ipoint = 1, n_grid - tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + tmpE(ipoint,2,p) = tmpE(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) - tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + tmpF(ipoint,1,p) = tmpF(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) enddo ! 
ipoint @@ -545,17 +545,17 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, do j = 1, ne_b do ipoint = 1, n_grid - tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + tmpE(ipoint,2,p) = tmpE(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) - tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,p,j) * int2_grad1_u12(ipoint,1,j,i) & + tmpE(ipoint,2,p) = tmpE(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,p,j) * int2_grad1_u12(ipoint,1,j,i) & + int2_grad1_u12(ipoint,2,p,j) * int2_grad1_u12(ipoint,2,j,i) & + int2_grad1_u12(ipoint,3,p,j) * int2_grad1_u12(ipoint,3,j,i) ) - tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + tmpF(ipoint,1,p) = tmpF(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) - tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,j,i) * int2_grad1_u12(ipoint,1,i,p) & + tmpF(ipoint,1,p) = tmpF(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,j,i) * int2_grad1_u12(ipoint,1,i,p) & + int2_grad1_u12(ipoint,2,j,i) * int2_grad1_u12(ipoint,2,i,p) & + int2_grad1_u12(ipoint,3,j,i) * int2_grad1_u12(ipoint,3,i,p) ) enddo ! ipoint @@ -566,11 +566,11 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, do j = ne_b+1, ne_a do ipoint = 1, n_grid - tmp3(ipoint,2,p) = tmp3(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + tmpE(ipoint,2,p) = tmpE(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) - tmp4(ipoint,1,p) = tmp4(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + tmpF(ipoint,1,p) = tmpF(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) enddo ! ipoint @@ -581,13 +581,590 @@ subroutine provide_no_1e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, !$OMP END DO !$OMP END PARALLEL - deallocate(tmp_L0, tmp_L, tmp_R0, tmp_R, tmp_J, tmp_S) + deallocate(tmpL0, tmpL, tmpR0, tmpR, tmpJ, tmpS) - call dgemm( 'T', 'N', mo_num, mo_num, 8*n_grid, 1.d0 & - , tmp3(1,1,1), 8*n_grid, tmp4(1,1,1), 8*n_grid & - , 1.d0, noL_1e(1,1), mo_num) + call dgemm( 'T', 'N', n_mo, n_mo, 8*n_grid, 1.d0 & + , tmpE(1,1,1), 8*n_grid, tmpF(1,1,1), 8*n_grid & + , 1.d0, noL_1e(1,1), n_mo) - deallocate(tmp3, tmp4) + deallocate(tmpE, tmpF) + + endif + + + call wall_time(t1) + write(*,"(A,2X,F15.7)") ' wall time for noL_1e (sec) = ', (t1 - t0) + + return +end + +! 
--- + +subroutine provide_no_1e_tmp(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + tmpO, tmpJ, tmpM, tmpS, tmpC, tmpD, tmpL, tmpR, tmpE, tmpF, noL_1e) + + + implicit none + + integer, intent(in) :: n_grid, n_mo + integer, intent(in) :: ne_a, ne_b + double precision, intent(in) :: wr1(n_grid) + double precision, intent(in) :: mos_l_in_r(n_grid,n_mo) + double precision, intent(in) :: mos_r_in_r(n_grid,n_mo) + double precision, intent(in) :: int2_grad1_u12(n_grid,3,n_mo,n_mo) + double precision, intent(out) :: tmpO(n_grid), tmpJ(n_grid,3) + double precision, intent(out) :: tmpM(n_grid,3), tmpS(n_grid) + double precision, intent(out) :: tmpC(n_grid,4,n_mo,n_mo), tmpD(n_grid,4) + double precision, intent(out) :: tmpL(n_grid,3,n_mo), tmpR(n_grid,3,n_mo) + double precision, intent(out) :: tmpE(n_grid,5,n_mo), tmpF(n_grid,5,n_mo) + double precision, intent(out) :: noL_1e(n_mo,n_mo) + + integer :: p, s, i, j, ipoint + double precision :: t0, t1 + double precision, allocatable :: tmpM_priv(:,:), tmpS_priv(:), tmpO_priv(:), tmpJ_priv(:,:) + double precision, allocatable :: tmpL0(:,:,:), tmpR0(:,:,:) + double precision, allocatable :: tmpE_os(:,:,:), tmpF_os(:,:,:) + + + call wall_time(t0) + + + if(ne_a .eq. ne_b) then + + tmpO = 0.d0 + tmpJ = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmpO_priv, tmpJ_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpO, tmpJ) + + allocate(tmpO_priv(n_grid), tmpJ_priv(n_grid,3)) + tmpO_priv = 0.d0 + tmpJ_priv = 0.d0 + + !$OMP DO + do i = 1, ne_b + do ipoint = 1, n_grid + tmpO_priv(ipoint) = tmpO_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmpO = tmpO + tmpO_priv + tmpJ = tmpJ + tmpJ_priv + !$OMP END CRITICAL + + deallocate(tmpO_priv, tmpJ_priv) + !$OMP END PARALLEL + + ! --- + + tmpM = 0.d0 + tmpS = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmpM_priv, tmpS_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpM, tmpS) + + allocate(tmpM_priv(n_grid,3), tmpS_priv(n_grid)) + tmpM_priv = 0.d0 + tmpS_priv = 0.d0 + + !$OMP DO COLLAPSE(2) + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmpM = tmpM + tmpM_priv + tmpS = tmpS + tmpS_priv + !$OMP END CRITICAL + + deallocate(tmpM_priv, tmpS_priv) + !$OMP END PARALLEL + + ! 
--- + + do ipoint = 1, n_grid + + tmpD(ipoint,1) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,1) - tmpM(ipoint,1)) + tmpD(ipoint,2) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,2) - tmpM(ipoint,2)) + tmpD(ipoint,3) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,3) - tmpM(ipoint,3)) + tmpD(ipoint,4) = -wr1(ipoint) * tmpO(ipoint) + + tmpS(ipoint) = 2.d0 * (tmpJ(ipoint,1) * tmpJ(ipoint,1) + tmpJ(ipoint,2) * tmpJ(ipoint,2) + tmpJ(ipoint,3) * tmpJ(ipoint,3)) - tmpS(ipoint) + enddo + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP int2_grad1_u12, tmpC) + + !$OMP DO COLLAPSE(2) + do s = 1, n_mo + do p = 1, n_mo + + do ipoint = 1, n_grid + tmpC(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpC(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpC(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + enddo + + tmpC(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) = tmpC(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo + enddo + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + call dgemv( 'T', 4*n_grid, n_mo*n_mo, 2.d0 & + , tmpC(1,1,1,1), size(tmpC, 1) * size(tmpC, 2) & + , tmpD(1,1), 1 & + , 0.d0, noL_1e(1,1), 1) + + ! --- + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(ne_b, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpL, tmpR) + + !$OMP DO + do p = 1, n_mo + + tmpL(:,1:3,p) = 0.d0 + tmpR(:,1:3,p) = 0.d0 + + do i = 1, ne_b + do ipoint = 1, n_grid + + tmpL(ipoint,1,p) = tmpL(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2,p) = tmpL(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3,p) = tmpL(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + + tmpR(ipoint,1,p) = tmpR(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2,p) = tmpR(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3,p) = tmpR(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + enddo + enddo + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + ! 
--- + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, j, ipoint) & + !$OMP SHARED(ne_b, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, wr1, & + !$OMP tmpL, tmpR, tmpJ, tmpS, tmpE, tmpF) + + !$OMP DO + do p = 1, n_mo + + do ipoint = 1, n_grid + + tmpE(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) + tmpE(ipoint,2,p) = -2.d0 * (tmpL(ipoint,1,p) * tmpJ(ipoint,1) + tmpL(ipoint,2,p) * tmpJ(ipoint,2) + tmpL(ipoint,3,p) * tmpJ(ipoint,3)) + tmpE(ipoint,3,p) = wr1(ipoint) * tmpL(ipoint,1,p) + tmpE(ipoint,4,p) = wr1(ipoint) * tmpL(ipoint,2,p) + tmpE(ipoint,5,p) = wr1(ipoint) * tmpL(ipoint,3,p) + + tmpF(ipoint,1,p) = -2.d0 * (tmpR(ipoint,1,p) * tmpJ(ipoint,1) + tmpR(ipoint,2,p) * tmpJ(ipoint,2) + tmpR(ipoint,3,p) * tmpJ(ipoint,3)) & + + mos_r_in_r(ipoint,p) * tmpS(ipoint) + tmpF(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) + tmpF(ipoint,3,p) = tmpR(ipoint,1,p) + tmpF(ipoint,4,p) = tmpR(ipoint,2,p) + tmpF(ipoint,5,p) = tmpR(ipoint,3,p) + enddo + + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmpE(ipoint,2,p) = tmpE(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + + tmpF(ipoint,1,p) = tmpF(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + enddo ! ipoint + enddo ! j + enddo ! i + + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + call dgemm( 'T', 'N', n_mo, n_mo, 5*n_grid, 1.d0 & + , tmpE(1,1,1), 5*n_grid, tmpF(1,1,1), 5*n_grid & + , 1.d0, noL_1e(1,1), n_mo) + + ! --- + + else + + tmpO = 0.d0 + tmpJ = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmpO_priv, tmpJ_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpO, tmpJ) + + allocate(tmpO_priv(n_grid), tmpJ_priv(n_grid,3)) + tmpO_priv = 0.d0 + tmpJ_priv = 0.d0 + + !$OMP DO + do i = 1, ne_b + do ipoint = 1, n_grid + tmpO_priv(ipoint) = tmpO_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpO_priv(ipoint) = tmpO_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmpO = tmpO + tmpO_priv + tmpJ = tmpJ + tmpJ_priv + !$OMP END CRITICAL + + deallocate(tmpO_priv, tmpJ_priv) + !$OMP END PARALLEL + + ! 
--- + + tmpM = 0.d0 + tmpS = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmpM_priv, tmpS_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpM, tmpS) + + allocate(tmpM_priv(n_grid,3), tmpS_priv(n_grid)) + tmpM_priv = 0.d0 + tmpS_priv = 0.d0 + + !$OMP DO COLLAPSE(2) + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO COLLAPSE(2) + do i = ne_b+1, ne_a + do j = 1, ne_b + do ipoint = 1, n_grid + + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP DO COLLAPSE(2) + do i = ne_b+1, ne_a + do j = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + + tmpS_priv(ipoint) = tmpS_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + enddo + enddo + enddo + !$OMP END DO NOWAIT + + !$OMP CRITICAL + tmpM = tmpM + tmpM_priv + tmpS = tmpS + tmpS_priv + !$OMP END CRITICAL + + deallocate(tmpM_priv, tmpS_priv) + !$OMP END PARALLEL + + ! 
--- + + do ipoint = 1, n_grid + + tmpD(ipoint,1) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,1) - tmpM(ipoint,1)) + tmpD(ipoint,2) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,2) - tmpM(ipoint,2)) + tmpD(ipoint,3) = wr1(ipoint) * (2.d0 * tmpO(ipoint) * tmpJ(ipoint,3) - tmpM(ipoint,3)) + tmpD(ipoint,4) = -wr1(ipoint) * tmpO(ipoint) + + tmpS(ipoint) = 2.d0 * (tmpJ(ipoint,1) * tmpJ(ipoint,1) + tmpJ(ipoint,2) * tmpJ(ipoint,2) + tmpJ(ipoint,3) * tmpJ(ipoint,3)) - tmpS(ipoint) + enddo + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, s, i, ipoint) & + !$OMP SHARED(n_mo, ne_b, n_grid, & + !$OMP ne_a, int2_grad1_u12, tmpC) + + !$OMP DO COLLAPSE(2) + do s = 1, n_mo + do p = 1, n_mo + + do ipoint = 1, n_grid + tmpC(ipoint,1,p,s) = int2_grad1_u12(ipoint,1,p,s) + tmpC(ipoint,2,p,s) = int2_grad1_u12(ipoint,2,p,s) + tmpC(ipoint,3,p,s) = int2_grad1_u12(ipoint,3,p,s) + enddo + + tmpC(:,4,p,s) = 0.d0 + do i = 1, ne_b + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) = tmpC(ipoint,4,p,s) + int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo + enddo + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + tmpC(ipoint,4,p,s) = tmpC(ipoint,4,p,s) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,s) & + + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,s) + enddo + enddo + + enddo ! p + enddo ! s + !$OMP END DO + !$OMP END PARALLEL + + call dgemv( 'T', 4*n_grid, n_mo*n_mo, 2.d0 & + , tmpC(1,1,1,1), size(tmpC, 1) * size(tmpC, 2) & + , tmpD(1,1), 1 & + , 0.d0, noL_1e(1,1), 1) + + ! --- + + allocate(tmpL0(n_grid,3,n_mo)) + allocate(tmpR0(n_grid,3,n_mo)) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, ipoint) & + !$OMP SHARED(ne_b, ne_a, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpL0, tmpR0, tmpL, tmpR) + + !$OMP DO + do p = 1, n_mo + + tmpL0(:,1:3,p) = 0.d0 + tmpR0(:,1:3,p) = 0.d0 + do i = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmpL0(ipoint,1,p) = tmpL0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmpL0(ipoint,2,p) = tmpL0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmpL0(ipoint,3,p) = tmpL0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + + tmpR0(ipoint,1,p) = tmpR0(ipoint,1,p) + 0.5d0 * int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmpR0(ipoint,2,p) = tmpR0(ipoint,2,p) + 0.5d0 * int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmpR0(ipoint,3,p) = tmpR0(ipoint,3,p) + 0.5d0 * int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + enddo + enddo + + tmpL(:,1:3,p) = tmpL0(:,1:3,p) + tmpR(:,1:3,p) = tmpR0(:,1:3,p) + do i = 1, ne_b + do ipoint = 1, n_grid + + tmpL(ipoint,1,p) = tmpL(ipoint,1,p) + int2_grad1_u12(ipoint,1,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2,p) = tmpL(ipoint,2,p) + int2_grad1_u12(ipoint,2,p,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3,p) = tmpL(ipoint,3,p) + int2_grad1_u12(ipoint,3,p,i) * mos_l_in_r(ipoint,i) + + tmpR(ipoint,1,p) = tmpR(ipoint,1,p) + int2_grad1_u12(ipoint,1,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2,p) = tmpR(ipoint,2,p) + int2_grad1_u12(ipoint,2,i,p) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3,p) = tmpR(ipoint,3,p) + int2_grad1_u12(ipoint,3,i,p) * mos_r_in_r(ipoint,i) + enddo + enddo + + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + ! 
--- + + allocate(tmpE_os(n_grid,8,n_mo)) + allocate(tmpF_os(n_grid,8,n_mo)) + + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(p, i, j, ipoint) & + !$OMP SHARED(ne_b, ne_a, n_grid, n_mo, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, wr1, & + !$OMP tmpL, tmpL0, tmpR, tmpR0, tmpJ, tmpS, tmpE_os, tmpF_os) + + !$OMP DO + do p = 1, n_mo + + do ipoint = 1, n_grid + + tmpE_os(ipoint,1,p) = wr1(ipoint) * mos_l_in_r(ipoint,p) + tmpE_os(ipoint,2,p) = -2.d0 * (tmpL(ipoint,1,p) * tmpJ(ipoint,1) + tmpL(ipoint,2,p) * tmpJ(ipoint,2) + tmpL(ipoint,3,p) * tmpJ(ipoint,3)) + tmpE_os(ipoint,3,p) = wr1(ipoint) * tmpL(ipoint,1,p) + tmpE_os(ipoint,4,p) = wr1(ipoint) * tmpL(ipoint,2,p) + tmpE_os(ipoint,5,p) = wr1(ipoint) * tmpL(ipoint,3,p) + tmpE_os(ipoint,6,p) = wr1(ipoint) * tmpL0(ipoint,1,p) + tmpE_os(ipoint,7,p) = wr1(ipoint) * tmpL0(ipoint,2,p) + tmpE_os(ipoint,8,p) = wr1(ipoint) * tmpL0(ipoint,3,p) + + tmpF_os(ipoint,1,p) = -2.d0 * (tmpR(ipoint,1,p) * tmpJ(ipoint,1) + tmpR(ipoint,2,p) * tmpJ(ipoint,2) + tmpR(ipoint,3,p) * tmpJ(ipoint,3)) & + + mos_r_in_r(ipoint,p) * tmpS(ipoint) + tmpF_os(ipoint,2,p) = wr1(ipoint) * mos_r_in_r(ipoint,p) + tmpF_os(ipoint,3,p) = tmpR(ipoint,1,p) + tmpF_os(ipoint,4,p) = tmpR(ipoint,2,p) + tmpF_os(ipoint,5,p) = tmpR(ipoint,3,p) + tmpF_os(ipoint,6,p) = tmpR0(ipoint,1,p) + tmpF_os(ipoint,7,p) = tmpR0(ipoint,2,p) + tmpF_os(ipoint,8,p) = tmpR0(ipoint,3,p) + enddo + + do i = 1, ne_b + do j = 1, ne_b + do ipoint = 1, n_grid + + tmpE_os(ipoint,2,p) = tmpE_os(ipoint,2,p) + mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + + tmpF_os(ipoint,1,p) = tmpF_os(ipoint,1,p) + mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + enddo ! ipoint + enddo ! j + enddo ! i + + do i = ne_b+1, ne_a + do j = 1, ne_b + do ipoint = 1, n_grid + + tmpE_os(ipoint,2,p) = tmpE_os(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + tmpE_os(ipoint,2,p) = tmpE_os(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,p,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,p,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,p,j) * int2_grad1_u12(ipoint,3,j,i) ) + + tmpF_os(ipoint,1,p) = tmpF_os(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + tmpF_os(ipoint,1,p) = tmpF_os(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,j,i) * int2_grad1_u12(ipoint,1,i,p) & + + int2_grad1_u12(ipoint,2,j,i) * int2_grad1_u12(ipoint,2,i,p) & + + int2_grad1_u12(ipoint,3,j,i) * int2_grad1_u12(ipoint,3,i,p) ) + enddo ! ipoint + enddo ! j + enddo ! 
i + + do i = ne_b+1, ne_a + do j = ne_b+1, ne_a + do ipoint = 1, n_grid + + tmpE_os(ipoint,2,p) = tmpE_os(ipoint,2,p) + 0.5d0 * mos_l_in_r(ipoint,j) * ( int2_grad1_u12(ipoint,1,p,i) * int2_grad1_u12(ipoint,1,i,j) & + + int2_grad1_u12(ipoint,2,p,i) * int2_grad1_u12(ipoint,2,i,j) & + + int2_grad1_u12(ipoint,3,p,i) * int2_grad1_u12(ipoint,3,i,j) ) + + tmpF_os(ipoint,1,p) = tmpF_os(ipoint,1,p) + 0.5d0 * mos_r_in_r(ipoint,i) * ( int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,p) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,p) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,p) ) + enddo ! ipoint + enddo ! j + enddo ! i + + enddo ! p + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmpL0, tmpR0) + + call dgemm( 'T', 'N', n_mo, n_mo, 8*n_grid, 1.d0 & + , tmpE_os(1,1,1), 8*n_grid, tmpF_os(1,1,1), 8*n_grid & + , 1.d0, noL_1e(1,1), n_mo) + + deallocate(tmpE_os, tmpF_os) endif From bf15b68b0b64a229812dc4eeb2ef498fe56b194c Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Tue, 13 Aug 2024 10:57:44 +0200 Subject: [PATCH 17/19] add normal-ordering with CuTC --- plugins/local/tc_int/cutc_module.F90 | 58 +++- plugins/local/tc_int/deb_no_0e_gpu.irp.f | 96 ++++++ plugins/local/tc_int/deb_no_gpu.irp.f | 170 ++++++++++ plugins/local/tc_int/deb_tc_int_cuda.irp.f | 6 +- plugins/local/tc_int/no_0e.irp.f | 331 +++++++++---------- plugins/local/tc_int/write_tc_int_cuda.irp.f | 12 +- 6 files changed, 490 insertions(+), 183 deletions(-) create mode 100644 plugins/local/tc_int/deb_no_0e_gpu.irp.f create mode 100644 plugins/local/tc_int/deb_no_gpu.irp.f diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index d1466697..eaf271e5 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -9,13 +9,13 @@ module cutc_module ! --- - subroutine cutc_int_c(nxBlocks, nyBlocks, nzBlocks, & - blockxSize, blockySize, blockzSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, wr1, r2, wr2, rn, & - aos_data1, aos_data2, & - c_bh, m_bh, n_bh, o_bh, & - int2_grad1_u12_ao, int_2e_ao) bind(C, name = "cutc_int_c") + subroutine cutc_int(nxBlocks, nyBlocks, nzBlocks, & + blockxSize, blockySize, blockzSize, & + n_grid1, n_grid2, n_ao, n_nuc, size_bh, & + r1, wr1, r2, wr2, rn, & + aos_data1, aos_data2, & + c_bh, m_bh, n_bh, o_bh, & + int2_grad1_u12_ao, int_2e_ao) bind(C, name = "cutc_int") import c_int, c_double, c_ptr integer(c_int), intent(in), value :: nxBlocks, blockxSize @@ -37,7 +37,7 @@ module cutc_module real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,3) real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) - end subroutine cutc_int_c + end subroutine cutc_int ! --- @@ -170,6 +170,48 @@ module cutc_module ! --- + subroutine cutc_no_0e(n_grid1, n_mo, ne_a, ne_b, & + wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + no_0e) bind(C, name = "cutc_no_0e") + + import c_int, c_double, c_ptr + + integer(c_int), intent(in), value :: n_grid1 + integer(c_int), intent(in), value :: n_mo + integer(c_int), intent(in), value :: ne_a + integer(c_int), intent(in), value :: ne_b + real(c_double), intent(in) :: wr1(n_grid1) + real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) + real(c_double), intent(out) :: no_0e(1) + + end subroutine cutc_no_0e + + ! 
--- + + subroutine cutc_no(n_grid1, n_mo, ne_a, ne_b, & + wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & + no_2e, no_1e, no_0e) bind(C, name = "cutc_no") + + import c_int, c_double, c_ptr + + integer(c_int), intent(in), value :: n_grid1 + integer(c_int), intent(in), value :: n_mo + integer(c_int), intent(in), value :: ne_a + integer(c_int), intent(in), value :: ne_b + real(c_double), intent(in) :: wr1(n_grid1) + real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) + real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) + real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo) + real(c_double), intent(out) :: no_1e(n_mo,n_mo) + real(c_double), intent(out) :: no_0e(1) + + end subroutine cutc_no + + ! --- + end interface end module cutc_module diff --git a/plugins/local/tc_int/deb_no_0e_gpu.irp.f b/plugins/local/tc_int/deb_no_0e_gpu.irp.f new file mode 100644 index 00000000..afff060a --- /dev/null +++ b/plugins/local/tc_int/deb_no_0e_gpu.irp.f @@ -0,0 +1,96 @@ + +! --- + +subroutine deb_no_0e_gpu() + + use cutc_module + + implicit none + + integer :: i, j, k, l, ipoint + double precision :: acc_thr, err_tot, nrm_tot, err_loc + double precision :: noL_0e + double precision :: noL_0e_gpu(1) + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) + + + PROVIDE mo_l_coef mo_r_coef + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(int2_grad1_u12_ao) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(tmp) + + ! --- + + call cutc_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_0e_gpu(1)) + + ! 
---
+
+  call provide_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
+                     final_weight_at_r_vector(1),                                &
+                     mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
+                     int2_grad1_u12_bimo_t(1,1,1,1), noL_0e)
+
+  ! ---
+
+  deallocate(int2_grad1_u12_bimo_t)
+
+  print *, 'noL_0e CPU = ', noL_0e
+  print *, 'noL_0e GPU = ', noL_0e_gpu(1)
+
+  err_tot = dabs(noL_0e - noL_0e_gpu(1))
+  nrm_tot = dabs(noL_0e)
+  print *, ' relative error on noL_0e (%) =', 100.d0 * err_tot / nrm_tot
+
+  return
+
+end
+
+! ---
+
diff --git a/plugins/local/tc_int/deb_no_gpu.irp.f b/plugins/local/tc_int/deb_no_gpu.irp.f
new file mode 100644
index 00000000..e14404e6
--- /dev/null
+++ b/plugins/local/tc_int/deb_no_gpu.irp.f
@@ -0,0 +1,170 @@
+
+! ---
+
+subroutine deb_no_gpu()
+
+  use cutc_module
+
+  implicit none
+
+  integer                       :: i, j, k, l, ipoint
+  double precision              :: acc_thr, err_tot, nrm_tot, err_loc
+  double precision              :: noL_0e
+  double precision              :: noL_0e_gpu(1)
+  double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
+  double precision, allocatable :: tmp(:,:,:,:)
+  double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
+  double precision, allocatable :: noL_1e    (:,:)
+  double precision, allocatable :: noL_1e_gpu(:,:)
+  double precision, allocatable :: noL_2e    (:,:,:,:)
+  double precision, allocatable :: noL_2e_gpu(:,:,:,:)
+
+
+  PROVIDE mo_l_coef mo_r_coef
+  PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
+
+
+  allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
+  print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
+  open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
+    read(11) int2_grad1_u12_ao
+  close(11)
+
+  allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
+  allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
+
+  !$OMP PARALLEL         &
+  !$OMP DEFAULT (NONE)   &
+  !$OMP PRIVATE (ipoint) &
+  !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp)
+  !$OMP DO SCHEDULE (dynamic)
+  do ipoint = 1, n_points_final_grid
+    call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num)
+    call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num)
+    call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num)
+  enddo
+  !$OMP END DO
+  !$OMP END PARALLEL
+
+  deallocate(int2_grad1_u12_ao)
+
+  !$OMP PARALLEL               &
+  !$OMP DEFAULT (NONE)         &
+  !$OMP PRIVATE (i, j, ipoint) &
+  !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t)
+  !$OMP DO COLLAPSE(2) SCHEDULE (dynamic)
+  do ipoint = 1, n_points_final_grid
+    do i = 1, mo_num
+      do j = 1, mo_num
+        int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1)
+        int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2)
+        int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3)
+      enddo
+    enddo
+  enddo
+  !$OMP END DO
+  !$OMP END PARALLEL
+
+  deallocate(tmp)
+
+  ! ---
+
+  allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num))
+  allocate(noL_1e_gpu(mo_num,mo_num))
+
+  call cutc_no(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
+               final_weight_at_r_vector(1),                                &
+               mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
+               int2_grad1_u12_bimo_t(1,1,1,1), noL_2e_gpu(1,1,1,1), noL_1e_gpu(1,1), noL_0e_gpu(1))
+
+  ! 
---
+
+  allocate(noL_2e(mo_num,mo_num,mo_num,mo_num))
+  allocate(noL_1e(mo_num,mo_num))
+
+  call provide_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
+                     final_weight_at_r_vector(1),                                &
+                     mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
+                     int2_grad1_u12_bimo_t(1,1,1,1), noL_2e(1,1,1,1))
+
+  call provide_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
+                     final_weight_at_r_vector(1),                                &
+                     mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
+                     int2_grad1_u12_bimo_t(1,1,1,1), noL_1e(1,1))
+
+  call provide_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
+                     final_weight_at_r_vector(1),                                &
+                     mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
+                     int2_grad1_u12_bimo_t(1,1,1,1), noL_0e)
+
+  ! ---
+
+  deallocate(int2_grad1_u12_bimo_t)
+
+  acc_thr = 1d-12
+
+  ! ---
+
+  err_tot = 0.d0
+  nrm_tot = 0.d0
+  do i = 1, mo_num
+    do j = 1, mo_num
+      do k = 1, mo_num
+        do l = 1, mo_num
+          err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i))
+          if(err_loc > acc_thr) then
+            print*, " error on", l, k, j, i
+            print*, " CPU res", noL_2e    (l,k,j,i)
+            print*, " GPU res", noL_2e_gpu(l,k,j,i)
+            stop
+          endif
+          err_tot = err_tot + err_loc
+          nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i))
+        enddo
+      enddo
+    enddo
+  enddo
+  print *, ' relative error on noL_2e (%) =', 100.d0 * err_tot / nrm_tot
+
+  deallocate(noL_2e)
+  deallocate(noL_2e_gpu)
+
+  ! ---
+
+  err_tot = 0.d0
+  nrm_tot = 0.d0
+  do k = 1, mo_num
+    do l = 1, mo_num
+      err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k))
+      if(err_loc > acc_thr) then
+        print*, " error on", l, k
+        print*, " CPU res", noL_1e    (l,k)
+        print*, " GPU res", noL_1e_gpu(l,k)
+        stop
+      endif
+      err_tot = err_tot + err_loc
+      nrm_tot = nrm_tot + dabs(noL_1e(l,k))
+    enddo
+  enddo
+  print *, ' relative error on noL_1e (%) =', 100.d0 * err_tot / nrm_tot
+
+  deallocate(noL_1e)
+  deallocate(noL_1e_gpu)
+
+  ! ---
+
+  print *, 'noL_0e CPU = ', noL_0e
+  print *, 'noL_0e GPU = ', noL_0e_gpu(1)
+
+  err_tot = dabs(noL_0e - noL_0e_gpu(1))
+  nrm_tot = dabs(noL_0e)
+  print *, ' relative error on noL_0e (%) =', 100.d0 * err_tot / nrm_tot
+
+
+  return
+
+end
+
+! ---
+
+
diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f
index 8d0cc4f3..ad20d861 100644
--- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f
+++ b/plugins/local/tc_int/deb_tc_int_cuda.irp.f
@@ -41,9 +41,13 @@ subroutine main()
 
   !call deb_no_2e_gpu_tmp()
   !call deb_no_2e_gpu()
 
-  call deb_no_1e_gpu_tmp()
+  !call deb_no_1e_gpu_tmp()
   !call deb_no_1e_gpu()
 
+  !call deb_no_0e_gpu()
+
+  call deb_no_gpu()
+
   return
 
 end
diff --git a/plugins/local/tc_int/no_0e.irp.f b/plugins/local/tc_int/no_0e.irp.f
index b945e0dd..830b91a8 100644
--- a/plugins/local/tc_int/no_0e.irp.f
+++ b/plugins/local/tc_int/no_0e.irp.f
@@ -3,12 +3,6 @@
 
 subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, noL_0e)
 
-  BEGIN_DOC
-  !
-  ! < Phi_left | L | Phi_right >
-  ! 
- END_DOC - implicit none integer, intent(in) :: n_grid, n_mo @@ -22,44 +16,47 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, integer :: i, j, k, ipoint double precision :: t0, t1 double precision, allocatable :: tmp(:) - double precision, allocatable :: tmp_L(:,:), tmp_R(:,:) - double precision, allocatable :: tmp_M(:,:), tmp_S(:), tmp_O(:), tmp_J(:,:) - double precision, allocatable :: tmp_M_priv(:,:), tmp_S_priv(:), tmp_O_priv(:), tmp_J_priv(:,:) + double precision, allocatable :: tmpL(:,:), tmpR(:,:) + double precision, allocatable :: tmpM(:,:), tmpS(:), tmpO(:), tmpJ(:,:) + double precision, allocatable :: tmpM_priv(:,:), tmpS_priv(:), tmpO_priv(:), tmpJ_priv(:,:) + + + call wall_time(t0) if(ne_a .eq. ne_b) then allocate(tmp(ne_b)) - allocate(tmp_L(n_grid,3), tmp_R(n_grid,3)) + allocate(tmpL(n_grid,3), tmpR(n_grid,3)) !$OMP PARALLEL & !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(j, i, ipoint, tmp_L, tmp_R) & - !$OMP SHARED(ne_b, n_grid, & + !$OMP PRIVATE(j, i, ipoint, tmpL, tmpR) & + !$OMP SHARED(ne_b, n_grid, & !$OMP mos_l_in_r, mos_r_in_r, wr1, & !$OMP int2_grad1_u12, tmp) !$OMP DO do j = 1, ne_b - tmp_L = 0.d0 - tmp_R = 0.d0 + tmpL = 0.d0 + tmpR = 0.d0 do i = 1, ne_b do ipoint = 1, n_grid - tmp_L(ipoint,1) = tmp_L(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,2) = tmp_L(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,3) = tmp_L(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,1) = tmpL(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2) = tmpL(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3) = tmpL(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) - tmp_R(ipoint,1) = tmp_R(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,2) = tmp_R(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,3) = tmp_R(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,1) = tmpR(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2) = tmpR(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3) = tmpR(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) enddo enddo tmp(j) = 0.d0 do ipoint = 1, n_grid - tmp(j) = tmp(j) + wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3)) + tmp(j) = tmp(j) + wr1(ipoint) * (tmpL(ipoint,1)*tmpR(ipoint,1) + tmpL(ipoint,2)*tmpR(ipoint,2) + tmpL(ipoint,3)*tmpR(ipoint,3)) enddo enddo ! j !$OMP END DO @@ -68,150 +65,149 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, noL_0e = -2.d0 * sum(tmp) deallocate(tmp) - deallocate(tmp_L, tmp_R) + deallocate(tmpL, tmpR) ! 
--- - allocate(tmp_O(n_grid), tmp_J(n_grid,3)) - tmp_O = 0.d0 - tmp_J = 0.d0 + allocate(tmpO(n_grid), tmpJ(n_grid,3)) + tmpO = 0.d0 + tmpJ = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & - !$OMP SHARED(ne_b, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_O, tmp_J) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmpO_priv, tmpJ_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpO, tmpJ) - allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) - tmp_O_priv = 0.d0 - tmp_J_priv = 0.d0 + allocate(tmpO_priv(n_grid), tmpJ_priv(n_grid,3)) + tmpO_priv = 0.d0 + tmpJ_priv = 0.d0 !$OMP DO do i = 1, ne_b do ipoint = 1, n_grid - tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) - tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) - tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) - tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + tmpO_priv(ipoint) = tmpO_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_O = tmp_O + tmp_O_priv - tmp_J = tmp_J + tmp_J_priv + tmpO = tmpO + tmpO_priv + tmpJ = tmpJ + tmpJ_priv !$OMP END CRITICAL - deallocate(tmp_O_priv, tmp_J_priv) + deallocate(tmpO_priv, tmpJ_priv) !$OMP END PARALLEL - allocate(tmp_M(n_grid,3), tmp_S(n_grid)) - tmp_M = 0.d0 - tmp_S = 0.d0 + allocate(tmpM(n_grid,3), tmpS(n_grid)) + tmpM = 0.d0 + tmpS = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & - !$OMP SHARED(ne_b, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_M, tmp_S) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmpM_priv, tmpS_priv) & + !$OMP SHARED(ne_b, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpM, tmpS) - allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) - tmp_M_priv = 0.d0 - tmp_S_priv = 0.d0 + allocate(tmpM_priv(n_grid,3), tmpS_priv(n_grid)) + tmpM_priv = 0.d0 + tmpS_priv = 0.d0 !$OMP DO COLLAPSE(2) do i = 1, ne_b do j = 1, ne_b do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 
int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_M = tmp_M + tmp_M_priv - tmp_S = tmp_S + tmp_S_priv + tmpM = tmpM + tmpM_priv + tmpS = tmpS + tmpS_priv !$OMP END CRITICAL - deallocate(tmp_M_priv, tmp_S_priv) + deallocate(tmpM_priv, tmpS_priv) !$OMP END PARALLEL allocate(tmp(n_grid)) do ipoint = 1, n_grid - tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1)*tmp_J(ipoint,1) + tmp_J(ipoint,2)*tmp_J(ipoint,2) + tmp_J(ipoint,3)*tmp_J(ipoint,3)) - tmp_S(ipoint) + tmpS(ipoint) = 2.d0 * (tmpJ(ipoint,1)*tmpJ(ipoint,1) + tmpJ(ipoint,2)*tmpJ(ipoint,2) + tmpJ(ipoint,3)*tmpJ(ipoint,3)) - tmpS(ipoint) - tmp(ipoint) = wr1(ipoint) * ( tmp_O(ipoint) * tmp_S(ipoint) & - - 2.d0 * ( tmp_J(ipoint,1) * tmp_M(ipoint,1) & - + tmp_J(ipoint,2) * tmp_M(ipoint,2) & - + tmp_J(ipoint,3) * tmp_M(ipoint,3))) + tmp(ipoint) = wr1(ipoint) * ( tmpO(ipoint) * tmpS(ipoint) - 2.d0 * ( tmpJ(ipoint,1) * tmpM(ipoint,1) & + + tmpJ(ipoint,2) * tmpM(ipoint,2) & + + tmpJ(ipoint,3) * tmpM(ipoint,3) ) ) enddo - noL_0e = noL_0e -2.d0 * (sum(tmp)) + noL_0e = noL_0e - 2.d0 * (sum(tmp)) deallocate(tmp) else allocate(tmp(ne_a)) - allocate(tmp_L(n_grid,3), tmp_R(n_grid,3)) + allocate(tmpL(n_grid,3), tmpR(n_grid,3)) - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(j, i, ipoint, tmp_L, tmp_R) & - !$OMP SHARED(ne_b, ne_a, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(j, i, ipoint, tmpL, tmpR) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & !$OMP int2_grad1_u12, tmp, wr1) !$OMP DO do j = 1, ne_b - tmp_L = 0.d0 - tmp_R = 0.d0 + tmpL = 0.d0 + tmpR = 0.d0 do i = ne_b+1, ne_a do ipoint = 1, n_grid - tmp_L(ipoint,1) = tmp_L(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,2) = tmp_L(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,3) = tmp_L(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,1) = tmpL(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2) = tmpL(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3) = tmpL(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) - tmp_R(ipoint,1) = tmp_R(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,2) = tmp_R(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,3) = tmp_R(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,1) = tmpR(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2) = tmpR(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3) = tmpR(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) enddo enddo tmp(j) = 0.d0 do ipoint = 1, n_grid - tmp(j) = tmp(j) + wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3)) + tmp(j) = tmp(j) + wr1(ipoint) * (tmpL(ipoint,1)*tmpR(ipoint,1) + tmpL(ipoint,2)*tmpR(ipoint,2) + tmpL(ipoint,3)*tmpR(ipoint,3)) enddo do i = 1, ne_b do ipoint = 1, n_grid - tmp_L(ipoint,1) = tmp_L(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,2) = tmp_L(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,3) = tmp_L(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * 
mos_l_in_r(ipoint,i) + tmpL(ipoint,1) = tmpL(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2) = tmpL(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3) = tmpL(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) - tmp_R(ipoint,1) = tmp_R(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,2) = tmp_R(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,3) = tmp_R(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,1) = tmpR(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2) = tmpR(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3) = tmpR(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) enddo enddo do ipoint = 1, n_grid - tmp(j) = tmp(j) + wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3)) + tmp(j) = tmp(j) + wr1(ipoint) * (tmpL(ipoint,1)*tmpR(ipoint,1) + tmpL(ipoint,2)*tmpR(ipoint,2) + tmpL(ipoint,3)*tmpR(ipoint,3)) enddo enddo ! j !$OMP END DO @@ -219,33 +215,33 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, ! --- - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(j, i, ipoint, tmp_L, tmp_R) & - !$OMP SHARED(ne_b, ne_a, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(j, i, ipoint, tmpL, tmpR) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & !$OMP int2_grad1_u12, tmp, wr1) !$OMP DO do j = ne_b+1, ne_a - tmp_L = 0.d0 - tmp_R = 0.d0 + tmpL = 0.d0 + tmpR = 0.d0 do i = 1, ne_a do ipoint = 1, n_grid - tmp_L(ipoint,1) = tmp_L(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,2) = tmp_L(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) - tmp_L(ipoint,3) = tmp_L(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,1) = tmpL(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,2) = tmpL(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) + tmpL(ipoint,3) = tmpL(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) - tmp_R(ipoint,1) = tmp_R(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,2) = tmp_R(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) - tmp_R(ipoint,3) = tmp_R(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,1) = tmpR(ipoint,1) + int2_grad1_u12(ipoint,1,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,2) = tmpR(ipoint,2) + int2_grad1_u12(ipoint,2,i,j) * mos_r_in_r(ipoint,i) + tmpR(ipoint,3) = tmpR(ipoint,3) + int2_grad1_u12(ipoint,3,i,j) * mos_r_in_r(ipoint,i) enddo enddo tmp(j) = 0.d0 do ipoint = 1, n_grid - tmp(j) = tmp(j) + 0.5d0 * wr1(ipoint) * (tmp_L(ipoint,1)*tmp_R(ipoint,1) + tmp_L(ipoint,2)*tmp_R(ipoint,2) + tmp_L(ipoint,3)*tmp_R(ipoint,3)) + tmp(j) = tmp(j) + 0.5d0 * wr1(ipoint) * (tmpL(ipoint,1)*tmpR(ipoint,1) + tmpL(ipoint,2)*tmpR(ipoint,2) + tmpL(ipoint,3)*tmpR(ipoint,3)) enddo enddo ! j !$OMP END DO @@ -254,32 +250,32 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, noL_0e = -2.d0 * sum(tmp) deallocate(tmp) - deallocate(tmp_L, tmp_R) + deallocate(tmpL, tmpR) ! 
--- - allocate(tmp_O(n_grid), tmp_J(n_grid,3)) - tmp_O = 0.d0 - tmp_J = 0.d0 + allocate(tmpO(n_grid), tmpJ(n_grid,3)) + tmpO = 0.d0 + tmpJ = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, ipoint, tmp_O_priv, tmp_J_priv) & - !$OMP SHARED(ne_b, ne_a, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_O, tmp_J) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, ipoint, tmpO_priv, tmpJ_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpO, tmpJ) - allocate(tmp_O_priv(n_grid), tmp_J_priv(n_grid,3)) - tmp_O_priv = 0.d0 - tmp_J_priv = 0.d0 + allocate(tmpO_priv(n_grid), tmpJ_priv(n_grid,3)) + tmpO_priv = 0.d0 + tmpJ_priv = 0.d0 !$OMP DO do i = 1, ne_b do ipoint = 1, n_grid - tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) - tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) - tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) - tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) + tmpO_priv(ipoint) = tmpO_priv(ipoint) + mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + int2_grad1_u12(ipoint,3,i,i) enddo enddo !$OMP END DO NOWAIT @@ -287,49 +283,49 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, !$OMP DO do i = ne_b+1, ne_a do ipoint = 1, n_grid - tmp_O_priv(ipoint) = tmp_O_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) - tmp_J_priv(ipoint,1) = tmp_J_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) - tmp_J_priv(ipoint,2) = tmp_J_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) - tmp_J_priv(ipoint,3) = tmp_J_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) + tmpO_priv(ipoint) = tmpO_priv(ipoint) + 0.5d0 * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,i) + tmpJ_priv(ipoint,1) = tmpJ_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,i) + tmpJ_priv(ipoint,2) = tmpJ_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,i) + tmpJ_priv(ipoint,3) = tmpJ_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,i) enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_O = tmp_O + tmp_O_priv - tmp_J = tmp_J + tmp_J_priv + tmpO = tmpO + tmpO_priv + tmpJ = tmpJ + tmpJ_priv !$OMP END CRITICAL - deallocate(tmp_O_priv, tmp_J_priv) + deallocate(tmpO_priv, tmpJ_priv) !$OMP END PARALLEL ! 
--- - allocate(tmp_M(n_grid,3), tmp_S(n_grid)) - tmp_M = 0.d0 - tmp_S = 0.d0 + allocate(tmpM(n_grid,3), tmpS(n_grid)) + tmpM = 0.d0 + tmpS = 0.d0 - !$OMP PARALLEL & - !$OMP DEFAULT(NONE) & - !$OMP PRIVATE(i, j, ipoint, tmp_M_priv, tmp_S_priv) & - !$OMP SHARED(ne_b, ne_a, n_grid, & - !$OMP mos_l_in_r, mos_r_in_r, & - !$OMP int2_grad1_u12, tmp_M, tmp_S) + !$OMP PARALLEL & + !$OMP DEFAULT(NONE) & + !$OMP PRIVATE(i, j, ipoint, tmpM_priv, tmpS_priv) & + !$OMP SHARED(ne_b, ne_a, n_grid, & + !$OMP mos_l_in_r, mos_r_in_r, & + !$OMP int2_grad1_u12, tmpM, tmpS) - allocate(tmp_M_priv(n_grid,3), tmp_S_priv(n_grid)) - tmp_M_priv = 0.d0 - tmp_S_priv = 0.d0 + allocate(tmpM_priv(n_grid,3), tmpS_priv(n_grid)) + tmpM_priv = 0.d0 + tmpS_priv = 0.d0 !$OMP DO COLLAPSE(2) do i = 1, ne_b do j = 1, ne_b do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo @@ -342,17 +338,17 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, do j = 1, ne_b do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * 
mos_l_in_r(ipoint,j) * mos_r_in_r(ipoint,i) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo @@ -363,39 +359,38 @@ subroutine provide_no_0e(n_grid, n_mo, ne_a, ne_b, wr1, mos_l_in_r, mos_r_in_r, do j = ne_b+1, ne_a do ipoint = 1, n_grid - tmp_M_priv(ipoint,1) = tmp_M_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,2) = tmp_M_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_M_priv(ipoint,3) = tmp_M_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,1) = tmpM_priv(ipoint,1) + 0.5d0 * int2_grad1_u12(ipoint,1,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,2) = tmpM_priv(ipoint,2) + 0.5d0 * int2_grad1_u12(ipoint,2,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) + tmpM_priv(ipoint,3) = tmpM_priv(ipoint,3) + 0.5d0 * int2_grad1_u12(ipoint,3,j,i) * mos_l_in_r(ipoint,i) * mos_r_in_r(ipoint,j) - tmp_S_priv(ipoint) = tmp_S_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & - + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & - + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) + tmpS_priv(ipoint) = tmpS_priv(ipoint) + 0.5d0 * int2_grad1_u12(ipoint,1,i,j) * int2_grad1_u12(ipoint,1,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,2,i,j) * int2_grad1_u12(ipoint,2,j,i) & + + 0.5d0 * int2_grad1_u12(ipoint,3,i,j) * int2_grad1_u12(ipoint,3,j,i) enddo enddo enddo !$OMP END DO NOWAIT !$OMP CRITICAL - tmp_M = tmp_M + tmp_M_priv - tmp_S = tmp_S + tmp_S_priv + tmpM = tmpM + tmpM_priv + tmpS = tmpS + tmpS_priv !$OMP END CRITICAL - deallocate(tmp_M_priv, tmp_S_priv) + deallocate(tmpM_priv, tmpS_priv) !$OMP END PARALLEL allocate(tmp(n_grid)) do ipoint = 1, n_grid - tmp_S(ipoint) = 2.d0 * (tmp_J(ipoint,1)*tmp_J(ipoint,1) + tmp_J(ipoint,2)*tmp_J(ipoint,2) + tmp_J(ipoint,3)*tmp_J(ipoint,3)) - tmp_S(ipoint) + tmpS(ipoint) = 2.d0 * (tmpJ(ipoint,1)*tmpJ(ipoint,1) + tmpJ(ipoint,2)*tmpJ(ipoint,2) + tmpJ(ipoint,3)*tmpJ(ipoint,3)) - tmpS(ipoint) - tmp(ipoint) = wr1(ipoint) * ( tmp_O(ipoint) * tmp_S(ipoint) & - - 2.d0 * ( tmp_J(ipoint,1) * tmp_M(ipoint,1) & - + tmp_J(ipoint,2) * tmp_M(ipoint,2) & - + tmp_J(ipoint,3) * tmp_M(ipoint,3))) + tmp(ipoint) = wr1(ipoint) * ( tmpO(ipoint) * tmpS(ipoint) - 2.d0 * ( tmpJ(ipoint,1) * tmpM(ipoint,1) & + + tmpJ(ipoint,2) * tmpM(ipoint,2) & + + tmpJ(ipoint,3) * tmpM(ipoint,3) ) ) enddo - noL_0e = noL_0e -2.d0 * (sum(tmp)) + noL_0e = noL_0e - 2.d0 * (sum(tmp)) deallocate(tmp) diff --git a/plugins/local/tc_int/write_tc_int_cuda.irp.f b/plugins/local/tc_int/write_tc_int_cuda.irp.f index 2a3dc4d1..756630b8 100644 --- a/plugins/local/tc_int/write_tc_int_cuda.irp.f +++ b/plugins/local/tc_int/write_tc_int_cuda.irp.f @@ -120,12 +120,12 @@ subroutine do_work_on_gpu() call wall_time(cuda_time0) print*, ' start CUDA kernel' - call cutc_int_c(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - 
final_grid_points, final_weight_at_r_vector, & - final_grid_points_extra, final_weight_at_r_vector_extra, & - rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & - int2_grad1_u12_ao, int_2e_ao) + call cutc_int(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao, int_2e_ao) call wall_time(cuda_time1) print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0 From 2e6df70e03303cc59e7b6d5e423105c2183e9068 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Tue, 13 Aug 2024 12:19:30 +0200 Subject: [PATCH 18/19] cleaning in cpu vs gpu comparison --- .../local/tc_int/compute_int_2e_ao_cpu.irp.f | 233 ++++++++ .../local/tc_int/compute_int_2e_ao_gpu.irp.f | 117 ++++ plugins/local/tc_int/compute_no_cpu.irp.f | 143 +++++ ...b_no_0e_gpu.irp.f => compute_no_gpu.irp.f} | 82 ++- plugins/local/tc_int/cutc_module.F90 | 149 ------ plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f | 49 +- plugins/local/tc_int/deb_no_1e_gpu.irp.f | 499 ------------------ plugins/local/tc_int/deb_no_2e_gpu.irp.f | 417 --------------- plugins/local/tc_int/deb_no_gpu.irp.f | 50 +- plugins/local/tc_int/deb_tc_int_cuda.irp.f | 55 -- 10 files changed, 640 insertions(+), 1154 deletions(-) create mode 100644 plugins/local/tc_int/compute_int_2e_ao_cpu.irp.f create mode 100644 plugins/local/tc_int/compute_int_2e_ao_gpu.irp.f create mode 100644 plugins/local/tc_int/compute_no_cpu.irp.f rename plugins/local/tc_int/{deb_no_0e_gpu.irp.f => compute_no_gpu.irp.f} (52%) delete mode 100644 plugins/local/tc_int/deb_no_1e_gpu.irp.f delete mode 100644 plugins/local/tc_int/deb_no_2e_gpu.irp.f delete mode 100644 plugins/local/tc_int/deb_tc_int_cuda.irp.f diff --git a/plugins/local/tc_int/compute_int_2e_ao_cpu.irp.f b/plugins/local/tc_int/compute_int_2e_ao_cpu.irp.f new file mode 100644 index 00000000..9ff5431d --- /dev/null +++ b/plugins/local/tc_int/compute_int_2e_ao_cpu.irp.f @@ -0,0 +1,233 @@ + +! --- + +program compute_int_2e_ao_cpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! 
--- + +subroutine main() + + use cutc_module + + implicit none + + integer :: m + integer :: i, j, k, l + integer :: ipoint, jpoint + + double precision :: weight1, ao_i_r, ao_k_r + + double precision :: time0, time1 + double precision :: wall_time0, wall_time1 + double precision :: wall_ttime0, wall_ttime1 + double precision :: tt1, tt2 + + double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:), c_mat(:,:,:) + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: int_2e_ao(:,:,:,:) + + + call wall_time(time0) + print*, ' start compute_int_2e_ao_cpu' + + + ! --- + + allocate(rn(3,nucl_num)) + allocate(aos_data1(n_points_final_grid,ao_num,4)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3) + enddo + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + + allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num)) + allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4)) + allocate(c_mat(n_points_final_grid,ao_num,ao_num)) + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4)) + allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num)) + + call wall_time(wall_time0) + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (j, i, jpoint) & + !$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp) + !$OMP DO SCHEDULE (static) + do j = 1, ao_num + do i = 1, ao_num + do jpoint = 1, n_points_extra_final_grid + int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12) + !$OMP DO + do ipoint = 1, n_points_final_grid + call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) & + , grad1_u12(1,ipoint,2) & + , grad1_u12(1,ipoint,3) & + , grad1_u12(1,ipoint,4) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + do m = 1, 4 + call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 & + , int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid & + , 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num) + enddo + call wall_time(wall_ttime1) + 
write(*,"(A,2X,F15.7)") ' wall time for DGEMM of integ over r2 (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint) & + !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time of Hermitian part (sec) = ', (wall_ttime1 - wall_ttime0) + + + call wall_time(wall_ttime0) + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 0.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of Hermitian part (sec) = ', (wall_ttime1 - wall_ttime0) + + + tt1 = 0.d0 + tt2 = 0.d0 + do m = 1, 3 + + call wall_time(wall_ttime0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) & + !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, & + !$OMP ao_num, n_points_final_grid, final_weight_at_r_vector, m) + !$OMP DO SCHEDULE (static) + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + + weight1 = final_weight_at_r_vector(ipoint) + ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m)) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(wall_ttime1) + tt1 += wall_ttime1 - wall_ttime0 + + call wall_time(wall_ttime0) + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -0.5d0 & + , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid & + , 1.d0, int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(wall_ttime1) + tt2 += wall_ttime1 - wall_ttime0 + enddo + write(*,"(A,2X,F15.7)") ' wall time of non-Hermitian part (sec) = ', tt1 + write(*,"(A,2X,F15.7)") ' wall time for DGEMM of non Hermitian part (sec) = ', tt2 + + + call wall_time(wall_ttime0) + call sum_A_At(int_2e_ao(1,1,1,1), ao_num*ao_num) + call wall_time(wall_ttime1) + write(*,"(A,2X,F15.7)") ' wall time of A + A.T (sec) = ', wall_ttime1 - wall_ttime0 + + + call wall_time(wall_time1) + write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (wall_time1 - wall_time0) + + + deallocate(int_fct_long_range, grad1_u12, c_mat) + deallocate(int_2e_ao, int2_grad1_u12_ao) + deallocate(rn, aos_data1, aos_data2) + + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for compute_int_2e_ao_cpu (sec) = ', (time1 - time0) + + return +end diff --git a/plugins/local/tc_int/compute_int_2e_ao_gpu.irp.f b/plugins/local/tc_int/compute_int_2e_ao_gpu.irp.f new file mode 100644 index 00000000..4ffdc1cb --- /dev/null +++ b/plugins/local/tc_int/compute_int_2e_ao_gpu.irp.f @@ -0,0 +1,117 @@ + +! --- + +program compute_int_2e_ao_gpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. 
+ PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() + + use cutc_module + + implicit none + + integer :: i, j, k, l + integer :: ipoint + + double precision :: time0, time1 + + double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:) + double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:) + double precision, allocatable :: int_2e_ao_gpu(:,:,:,:) + + + call wall_time(time0) + print*, ' start compute_int_2e_ao_gpu' + + + ! --- + + allocate(rn(3,nucl_num)) + allocate(aos_data1(n_points_final_grid,ao_num,4)) + allocate(aos_data2(n_points_extra_final_grid,ao_num,4)) + + do k = 1, nucl_num + rn(1,k) = nucl_coord(k,1) + rn(2,k) = nucl_coord(k,2) + rn(3,k) = nucl_coord(k,3) + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint) + aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1) + aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2) + aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3) + enddo + enddo + + do k = 1, ao_num + do ipoint = 1, n_points_extra_final_grid + aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint) + aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1) + aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2) + aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3) + enddo + enddo + + ! --- + + integer :: nB + integer :: sB + + PROVIDE nxBlocks nyBlocks nzBlocks + PROVIDE blockxSize blockySize blockzSize + + sB = 32 + nB = (n_points_final_grid + sB - 1) / sB + + call ezfio_set_tc_int_blockxSize(sB) + call ezfio_set_tc_int_nxBlocks(nB) + + allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3)) + allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num)) + + call cutc_int(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao_gpu, int_2e_ao_gpu) + + deallocate(int_2e_ao_gpu, int2_grad1_u12_ao_gpu) + deallocate(rn, aos_data1, aos_data2) + + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for compute_int_2e_ao_gpu (sec) = ', (time1 - time0) + + return +end diff --git a/plugins/local/tc_int/compute_no_cpu.irp.f b/plugins/local/tc_int/compute_no_cpu.irp.f new file mode 100644 index 00000000..54ba37ce --- /dev/null +++ b/plugins/local/tc_int/compute_no_cpu.irp.f @@ -0,0 +1,143 @@ + +! --- + +program compute_no_cpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. 
+ PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() + + use cutc_module + + implicit none + + integer :: i, j, k, l, ipoint + double precision :: time0, time1 + double precision :: tt0, tt1 + double precision :: acc_thr, err_tot, nrm_tot, err_loc + double precision :: noL_0e + double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) + double precision, allocatable :: tmp(:,:,:,:) + double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) + double precision, allocatable :: noL_1e (:,:) + double precision, allocatable :: noL_2e (:,:,:,:) + + PROVIDE mo_l_coef mo_r_coef + PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + + + call wall_time(time0) + print*, ' start compute_no_cpu' + + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) + print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + call wall_time(tt0) + open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") + read(11) int2_grad1_u12_ao + close(11) + call wall_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for reading (sec) = ', (tt1 - tt0) + + allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) + allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + + call wall_time(tt0) + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) + call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) + enddo + !$OMP END DO + !$OMP END PARALLEL + + deallocate(int2_grad1_u12_ao) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint) & + !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) + !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + do i = 1, mo_num + do j = 1, mo_num + int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) + int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) + int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + call wall_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for 3e-tensor (sec) = ', (tt1 - tt0) + + deallocate(tmp) + + allocate(noL_2e(mo_num,mo_num,mo_num,mo_num)) + allocate(noL_1e(mo_num,mo_num)) + + call provide_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_2e(1,1,1,1)) + + call provide_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_1e(1,1)) + + call provide_no_0e(n_points_final_grid, mo_num, elec_alpha_num, 
elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_0e) + + deallocate(int2_grad1_u12_bimo_t) + deallocate(noL_2e) + deallocate(noL_1e) + + + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for compute_no_cpu (sec) = ', (time1 - time0) + + return + +end + +! --- + + diff --git a/plugins/local/tc_int/deb_no_0e_gpu.irp.f b/plugins/local/tc_int/compute_no_gpu.irp.f similarity index 52% rename from plugins/local/tc_int/deb_no_0e_gpu.irp.f rename to plugins/local/tc_int/compute_no_gpu.irp.f index afff060a..9e4d90cc 100644 --- a/plugins/local/tc_int/deb_no_0e_gpu.irp.f +++ b/plugins/local/tc_int/compute_no_gpu.irp.f @@ -1,34 +1,77 @@ ! --- -subroutine deb_no_0e_gpu() +program compute_no_gpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() use cutc_module implicit none integer :: i, j, k, l, ipoint + double precision :: time0, time1 + double precision :: tt0, tt1 double precision :: acc_thr, err_tot, nrm_tot, err_loc - double precision :: noL_0e double precision :: noL_0e_gpu(1) double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) double precision, allocatable :: tmp(:,:,:,:) double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) - + double precision, allocatable :: noL_1e_gpu(:,:) + double precision, allocatable :: noL_2e_gpu(:,:,:,:) PROVIDE mo_l_coef mo_r_coef PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + call wall_time(time0) + print*, ' start compute_no_gpu' + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + call wall_time(tt0) open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") read(11) int2_grad1_u12_ao close(11) + call wall_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for reading (sec) = ', (tt1 - tt0) allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + call wall_time(tt0) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (ipoint) & @@ -60,37 +103,30 @@ subroutine deb_no_0e_gpu() enddo !$OMP END DO !$OMP END PARALLEL + call wall_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for 3e-tensor (sec) = ', (tt1 - tt0) deallocate(tmp) - ! --- + allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num)) + allocate(noL_1e_gpu(mo_num,mo_num)) - call cutc_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), noL_0e_gpu(1)) - - ! 
--- - - call provide_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), noL_0e) - - ! --- + call cutc_no(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & + final_weight_at_r_vector(1), & + mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & + int2_grad1_u12_bimo_t(1,1,1,1), noL_2e_gpu(1,1,1,1), noL_1e_gpu(1,1), noL_0e_gpu(1)) deallocate(int2_grad1_u12_bimo_t) + deallocate(noL_2e_gpu) + deallocate(noL_1e_gpu) - print *, 'noL_0e CPU = ', noL_0e - print *, 'noL_0e GPU = ', noL_0e_gpu(1) + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for compute_no_gpu (sec) = ', (time1 - time0) - err_tot = dabs(noL_0e - noL_0e_gpu(1)) - nrm_tot = dabs(noL_0e) - print *, ' absolute accuracy on noL_0e (%) =', 100.d0 * err_tot / nrm_tot - return end ! --- + diff --git a/plugins/local/tc_int/cutc_module.F90 b/plugins/local/tc_int/cutc_module.F90 index eaf271e5..0f5f6c78 100644 --- a/plugins/local/tc_int/cutc_module.F90 +++ b/plugins/local/tc_int/cutc_module.F90 @@ -41,155 +41,6 @@ module cutc_module ! --- - subroutine deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, & - blockxSize, blockySize, blockzSize, & - n_grid1, n_grid2, n_ao, n_nuc, size_bh, & - r1, wr1, r2, wr2, rn, & - aos_data1, aos_data2, & - c_bh, m_bh, n_bh, o_bh, & - int2_grad1_u12_ao, int_2e_ao) bind(C, name = "deb_int_2e_ao") - - import c_int, c_double, c_ptr - - integer(c_int), intent(in), value :: nxBlocks, blockxSize - integer(c_int), intent(in), value :: nyBlocks, blockySize - integer(c_int), intent(in), value :: nzBlocks, blockzSize - integer(c_int), intent(in), value :: n_grid1, n_grid2 - integer(c_int), intent(in), value :: n_ao - integer(c_int), intent(in), value :: n_nuc - integer(c_int), intent(in), value :: size_bh - real(c_double), intent(in) :: r1(3,n_grid1), wr1(n_grid1) - real(c_double), intent(in) :: r2(3,n_grid2), wr2(n_grid2) - real(c_double), intent(in) :: rn(3,n_nuc) - real(c_double), intent(in) :: aos_data1(n_grid1,n_ao,4) - real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4) - real(c_double), intent(in) :: c_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: m_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: n_bh(size_bh,n_nuc) - integer(c_int), intent(in) :: o_bh(size_bh,n_nuc) - real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,3) - real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao) - - end subroutine deb_int_2e_ao - - ! --- - - subroutine cutc_no_2e(n_grid1, n_mo, ne_a, ne_b, & - wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & - no_2e) bind(C, name = "cutc_no_2e") - - import c_int, c_double, c_ptr - - integer(c_int), intent(in), value :: n_grid1 - integer(c_int), intent(in), value :: n_mo - integer(c_int), intent(in), value :: ne_a - integer(c_int), intent(in), value :: ne_b - real(c_double), intent(in) :: wr1(n_grid1) - real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) - real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo) - - end subroutine cutc_no_2e - - ! 
--- - - subroutine deb_no_2e(n_grid1, n_mo, ne_a, ne_b, & - wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & - tmpO, tmpJ, tmpA, tmpB, tmpC, tmpD, tmpE, & - no_2e) bind(C, name = "deb_no_2e") - - import c_int, c_double, c_ptr - - integer(c_int), intent(in), value :: n_grid1 - integer(c_int), intent(in), value :: n_mo - integer(c_int), intent(in), value :: ne_a - integer(c_int), intent(in), value :: ne_b - real(c_double), intent(in) :: wr1(n_grid1) - real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) - real(c_double), intent(out) :: tmpO(n_grid1), tmpJ(n_grid1,3) - real(c_double), intent(out) :: tmpA(n_grid1,3,n_mo), tmpB(n_grid1,3,n_mo) - real(c_double), intent(out) :: tmpC(n_grid1,4,n_mo,n_mo), tmpD(n_grid1,4,n_mo,n_mo) - real(c_double), intent(out) :: tmpE(n_mo,n_mo,n_mo,n_mo) - real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo) - - end subroutine deb_no_2e - - ! --- - - subroutine cutc_no_1e(n_grid1, n_mo, ne_a, ne_b, & - wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & - no_1e) bind(C, name = "cutc_no_1e") - - import c_int, c_double, c_ptr - - integer(c_int), intent(in), value :: n_grid1 - integer(c_int), intent(in), value :: n_mo - integer(c_int), intent(in), value :: ne_a - integer(c_int), intent(in), value :: ne_b - real(c_double), intent(in) :: wr1(n_grid1) - real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) - real(c_double), intent(out) :: no_1e(n_mo,n_mo) - - end subroutine cutc_no_1e - - ! --- - - subroutine deb_no_1e(n_grid1, n_mo, ne_a, ne_b, & - wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & - tmpO, tmpJ, tmpM, tmpS, tmpC, tmpD, tmpL, tmpR, tmpE, tmpF, & - no_1e) bind(C, name = "deb_no_1e") - - import c_int, c_double, c_ptr - - integer(c_int), intent(in), value :: n_grid1 - integer(c_int), intent(in), value :: n_mo - integer(c_int), intent(in), value :: ne_a - integer(c_int), intent(in), value :: ne_b - real(c_double), intent(in) :: wr1(n_grid1) - real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) - real(c_double), intent(out) :: tmpO(n_grid1) - real(c_double), intent(out) :: tmpJ(n_grid1,3) - real(c_double), intent(out) :: tmpM(n_grid1,3) - real(c_double), intent(out) :: tmpS(n_grid1) - real(c_double), intent(out) :: tmpC(n_grid1,4,n_mo,n_mo) - real(c_double), intent(out) :: tmpD(n_grid1,4) - real(c_double), intent(out) :: tmpL(n_grid1,3,n_mo) - real(c_double), intent(out) :: tmpR(n_grid1,3,n_mo) - real(c_double), intent(out) :: tmpE(n_grid1,5,n_mo) - real(c_double), intent(out) :: tmpF(n_grid1,5,n_mo) - real(c_double), intent(out) :: no_1e(n_mo,n_mo) - - end subroutine deb_no_1e - - ! 
--- - - subroutine cutc_no_0e(n_grid1, n_mo, ne_a, ne_b, & - wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & - no_0e) bind(C, name = "cutc_no_0e") - - import c_int, c_double, c_ptr - - integer(c_int), intent(in), value :: n_grid1 - integer(c_int), intent(in), value :: n_mo - integer(c_int), intent(in), value :: ne_a - integer(c_int), intent(in), value :: ne_b - real(c_double), intent(in) :: wr1(n_grid1) - real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo) - real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo) - real(c_double), intent(out) :: no_0e(1) - - end subroutine cutc_no_0e - - ! --- - subroutine cutc_no(n_grid1, n_mo, ne_a, ne_b, & wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, & no_2e, no_1e, no_0e) bind(C, name = "cutc_no") diff --git a/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f b/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f index 3290c149..4bb44af9 100644 --- a/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f +++ b/plugins/local/tc_int/deb_int_2e_ao_gpu.irp.f @@ -1,7 +1,39 @@ ! --- -subroutine deb_int_2e_ao_gpu() +program deb_int_2e_ao_gpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. + PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() use cutc_module @@ -28,7 +60,6 @@ subroutine deb_int_2e_ao_gpu() double precision, allocatable :: int_2e_ao_gpu(:,:,:,:) - call wall_time(time0) print*, ' start deb_int_2e_ao_gpu' @@ -80,12 +111,12 @@ subroutine deb_int_2e_ao_gpu() allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3)) allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num)) - call deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & - n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & - final_grid_points, final_weight_at_r_vector, & - final_grid_points_extra, final_weight_at_r_vector_extra, & - rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & - int2_grad1_u12_ao_gpu, int_2e_ao_gpu) + call cutc_int(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, & + n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, & + final_grid_points, final_weight_at_r_vector, & + final_grid_points_extra, final_weight_at_r_vector_extra, & + rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, & + int2_grad1_u12_ao_gpu, int_2e_ao_gpu) ! 
--- @@ -223,7 +254,6 @@ subroutine deb_int_2e_ao_gpu() acc_thr = 1d-12 - print *, ' precision on int2_grad1_u12_ao ' err_tot = 0.d0 nrm_tot = 0.d0 do m = 1, 3 @@ -246,7 +276,6 @@ subroutine deb_int_2e_ao_gpu() print *, ' absolute accuracy on int2_grad1_u12_ao (%) =', 100.d0 * err_tot / nrm_tot - print *, ' precision on int_2e_ao ' err_tot = 0.d0 nrm_tot = 0.d0 do i = 1, ao_num diff --git a/plugins/local/tc_int/deb_no_1e_gpu.irp.f b/plugins/local/tc_int/deb_no_1e_gpu.irp.f deleted file mode 100644 index 1efbb913..00000000 --- a/plugins/local/tc_int/deb_no_1e_gpu.irp.f +++ /dev/null @@ -1,499 +0,0 @@ - -! --- - -subroutine deb_no_1e_gpu() - - use cutc_module - - implicit none - - integer :: i, j, k, l, ipoint - double precision :: acc_thr, err_tot, nrm_tot, err_loc - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: tmp(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) - double precision, allocatable :: noL_1e(:,:) - double precision, allocatable :: noL_1e_gpu(:,:) - - - PROVIDE mo_l_coef mo_r_coef - PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp - - - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) - print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") - read(11) int2_grad1_u12_ao - close(11) - - allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) - allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(int2_grad1_u12_ao) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) - !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - do i = 1, mo_num - do j = 1, mo_num - int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) - int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) - int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(tmp) - - ! --- - - allocate(noL_1e_gpu(mo_num,mo_num)) - - call cutc_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), noL_1e_gpu(1,1)) - - ! --- - - allocate(noL_1e(mo_num,mo_num)) - - call provide_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), noL_1e(1,1)) - - ! 
--- - - deallocate(int2_grad1_u12_bimo_t) - - acc_thr = 1d-12 - - err_tot = 0.d0 - nrm_tot = 0.d0 - do k = 1, mo_num - do l = 1, mo_num - err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k)) - if(err_loc > acc_thr) then - print*, " error on", l, k - print*, " CPU res", noL_1e (l,k) - print*, " GPU res", noL_1e_gpu(l,k) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(noL_1e(l,k)) - enddo - enddo - print *, ' absolute accuracy on noL_1e (%) =', 100.d0 * err_tot / nrm_tot - - deallocate(noL_1e) - deallocate(noL_1e_gpu) - - - return - -end - -! --- - -subroutine deb_no_1e_gpu_tmp() - - use cutc_module - - implicit none - - integer :: i, j, k, l, m, ipoint - double precision :: acc_thr, err_tot, nrm_tot, err_loc - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: tmp(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) - double precision, allocatable :: tmpO(:), tmpO_gpu(:) - double precision, allocatable :: tmpJ(:,:), tmpJ_gpu(:,:) - double precision, allocatable :: tmpM(:,:), tmpM_gpu(:,:) - double precision, allocatable :: tmpS(:), tmpS_gpu(:) - double precision, allocatable :: tmpC(:,:,:,:), tmpC_gpu(:,:,:,:) - double precision, allocatable :: tmpD(:,:), tmpD_gpu(:,:) - double precision, allocatable :: tmpL(:,:,:), tmpL_gpu(:,:,:) - double precision, allocatable :: tmpR(:,:,:), tmpR_gpu(:,:,:) - double precision, allocatable :: tmpE(:,:,:), tmpE_gpu(:,:,:) - double precision, allocatable :: tmpF(:,:,:), tmpF_gpu(:,:,:) - double precision, allocatable :: noL_1e(:,:), noL_1e_gpu(:,:) - - - ! --- - - - PROVIDE mo_l_coef mo_r_coef - PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp - - - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) - print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") - read(11) int2_grad1_u12_ao - close(11) - - allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) - allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(int2_grad1_u12_ao) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) - !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - do i = 1, mo_num - do j = 1, mo_num - int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) - int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) - int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(tmp) - - ! 
--- - - allocate(tmpO_gpu(n_points_final_grid)) - allocate(tmpJ_gpu(n_points_final_grid,3)) - allocate(tmpM_gpu(n_points_final_grid,3)) - allocate(tmpS_gpu(n_points_final_grid)) - allocate(tmpC_gpu(n_points_final_grid,4,mo_num,mo_num)) - allocate(tmpD_gpu(n_points_final_grid,4)) - allocate(tmpL_gpu(n_points_final_grid,3,mo_num)) - allocate(tmpR_gpu(n_points_final_grid,3,mo_num)) - allocate(tmpE_gpu(n_points_final_grid,5,mo_num)) - allocate(tmpF_gpu(n_points_final_grid,5,mo_num)) - allocate(noL_1e_gpu(mo_num,mo_num)) - - call deb_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), int2_grad1_u12_bimo_t(1,1,1,1), & - tmpO_gpu(1), tmpJ_gpu(1,1), tmpM_gpu(1,1), tmpS_gpu(1), tmpC_gpu(1,1,1,1), tmpD_gpu(1,1), & - tmpL_gpu(1,1,1), tmpR_gpu(1,1,1), tmpE_gpu(1,1,1), tmpF_gpu(1,1,1), noL_1e_gpu(1,1)) - - ! --- - - allocate(tmpO(n_points_final_grid)) - allocate(tmpJ(n_points_final_grid,3)) - allocate(tmpM(n_points_final_grid,3)) - allocate(tmpS(n_points_final_grid)) - allocate(tmpC(n_points_final_grid,4,mo_num,mo_num)) - allocate(tmpD(n_points_final_grid,4)) - allocate(tmpL(n_points_final_grid,3,mo_num)) - allocate(tmpR(n_points_final_grid,3,mo_num)) - allocate(tmpE(n_points_final_grid,5,mo_num)) - allocate(tmpF(n_points_final_grid,5,mo_num)) - allocate(noL_1e(mo_num,mo_num)) - - call provide_no_1e_tmp(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), int2_grad1_u12_bimo_t(1,1,1,1), & - tmpO(1), tmpJ(1,1), tmpM(1,1), tmpS(1), tmpC(1,1,1,1), tmpD(1,1), tmpL(1,1,1), tmpR(1,1,1), & - tmpE(1,1,1), tmpF(1,1,1), noL_1e(1,1)) - - ! --- - - deallocate(int2_grad1_u12_bimo_t) - - - acc_thr = 1d-12 - - ! --- - - ! tmpO(n_points_final_grid)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpO(ipoint) - tmpO_gpu(ipoint)) - if(err_loc > acc_thr) then - print*, " error on", ipoint - print*, " CPU res", tmpO (ipoint) - print*, " GPU res", tmpO_gpu(ipoint) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpO(ipoint)) - enddo - print *, ' absolute accuracy on tmpO (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpJ(n_points_final_grid,3)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpJ(ipoint,m) - tmpJ_gpu(ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m - print*, " CPU res", tmpJ (ipoint,m) - print*, " GPU res", tmpJ_gpu(ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpJ(ipoint,m)) - enddo - enddo - print *, ' absolute accuracy on tmpJ (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpM(n_points_final_grid,3)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpM(ipoint,m) - tmpM_gpu(ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m - print*, " CPU res", tmpM (ipoint,m) - print*, " GPU res", tmpM_gpu(ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpM(ipoint,m)) - enddo - enddo - print *, ' absolute accuracy on tmpM (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! 
tmpS(n_points_final_grid)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpS(ipoint) - tmpS_gpu(ipoint)) - if(err_loc > acc_thr) then - print*, " error on", ipoint - print*, " CPU res", tmpS (ipoint) - print*, " GPU res", tmpS_gpu(ipoint) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpS(ipoint)) - enddo - print *, ' absolute accuracy on tmpS (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpC(n_points_final_grid,4,mo_num,mo_num)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do j = 1, mo_num - do m = 1, 4 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpC(ipoint,m,j,i) - tmpC_gpu(ipoint,m,j,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, j, i - print*, " CPU res", tmpC (ipoint,m,j,i) - print*, " GPU res", tmpC_gpu(ipoint,m,j,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpC(ipoint,m,j,i)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on tmpC (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpD(n_points_final_grid,4)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do m = 1, 4 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpD(ipoint,m) - tmpD_gpu(ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m - print*, " CPU res", tmpD (ipoint,m) - print*, " GPU res", tmpD_gpu(ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpD(ipoint,m)) - enddo - enddo - print *, ' absolute accuracy on tmpD (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpL(n_points_final_grid,3,mo_num)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpL(ipoint,m,i) - tmpL_gpu(ipoint,m,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i - print*, " CPU res", tmpL (ipoint,m,i) - print*, " GPU res", tmpL_gpu(ipoint,m,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpL(ipoint,m,i)) - enddo - enddo - enddo - print *, ' absolute accuracy on tmpL (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpR(n_points_final_grid,3,mo_num)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpR(ipoint,m,i) - tmpR_gpu(ipoint,m,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i - print*, " CPU res", tmpR (ipoint,m,i) - print*, " GPU res", tmpR_gpu(ipoint,m,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpR(ipoint,m,i)) - enddo - enddo - enddo - print *, ' absolute accuracy on tmpR (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! tmpE(n_points_final_grid,5,mo_num)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do m = 1, 5 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpE(ipoint,m,i) - tmpE_gpu(ipoint,m,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i - print*, " CPU res", tmpE (ipoint,m,i) - print*, " GPU res", tmpE_gpu(ipoint,m,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpE(ipoint,m,i)) - enddo - enddo - enddo - print *, ' absolute accuracy on tmpE (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! 
tmpF(n_points_final_grid,5,mo_num)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do m = 1, 5 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpF(ipoint,m,i) - tmpF_gpu(ipoint,m,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i - print*, " CPU res", tmpF (ipoint,m,i) - print*, " GPU res", tmpF_gpu(ipoint,m,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpF(ipoint,m,i)) - enddo - enddo - enddo - print *, ' absolute accuracy on tmpF (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - ! noL_1e(mo_num,mo_num)) - err_tot = 0.d0 - nrm_tot = 0.d0 - do k = 1, mo_num - do l = 1, mo_num - err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k)) - if(err_loc > acc_thr) then - print*, " error on", l, k - print*, " CPU res", noL_1e (l,k) - print*, " GPU res", noL_1e_gpu(l,k) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(noL_1e(l,k)) - enddo - enddo - print *, ' absolute accuracy on noL_1e (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - - deallocate(tmpO) - deallocate(tmpJ) - deallocate(tmpM) - deallocate(tmpS) - deallocate(tmpC) - deallocate(tmpD) - deallocate(tmpL) - deallocate(tmpR) - deallocate(tmpE) - deallocate(tmpF) - deallocate(noL_1e) - - deallocate(tmpO_gpu) - deallocate(tmpJ_gpu) - deallocate(tmpM_gpu) - deallocate(tmpS_gpu) - deallocate(tmpC_gpu) - deallocate(tmpD_gpu) - deallocate(tmpL_gpu) - deallocate(tmpR_gpu) - deallocate(tmpE_gpu) - deallocate(tmpF_gpu) - deallocate(noL_1e_gpu) - - - return - -end - - - diff --git a/plugins/local/tc_int/deb_no_2e_gpu.irp.f b/plugins/local/tc_int/deb_no_2e_gpu.irp.f deleted file mode 100644 index 16f58cca..00000000 --- a/plugins/local/tc_int/deb_no_2e_gpu.irp.f +++ /dev/null @@ -1,417 +0,0 @@ - -! --- - -subroutine deb_no_2e_gpu() - - use cutc_module - - implicit none - - integer :: i, j, k, l, ipoint - double precision :: acc_thr, err_tot, nrm_tot, err_loc - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: tmp(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) - double precision, allocatable :: noL_2e(:,:,:,:) - double precision, allocatable :: noL_2e_gpu(:,:,:,:) - - - PROVIDE mo_l_coef mo_r_coef - PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp - - - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) - print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") - read(11) int2_grad1_u12_ao - close(11) - - allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) - allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(int2_grad1_u12_ao) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - !$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) - !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - do i = 1, mo_num - do j = 1, 
mo_num - int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) - int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) - int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(tmp) - - ! --- - - allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num)) - - call cutc_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), noL_2e_gpu(1,1,1,1)) - - ! --- - - allocate(noL_2e(mo_num,mo_num,mo_num,mo_num)) - - call provide_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), noL_2e(1,1,1,1)) - - ! --- - - deallocate(int2_grad1_u12_bimo_t) - - acc_thr = 1d-12 - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do j = 1, mo_num - do k = 1, mo_num - do l = 1, mo_num - err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i)) - if(err_loc > acc_thr) then - print*, " error on", l, k, j, i - print*, " CPU res", noL_2e (l,k,j,i) - print*, " GPU res", noL_2e_gpu(l,k,j,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on noL_2e (%) =', 100.d0 * err_tot / nrm_tot - - deallocate(noL_2e) - deallocate(noL_2e_gpu) - - - return - -end - -! --- - -subroutine deb_no_2e_gpu_tmp() - - use cutc_module - - implicit none - - integer :: i, j, k, l, m, ipoint - double precision :: acc_thr, err_tot, nrm_tot, err_loc - double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:) - double precision, allocatable :: tmp(:,:,:,:) - double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:) - double precision, allocatable :: tmpO(:), tmpO_gpu(:) - double precision, allocatable :: tmpJ(:,:), tmpJ_gpu(:,:) - double precision, allocatable :: tmpA(:,:,:), tmpA_gpu(:,:,:) - double precision, allocatable :: tmpB(:,:,:), tmpB_gpu(:,:,:) - double precision, allocatable :: tmpC(:,:,:,:), tmpC_gpu(:,:,:,:) - double precision, allocatable :: tmpD(:,:,:,:), tmpD_gpu(:,:,:,:) - double precision, allocatable :: tmpE(:,:,:,:), tmpE_gpu(:,:,:,:) - double precision, allocatable :: noL_2e(:,:,:,:), noL_2e_gpu(:,:,:,:) - - - PROVIDE mo_l_coef mo_r_coef - PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp - - - allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) - print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' - open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") - read(11) int2_grad1_u12_ao - close(11) - - allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) - allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint) & - !$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp) - !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num) - call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num) - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(int2_grad1_u12_ao) - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, ipoint) & - 
!$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t) - !$OMP DO COLLAPSE(2) SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - do i = 1, mo_num - do j = 1, mo_num - int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1) - int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2) - int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3) - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - - deallocate(tmp) - - ! --- - - allocate(tmpO_gpu(n_points_final_grid)) - allocate(tmpJ_gpu(n_points_final_grid,3)) - allocate(tmpA_gpu(n_points_final_grid,3,mo_num)) - allocate(tmpB_gpu(n_points_final_grid,3,mo_num)) - allocate(tmpC_gpu(n_points_final_grid,4,mo_num,mo_num)) - allocate(tmpD_gpu(n_points_final_grid,4,mo_num,mo_num)) - allocate(tmpE_gpu(mo_num,mo_num,mo_num,mo_num)) - allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num)) - - call deb_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), & - tmpO_gpu(1), tmpJ_gpu(1,1), tmpA_gpu(1,1,1), tmpB_gpu(1,1,1), & - tmpC_gpu(1,1,1,1), tmpD_gpu(1,1,1,1), tmpE_gpu(1,1,1,1), & - noL_2e_gpu(1,1,1,1)) - - ! --- - - allocate(tmpO(n_points_final_grid)) - allocate(tmpJ(n_points_final_grid,3)) - allocate(tmpA(n_points_final_grid,3,mo_num)) - allocate(tmpB(n_points_final_grid,3,mo_num)) - allocate(tmpC(n_points_final_grid,4,mo_num,mo_num)) - allocate(tmpD(n_points_final_grid,4,mo_num,mo_num)) - allocate(tmpE(mo_num,mo_num,mo_num,mo_num)) - allocate(noL_2e(mo_num,mo_num,mo_num,mo_num)) - - call provide_no_2e_tmp(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, & - final_weight_at_r_vector(1), & - mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), & - int2_grad1_u12_bimo_t(1,1,1,1), & - tmpO(1), tmpJ(1,1), tmpA(1,1,1), tmpB(1,1,1), & - tmpC(1,1,1,1), tmpD(1,1,1,1), tmpE(1,1,1,1), & - noL_2e(1,1,1,1)) - - ! --- - - deallocate(int2_grad1_u12_bimo_t) - - acc_thr = 1d-12 - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpO(ipoint) - tmpO_gpu(ipoint)) - if(err_loc > acc_thr) then - print*, " error on", ipoint - print*, " CPU res", tmpO (ipoint) - print*, " GPU res", tmpO_gpu(ipoint) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpO(ipoint)) - enddo - print *, ' absolute accuracy on tmpO (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpJ(ipoint,m) - tmpJ_gpu(ipoint,m)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m - print*, " CPU res", tmpJ (ipoint,m) - print*, " GPU res", tmpJ_gpu(ipoint,m) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpJ(ipoint,m)) - enddo - enddo - print *, ' absolute accuracy on tmpJ (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpA(ipoint,m,i) - tmpA_gpu(ipoint,m,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i - print*, " CPU res", tmpA (ipoint,m,i) - print*, " GPU res", tmpA_gpu(ipoint,m,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpA(ipoint,m,i)) - enddo - enddo - enddo - print *, ' absolute accuracy on tmpA (%) =', 100.d0 * err_tot / nrm_tot - - ! 
--- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpB(ipoint,m,i) - tmpB_gpu(ipoint,m,i)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i - print*, " CPU res", tmpB (ipoint,m,i) - print*, " GPU res", tmpB_gpu(ipoint,m,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpB(ipoint,m,i)) - enddo - enddo - enddo - print *, ' absolute accuracy on tmpB (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do j = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpC(ipoint,m,i,j) - tmpC_gpu(ipoint,m,i,j)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i, j - print*, " CPU res", tmpC (ipoint,m,i,j) - print*, " GPU res", tmpC_gpu(ipoint,m,i,j) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpC(ipoint,m,i,j)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on tmpC (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do j = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - err_loc = dabs(tmpD(ipoint,m,i,j) - tmpD_gpu(ipoint,m,i,j)) - if(err_loc > acc_thr) then - print*, " error on", ipoint, m, i, j - print*, " CPU res", tmpD (ipoint,m,i,j) - print*, " GPU res", tmpD_gpu(ipoint,m,i,j) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpD(ipoint,m,i,j)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on tmpD (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do j = 1, mo_num - do k = 1, mo_num - do l = 1, mo_num - err_loc = dabs(tmpE(l,k,j,i) - tmpE_gpu(l,k,j,i)) - if(err_loc > acc_thr) then - print*, " error on", l, k, j, i - print*, " CPU res", tmpE (l,k,j,i) - print*, " GPU res", tmpE_gpu(l,k,j,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(tmpE(l,k,j,i)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on tmpE (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - err_tot = 0.d0 - nrm_tot = 0.d0 - do i = 1, mo_num - do j = 1, mo_num - do k = 1, mo_num - do l = 1, mo_num - err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i)) - if(err_loc > acc_thr) then - print*, " error on", l, k, j, i - print*, " CPU res", noL_2e (l,k,j,i) - print*, " GPU res", noL_2e_gpu(l,k,j,i) - stop - endif - err_tot = err_tot + err_loc - nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i)) - enddo - enddo - enddo - enddo - print *, ' absolute accuracy on noL_2e (%) =', 100.d0 * err_tot / nrm_tot - - ! --- - - deallocate(tmpO, tmpO_gpu) - deallocate(tmpJ, tmpJ_gpu) - deallocate(tmpA, tmpA_gpu) - deallocate(tmpB, tmpB_gpu) - deallocate(tmpC, tmpC_gpu) - deallocate(tmpD, tmpD_gpu) - deallocate(tmpE, tmpE_gpu) - deallocate(noL_2e, noL_2e_gpu) - - return -end - - diff --git a/plugins/local/tc_int/deb_no_gpu.irp.f b/plugins/local/tc_int/deb_no_gpu.irp.f index e14404e6..de8adb97 100644 --- a/plugins/local/tc_int/deb_no_gpu.irp.f +++ b/plugins/local/tc_int/deb_no_gpu.irp.f @@ -1,13 +1,47 @@ ! --- -subroutine deb_no_gpu() +program deb_no_gpu + + implicit none + + print *, ' j2e_type = ', j2e_type + print *, ' j1e_type = ', j1e_type + print *, ' env_type = ', env_type + + my_grid_becke = .True. + PROVIDE tc_grid1_a tc_grid1_r + my_n_pt_r_grid = tc_grid1_r + my_n_pt_a_grid = tc_grid1_a + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + my_extra_grid_becke = .True. 
+ PROVIDE tc_grid2_a tc_grid2_r + my_n_pt_r_extra_grid = tc_grid2_r + my_n_pt_a_extra_grid = tc_grid2_a + touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid + + call write_int(6, my_n_pt_r_grid, 'radial external grid over') + call write_int(6, my_n_pt_a_grid, 'angular external grid over') + + call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') + call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') + + call main() + +end + +! --- + +subroutine main() use cutc_module implicit none integer :: i, j, k, l, ipoint + double precision :: time0, time1 + double precision :: tt0, tt1 double precision :: acc_thr, err_tot, nrm_tot, err_loc double precision :: noL_0e double precision :: noL_0e_gpu(1) @@ -24,15 +58,24 @@ subroutine deb_no_gpu() PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp + call wall_time(time0) + print*, ' start deb_no_gpu' + + + allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3)) print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao' + call wall_time(tt0) open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read") read(11) int2_grad1_u12_ao close(11) + call wall_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for reading (sec) = ', (tt1 - tt0) allocate(tmp(mo_num,mo_num,n_points_final_grid,3)) allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num)) + call wall_time(tt0) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (ipoint) & @@ -64,6 +107,8 @@ subroutine deb_no_gpu() enddo !$OMP END DO !$OMP END PARALLEL + call wall_time(tt1) + write(*,"(A,2X,F15.7)") ' wall time for 3e-tensor (sec) = ', (tt1 - tt0) deallocate(tmp) @@ -161,6 +206,9 @@ subroutine deb_no_gpu() print *, ' absolute accuracy on noL_0e (%) =', 100.d0 * err_tot / nrm_tot + call wall_time(time1) + write(*,"(A,2X,F15.7)") ' wall time for deb_no_gpu (sec) = ', (time1 - time0) + return end diff --git a/plugins/local/tc_int/deb_tc_int_cuda.irp.f b/plugins/local/tc_int/deb_tc_int_cuda.irp.f deleted file mode 100644 index ad20d861..00000000 --- a/plugins/local/tc_int/deb_tc_int_cuda.irp.f +++ /dev/null @@ -1,55 +0,0 @@ -! --- - -program write_tc_int_cuda - - implicit none - - print *, ' j2e_type = ', j2e_type - print *, ' j1e_type = ', j1e_type - print *, ' env_type = ', env_type - - my_grid_becke = .True. - PROVIDE tc_grid1_a tc_grid1_r - my_n_pt_r_grid = tc_grid1_r - my_n_pt_a_grid = tc_grid1_a - touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid - - my_extra_grid_becke = .True. - PROVIDE tc_grid2_a tc_grid2_r - my_n_pt_r_extra_grid = tc_grid2_r - my_n_pt_a_extra_grid = tc_grid2_a - touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid - - call write_int(6, my_n_pt_r_grid, 'radial external grid over') - call write_int(6, my_n_pt_a_grid, 'angular external grid over') - - call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over') - call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over') - - call main() - -end - -! --- - -subroutine main() - - implicit none - - !call deb_int_2e_ao_gpu() - - !call deb_no_2e_gpu_tmp() - !call deb_no_2e_gpu() - - !call deb_no_1e_gpu_tmp() - !call deb_no_1e_gpu() - - !call deb_no_0e_gpu() - - call deb_no_gpu() - - return -end - -! 
--- - From 674635c057464b60c339b22cc8ca3fdd5c4afa14 Mon Sep 17 00:00:00 2001 From: Abdallah Ammar Date: Tue, 27 Aug 2024 15:23:42 +0200 Subject: [PATCH 19/19] remove double counting of nuclear repulsion --- plugins/local/slater_tc/slater_tc_opt.irp.f | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/local/slater_tc/slater_tc_opt.irp.f b/plugins/local/slater_tc/slater_tc_opt.irp.f index 3c4421f8..bf954882 100644 --- a/plugins/local/slater_tc/slater_tc_opt.irp.f +++ b/plugins/local/slater_tc/slater_tc_opt.irp.f @@ -129,9 +129,9 @@ subroutine htilde_mu_mat_opt_bi_ortho(key_j, key_i, Nint, hmono, htwoe, hthree, endif - if(degree==0) then - htot += nuclear_repulsion - endif +! if(degree==0) then +! htot += nuclear_repulsion +! endif end
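
The hunk above stops adding nuclear_repulsion inside htilde_mu_mat_opt_bi_ortho for diagonal (degree==0) pairs because, per the patch subject, the constant was being counted twice: once in the matrix-element routine and presumably once more where the total TC energy is assembled (that second site is not shown in this patch). Below is a minimal, self-contained Fortran sketch of the bookkeeping being fixed; the program name and the values e_elec and e_nuc are invented for illustration and do not come from the plugin.

  program nuc_rep_once
    implicit none
    double precision, parameter :: e_elec = -1.174d0  ! dummy electronic diagonal element
    double precision, parameter :: e_nuc  =  0.714d0  ! dummy nuclear repulsion constant
    double precision :: htot, e_total

    ! Old behaviour: the matrix-element routine added e_nuc for degree==0 ...
    htot    = e_elec + e_nuc
    ! ... and the caller added it again when forming the energy:
    e_total = htot + e_nuc
    print *, 'with double counting :', e_total

    ! Patched behaviour: htot stays purely electronic, and e_nuc
    ! enters exactly once, at energy-assembly time.
    htot    = e_elec
    e_total = htot + e_nuc
    print *, 'with single counting :', e_total
  end program nuc_rep_once

Keeping additive constants such as the nuclear repulsion out of the per-pair matrix-element routine and applying them once at assembly time avoids this class of bug entirely, which is the design choice the commented-out branch reflects.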