mirror of https://github.com/QuantumPackage/qp2.git synced 2024-12-22 12:23:43 +01:00

WORKING ON DEBUG CUDA-INTEG

Abdallah Ammar 2024-08-01 10:05:47 +02:00
parent eb236e0112
commit b6b0ed5d22
8 changed files with 959 additions and 108 deletions

View File

@@ -118,7 +118,7 @@ BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_num)]
  !$OMP END PARALLEL
   call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 &
             , int2_grad1_u12_square_ao(1,1,1), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
-            , 0.d0, ao_two_e_tc_tot, ao_num*ao_num)
+            , 0.d0, ao_two_e_tc_tot(1,1,1,1), ao_num*ao_num)
   deallocate(c_mat)
 endif
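
The only functional change in this hunk passes the explicit first element ao_two_e_tc_tot(1,1,1,1) to dgemm instead of the bare array name. Both spellings hand dgemm the same storage; the element form spells out that the rank-4 provider is consumed as an (ao_num*ao_num) x (ao_num*ao_num) matrix through Fortran sequence association. A minimal, self-contained sketch of that mechanism (not part of this commit; as_matrix is a hypothetical stand-in for dgemm's matrix dummy):

! ---
! Sketch: passing an array element to an explicit-shape dummy associates the
! dummy with the contiguous element sequence starting there, so a rank-4
! a(n,n,n,n) is consumed as an (n*n) x (n*n) matrix.
program seq_assoc_sketch
  implicit none
  integer, parameter :: n = 2
  double precision   :: a(n,n,n,n)
  a = 1.d0
  call as_matrix(a(1,1,1,1), n*n)   ! same storage, viewed as a matrix
contains
  subroutine as_matrix(mat, ld)
    integer,          intent(in) :: ld
    double precision, intent(in) :: mat(ld,ld)
    print *, 'sum over the matrix view =', sum(mat)
  end subroutine as_matrix
end program seq_assoc_sketch
! ---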

plugins/local/tc_int/LIB (new file)

View File

@@ -0,0 +1 @@
+-ltc_int_cu

View File

@@ -2,7 +2,7 @@
 ! ---
 subroutine provide_int2_grad1_u12_ao()
-  use gpu
   BEGIN_DOC
   !
   ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2)
@@ -35,8 +35,8 @@ subroutine provide_int2_grad1_u12_ao()
   double precision              :: weight1, ao_k_r, ao_i_r
   double precision              :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq
   double precision              :: time0, time1, time2, tc1, tc2, tc
-  type(gpu_double4)             :: int2_grad1_u12_ao
-  type(gpu_double3)             :: tmp_grad1_u12, tmp_grad1_u12p, tmp
+  double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
+  double precision, allocatable :: tmp_grad1_u12(:,:,:), tmp(:,:,:)
   double precision, allocatable :: c_mat(:,:,:), tc_int_2e_ao(:,:,:,:)
   double precision, external    :: get_ao_two_e_integral
@@ -52,7 +52,6 @@ subroutine provide_int2_grad1_u12_ao()
   call total_memory(mem)
   mem = max(1.d0, qp_max_mem - mem)
-  mem = 6
   n_double = mem * 1.d8
   n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), 1.d0*n_points_final_grid))
   n_rest = int(mod(n_points_final_grid, n_blocks))
@@ -66,9 +65,9 @@ subroutine provide_int2_grad1_u12_ao()
   ! ---
   ! ---
-  call gpu_allocate(int2_grad1_u12_ao, ao_num,ao_num,n_points_final_grid,4)
-  call gpu_allocate(tmp,n_points_extra_final_grid,ao_num,ao_num)
+  allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4))
+  allocate(tmp(n_points_extra_final_grid,ao_num,ao_num))
  !$OMP PARALLEL               &
  !$OMP DEFAULT (NONE)         &
  !$OMP PRIVATE (j, i, jpoint) &
@@ -77,23 +76,17 @@ subroutine provide_int2_grad1_u12_ao()
   do j = 1, ao_num
     do i = 1, ao_num
       do jpoint = 1, n_points_extra_final_grid
-        tmp%f(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
+        tmp(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
       enddo
     enddo
   enddo
  !$OMP END DO
  !$OMP END PARALLEL
-  call gpu_allocate(tmp_grad1_u12,n_points_extra_final_grid,n_blocks,4)
-  call gpu_allocate(tmp_grad1_u12p,n_points_extra_final_grid,n_blocks,4)
+  allocate(tmp_grad1_u12(n_points_extra_final_grid,n_blocks,4))
   tc = 0.d0
-  type(gpu_stream) :: stream(4)
-  do i=1,4
-    call gpu_stream_create(stream(i))
-  enddo
   do i_pass = 1, n_pass
     ii = (i_pass-1)*n_blocks + 1
@@ -102,25 +95,22 @@ subroutine provide_int2_grad1_u12_ao()
    !$OMP PARALLEL                   &
    !$OMP DEFAULT (NONE)             &
    !$OMP PRIVATE (i_blocks, ipoint) &
-   !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12)
+   !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, tmp_grad1_u12)
    !$OMP DO
     do i_blocks = 1, n_blocks
       ipoint = ii - 1 + i_blocks ! r1
-      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_blocks,1), tmp_grad1_u12%f(1,i_blocks,2), &
-                                tmp_grad1_u12%f(1,i_blocks,3), tmp_grad1_u12%f(1,i_blocks,4))
+      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_blocks,1), tmp_grad1_u12(1,i_blocks,2), &
+                                tmp_grad1_u12(1,i_blocks,3), tmp_grad1_u12(1,i_blocks,4))
     enddo
    !$OMP END DO
    !$OMP END PARALLEL
     call wall_time(tc2)
     tc = tc + tc2 - tc1
-    call gpu_synchronize()
-    call gpu_copy(tmp_grad1_u12,tmp_grad1_u12p)
     do m = 1, 4
-      call gpu_set_stream(blas_handle, stream(m))
-      call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 &
-                    , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12p%f(1,1,m), n_points_extra_final_grid &
-                    , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num)
+      call dgemm("T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 &
+                , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid &
+                , 0.d0, int2_grad1_u12_ao(1,1,ii,m), ao_num*ao_num)
     enddo
   enddo
@@ -132,12 +122,12 @@ subroutine provide_int2_grad1_u12_ao()
    !$OMP PARALLEL                 &
    !$OMP DEFAULT (NONE)           &
    !$OMP PRIVATE (i_rest, ipoint) &
-   !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12)
+   !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, tmp_grad1_u12)
    !$OMP DO
     do i_rest = 1, n_rest
       ipoint = ii - 1 + i_rest ! r1
-      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_rest,1), tmp_grad1_u12%f(1,i_rest,2), &
-                                tmp_grad1_u12%f(1,i_rest,3), tmp_grad1_u12%f(1,i_rest,4))
+      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_rest,1), tmp_grad1_u12(1,i_rest,2), &
+                                tmp_grad1_u12(1,i_rest,3), tmp_grad1_u12(1,i_rest,4))
     enddo
    !$OMP END DO
    !$OMP END PARALLEL
@@ -145,23 +135,15 @@ subroutine provide_int2_grad1_u12_ao()
     tc = tc + tc2 - tc1
     do m = 1, 4
-      call gpu_set_stream(blas_handle, stream(m))
-      call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 &
-                    , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12%f(1,1,m), n_points_extra_final_grid &
-                    , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num)
+      call dgemm("T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 &
+                , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid &
+                , 0.d0, int2_grad1_u12_ao(1,1,ii,m), ao_num*ao_num)
     enddo
   endif
-  call gpu_synchronize()
-  call gpu_deallocate(tmp_grad1_u12)
-  call gpu_deallocate(tmp_grad1_u12p)
-  do i=1,4
-    call gpu_stream_destroy(stream(i))
-  enddo
-  call gpu_deallocate(tmp)
+  deallocate(tmp_grad1_u12)
+  deallocate(tmp)
   call wall_time(time1)
@@ -169,8 +151,6 @@ subroutine provide_int2_grad1_u12_ao()
   print*, ' wall time Jastrow derivatives (min) = ', tc / 60.d0
   call print_memory_usage()
-  !TODO
-  stop
   ! ---
   ! ---
   ! ---
@@ -196,7 +176,7 @@ stop
  !$OMP END DO
  !$OMP END PARALLEL
   call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 &
-            , int2_grad1_u12_ao%f(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
+            , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
             , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num)
   deallocate(c_mat)
@@ -232,7 +212,7 @@ stop
  !$OMP END PARALLEL
     call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 &
-              , int2_grad1_u12_ao%f(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
+              , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
               , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num)
   enddo
   deallocate(c_mat)
@@ -281,9 +261,10 @@ stop
   print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
   open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write")
   call ezfio_set_work_empty(.False.)
-  write(11) int2_grad1_u12_ao%f(:,:,:,1:3)
+  write(11) int2_grad1_u12_ao(:,:,:,1:3)
   close(11)
+  deallocate(int2_grad1_u12_ao)
   print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot'
   open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write")
@@ -295,7 +276,6 @@ stop
   ! ----
-  call gpu_deallocate(int2_grad1_u12_ao)
   deallocate(tc_int_2e_ao)
   call wall_time(time2)
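
The de-GPU-ed routine keeps the pass/remainder blocking over the r1 grid. n_pass itself is not visible in these hunks; the sketch below (not qp2 code) assumes it is the integer quotient, which is consistent with ii = n_pass*n_blocks + 1 for the last partial block:

! ---
! Self-contained sketch of the pass/remainder partition used above: n_pass
! full blocks of n_blocks r1 points, plus n_rest leftover points, so that
! n_pass*n_blocks + n_rest == n_points.
program blocking_sketch
  implicit none
  integer :: n_points, n_blocks, n_rest, n_pass, i_pass, ii
  n_points = 103                       ! stand-in for n_points_final_grid
  n_blocks = 10                        ! stand-in for the memory-derived block size
  n_rest   = mod(n_points, n_blocks)
  n_pass   = (n_points - n_rest) / n_blocks   ! assumption, see lead-in
  do i_pass = 1, n_pass
    ii = (i_pass-1)*n_blocks + 1       ! first r1 point of this pass
    print *, 'pass', i_pass, 'starts at point', ii
  enddo
  ii = n_pass*n_blocks + 1             ! first r1 point of the remainder
  print *, 'covered points:', n_pass*n_blocks + n_rest, 'of', n_points
end program blocking_sketch
! ---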

View File

@@ -3,24 +3,24 @@
 subroutine provide_int2_grad1_u12_ao_gpu()
-  use gpu_module
+  use gpu
   BEGIN_DOC
   !
   ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2)
   ! int2_grad1_u12_ao(i,j,ipoint,2) = \int dr2 [\grad1 u(r1,r2)]_y1 \chi_i(r2) \chi_j(r2)
   ! int2_grad1_u12_ao(i,j,ipoint,3) = \int dr2 [\grad1 u(r1,r2)]_z1 \chi_i(r2) \chi_j(r2)
   ! int2_grad1_u12_ao(i,j,ipoint,4) = \int dr2 [-(1/2) [\grad1 u(r1,r2)]^2] \chi_i(r2) \chi_j(r2)
   !
   !
   ! tc_int_2e_ao(k,i,l,j) = (ki|V^TC(r_12)|lj)
   !                       = <lk| V^TC(r_12) |ji> where V^TC(r_12) is the total TC operator
   !                       = tc_grad_and_lapl_ao(k,i,l,j) + tc_grad_square_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j)
   ! where:
   !
   ! tc_grad_and_lapl_ao(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) . \grad_1 | ij >
   !                              = -1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2)
   !                              = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 (-1) \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2)
   !
   ! tc_grad_square_ao(k,i,l,j) = -1/2 <kl | |\grad_1 u(r1,r2)|^2 + |\grad_2 u(r1,r2)|^2 | ij>
   !
@@ -37,8 +37,9 @@ subroutine provide_int2_grad1_u12_ao_gpu()
   double precision              :: weight1, ao_k_r, ao_i_r
   double precision              :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq
   double precision              :: time0, time1, time2, tc1, tc2, tc
-  double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:), tc_int_2e_ao(:,:,:,:)
-  double precision, allocatable :: tmp(:,:,:), c_mat(:,:,:), tmp_grad1_u12(:,:,:)
+  type(gpu_double4)             :: int2_grad1_u12_ao
+  type(gpu_double3)             :: tmp_grad1_u12, tmp_grad1_u12p, tmp
+  double precision, allocatable :: c_mat(:,:,:), tc_int_2e_ao(:,:,:,:)
   double precision, external    :: get_ao_two_e_integral
@@ -48,11 +49,12 @@ subroutine provide_int2_grad1_u12_ao_gpu()
-  print*, ' start provide_int2_grad1_u12_ao_gpu ...'
+  print*, ' start provide_int2_grad1_u12_ao ...'
   call wall_time(time0)
   call total_memory(mem)
   mem = max(1.d0, qp_max_mem - mem)
+  mem = 6
   n_double = mem * 1.d8
   n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), 1.d0*n_points_final_grid))
   n_rest = int(mod(n_points_final_grid, n_blocks))
@@ -62,41 +64,198 @@ subroutine provide_int2_grad1_u12_ao_gpu()
   call write_int(6, n_blocks, 'Size of the blocks')
   call write_int(6, n_rest, 'Size of the last block')
+  ! ---
+  ! ---
   ! ---
-  allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4))
-  allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num))
-  double precision, allocatable :: aos_data1(:,:,:)
-  double precision, allocatable :: aos_data2(:,:,:)
-  allocate(aos_data1(n_points_final_grid,ao_num,4))
-  allocate(aos_data2(n_points_extra_final_grid,ao_num,4))
-  do k = 1, ao_num
-    do ipoint = 1, n_points_final_grid
-      aos_data1(ipoint,k,1) = aos_in_r_array(i,ipoint)
-      aos_data1(ipoint,k,2) = aos_grad_in_r_array(i,ipoint,1)
-      aos_data1(ipoint,k,3) = aos_grad_in_r_array(i,ipoint,2)
-      aos_data1(ipoint,k,4) = aos_grad_in_r_array(i,ipoint,3)
-    enddo
-    do ipoint = 1, n_points_extra_final_grid
-      aos_data1(ipoint,k,1) = aos_in_r_array_extra(i,ipoint)
-      aos_data1(ipoint,k,2) = aos_grad_in_r_array_extra(i,ipoint,1)
-      aos_data1(ipoint,k,3) = aos_grad_in_r_array_extra(i,ipoint,2)
-      aos_data1(ipoint,k,4) = aos_grad_in_r_array_extra(i,ipoint,3)
-    enddo
-  enddo
-  call tc_int_bh(n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, &
-                 jBH_size, jBH_m, jBH_n, jBH_o, jBH_c, &
-                 final_grid_points, final_grid_points_extra, nucl_coord, &
-                 final_weight_at_r_vector, final_weight_at_r_vector_extra, &
-                 aos_data1, aos_data2, int2_grad1_u12_ao, tc_int_2e_ao)
+  call gpu_allocate(int2_grad1_u12_ao, ao_num,ao_num,n_points_final_grid,4)
+  call gpu_allocate(tmp,n_points_extra_final_grid,ao_num,ao_num)
+ !$OMP PARALLEL               &
+ !$OMP DEFAULT (NONE)         &
+ !$OMP PRIVATE (j, i, jpoint) &
+ !$OMP SHARED (tmp, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp)
+ !$OMP DO SCHEDULE (static)
+  do j = 1, ao_num
+    do i = 1, ao_num
+      do jpoint = 1, n_points_extra_final_grid
+        tmp%f(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
+      enddo
+    enddo
+  enddo
+ !$OMP END DO
+ !$OMP END PARALLEL
+  call gpu_allocate(tmp_grad1_u12,n_points_extra_final_grid,n_blocks,4)
+  call gpu_allocate(tmp_grad1_u12p,n_points_extra_final_grid,n_blocks,4)
+  tc = 0.d0
+  type(gpu_stream) :: stream(4)
+  do i=1,4
+    call gpu_stream_create(stream(i))
+  enddo
+  do i_pass = 1, n_pass
+    ii = (i_pass-1)*n_blocks + 1
+    call wall_time(tc1)
+   !$OMP PARALLEL                   &
+   !$OMP DEFAULT (NONE)             &
+   !$OMP PRIVATE (i_blocks, ipoint) &
+   !$OMP SHARED (n_blocks, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12)
+   !$OMP DO
+    do i_blocks = 1, n_blocks
+      ipoint = ii - 1 + i_blocks ! r1
+      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_blocks,1), tmp_grad1_u12%f(1,i_blocks,2), &
+                                tmp_grad1_u12%f(1,i_blocks,3), tmp_grad1_u12%f(1,i_blocks,4))
+    enddo
+   !$OMP END DO
+   !$OMP END PARALLEL
+    call wall_time(tc2)
+    tc = tc + tc2 - tc1
+    call gpu_synchronize()
+    call gpu_copy(tmp_grad1_u12,tmp_grad1_u12p)
+    do m = 1, 4
+      call gpu_set_stream(blas_handle, stream(m))
+      call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0 &
+                    , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12p%f(1,1,m), n_points_extra_final_grid &
+                    , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num)
+    enddo
+  enddo
+  if(n_rest .gt. 0) then
+    ii = n_pass*n_blocks + 1
+    call wall_time(tc1)
+   !$OMP PARALLEL                 &
+   !$OMP DEFAULT (NONE)           &
+   !$OMP PRIVATE (i_rest, ipoint) &
+   !$OMP SHARED (n_rest, n_points_extra_final_grid, ii, final_grid_points, tmp_grad1_u12)
+   !$OMP DO
+    do i_rest = 1, n_rest
+      ipoint = ii - 1 + i_rest ! r1
+      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_rest,1), tmp_grad1_u12%f(1,i_rest,2), &
+                                tmp_grad1_u12%f(1,i_rest,3), tmp_grad1_u12%f(1,i_rest,4))
+    enddo
+   !$OMP END DO
+   !$OMP END PARALLEL
+    call wall_time(tc2)
+    tc = tc + tc2 - tc1
+    do m = 1, 4
+      call gpu_set_stream(blas_handle, stream(m))
+      call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0 &
+                    , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12%f(1,1,m), n_points_extra_final_grid &
+                    , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num)
+    enddo
+  endif
+  call gpu_synchronize()
+  call gpu_deallocate(tmp_grad1_u12)
+  call gpu_deallocate(tmp_grad1_u12p)
+  do i=1,4
+    call gpu_stream_destroy(stream(i))
+  enddo
+  call gpu_deallocate(tmp)
+  call wall_time(time1)
+  print*, ' wall time for int2_grad1_u12_ao (min) = ', (time1-time0) / 60.d0
+  print*, ' wall time Jastrow derivatives (min) = ', tc / 60.d0
+  call print_memory_usage()
+  !TODO
+  stop
+  ! ---
+  ! ---
+  ! ---
+  allocate(tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num))
+  call wall_time(time1)
+  allocate(c_mat(n_points_final_grid,ao_num,ao_num))
+ !$OMP PARALLEL               &
+ !$OMP DEFAULT (NONE)         &
+ !$OMP PRIVATE (i, k, ipoint) &
+ !$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector)
+ !$OMP DO SCHEDULE (static)
+  do i = 1, ao_num
+    do k = 1, ao_num
+      do ipoint = 1, n_points_final_grid
+        c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k)
+      enddo
+    enddo
+  enddo
+ !$OMP END DO
+ !$OMP END PARALLEL
+  call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 &
+            , int2_grad1_u12_ao%f(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
+            , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num)
+  deallocate(c_mat)
+  call wall_time(time2)
+  print*, ' wall time of Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0
+  call print_memory_usage()
   ! ---
   call wall_time(time1)
+  allocate(c_mat(n_points_final_grid,ao_num,ao_num))
+  do m = 1, 3
+   !$OMP PARALLEL                                                              &
+   !$OMP DEFAULT (NONE)                                                        &
+   !$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r)                       &
+   !$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, &
+   !$OMP         ao_num, n_points_final_grid, final_weight_at_r_vector, m)
+   !$OMP DO SCHEDULE (static)
+    do i = 1, ao_num
+      do k = 1, ao_num
+        do ipoint = 1, n_points_final_grid
+          weight1 = 0.5d0 * final_weight_at_r_vector(ipoint)
+          ao_i_r = aos_in_r_array_transp(ipoint,i)
+          ao_k_r = aos_in_r_array_transp(ipoint,k)
+          c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m))
+        enddo
+      enddo
+    enddo
+   !$OMP END DO
+   !$OMP END PARALLEL
+    call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 &
+              , int2_grad1_u12_ao%f(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
+              , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num)
+  enddo
+  deallocate(c_mat)
+  call wall_time(time2)
+  print*, ' wall time of non-Hermitian part of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0
+  call print_memory_usage()
+  ! ---
+  call wall_time(time1)
+  call sum_A_At(tc_int_2e_ao(1,1,1,1), ao_num*ao_num)
+  call wall_time(time2)
+  print*, ' lower- and upper-triangle of tc_int_2e_ao (min) ', (time2 - time1) / 60.d0
+  call print_memory_usage()
+  ! ---
+  call wall_time(time1)
   PROVIDE ao_integrals_map
  !$OMP PARALLEL DEFAULT(NONE) &
@@ -107,7 +266,7 @@ subroutine provide_int2_grad1_u12_ao_gpu()
   do l = 1, ao_num
     do i = 1, ao_num
       do k = 1, ao_num
         ! < 1:i, 2:j | 1:k, 2:l >
         tc_int_2e_ao(k,i,l,j) = tc_int_2e_ao(k,i,l,j) + get_ao_two_e_integral(i, j, k, l, ao_integrals_map)
       enddo
     enddo
@@ -125,7 +284,7 @@ subroutine provide_int2_grad1_u12_ao_gpu()
   print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
   open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write")
   call ezfio_set_work_empty(.False.)
-  write(11) int2_grad1_u12_ao(:,:,:,1:3)
+  write(11) int2_grad1_u12_ao%f(:,:,:,1:3)
   close(11)
   print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot'
@@ -138,7 +297,7 @@ subroutine provide_int2_grad1_u12_ao_gpu()
   ! ----
-  deallocate(int2_grad1_u12_ao)
+  call gpu_deallocate(int2_grad1_u12_ao)
   deallocate(tc_int_2e_ao)
   call wall_time(time2)
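
In the new GPU path, tmp_grad1_u12 is refilled on the CPU for each pass while its snapshot tmp_grad1_u12p feeds four gpu_dgemm calls, one stream per derivative component m. A minimal, self-contained sketch of the snapshot/double-buffer logic (not qp2 code; an array copy and a reduction stand in for gpu_copy and the streamed gpu_dgemm, so only the buffering is shown, not actual asynchrony):

! ---
! Sketch: each pass snapshots the freshly filled working buffer before the
! consumer uses it, so the next fill of "work" cannot race the in-flight
! consumption of "snap".
program double_buffer_sketch
  implicit none
  integer, parameter :: n_pass = 3, n = 5
  double precision   :: work(n), snap(n), acc
  integer            :: i_pass, i
  acc = 0.d0
  do i_pass = 1, n_pass
    do i = 1, n
      work(i) = dble(i_pass*i)   ! producer: fills the working buffer
    enddo
    snap = work                  ! snapshot, standing in for gpu_copy
    acc  = acc + sum(snap)       ! consumer, standing in for gpu_dgemm
  enddo
  print *, 'accumulated =', acc
end program double_buffer_sketch
! ---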

View File

@@ -0,0 +1,446 @@
! ---
program write_tc_int_cuda
implicit none
print *, ' j2e_type = ', j2e_type
print *, ' j1e_type = ', j1e_type
print *, ' env_type = ', env_type
my_grid_becke = .True.
PROVIDE tc_grid1_a tc_grid1_r
my_n_pt_r_grid = tc_grid1_r
my_n_pt_a_grid = tc_grid1_a
touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
my_extra_grid_becke = .True.
PROVIDE tc_grid2_a tc_grid2_r
my_n_pt_r_extra_grid = tc_grid2_r
my_n_pt_a_extra_grid = tc_grid2_a
touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
call write_int(6, my_n_pt_r_grid, 'radial external grid over')
call write_int(6, my_n_pt_a_grid, 'angular external grid over')
call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
call main()
end
! ---
subroutine main()
implicit none
!call deb_int_long_range_gpu()
!call deb_int_bh_kernel_gpu()
call deb_int2_grad1_u12_ao_gpu()
return
end
! ---
subroutine deb_int_long_range_gpu()
use gpu_module
implicit none
integer :: i, j, k
integer :: ipoint, jpoint
integer :: nBlocks, blockSize
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision :: time0, time1
double precision :: cuda_time0, cuda_time1
double precision :: cpu_time0, cpu_time1
double precision, allocatable :: aos_data2(:,:,:)
double precision, allocatable :: int_fct_long_range(:,:,:)
double precision, allocatable :: int_fct_long_range_gpu(:,:,:)
call wall_time(time0)
print*, ' start deb_int_long_range_gpu'
! ---
nBlocks = 256
blockSize = 32
allocate(aos_data2(n_points_extra_final_grid,ao_num,4))
allocate(int_fct_long_range_gpu(n_points_extra_final_grid,ao_num,ao_num))
do k = 1, ao_num
do ipoint = 1, n_points_extra_final_grid
aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint)
aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1)
aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2)
aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3)
enddo
enddo
! ---
call wall_time(cuda_time0)
call deb_int_long_range(nBlocks, blockSize, &
n_points_extra_final_grid, ao_num, final_weight_at_r_vector_extra, aos_data2, &
int_fct_long_range_gpu)
call wall_time(cuda_time1)
print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0
deallocate(aos_data2)
! ---
allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num))
call wall_time(cpu_time0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (j, i, jpoint) &
!$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp)
!$OMP DO SCHEDULE (static)
do j = 1, ao_num
do i = 1, ao_num
do jpoint = 1, n_points_extra_final_grid
int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
call wall_time(cpu_time1)
print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0
! ---
acc_thr = 1d-12
err_tot = 0.d0
nrm_tot = 0.d0
do j = 1, ao_num
do i = 1, ao_num
do jpoint = 1, n_points_extra_final_grid
err_loc = dabs(int_fct_long_range(jpoint,i,j) - int_fct_long_range_gpu(jpoint,i,j))
if(err_loc > acc_thr) then
print*, " error on", jpoint, i, j
print*, " CPU res", int_fct_long_range (jpoint,i,j)
print*, " GPU res", int_fct_long_range_gpu(jpoint,i,j)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(int_fct_long_range(jpoint,i,j))
enddo
enddo
enddo
print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot
! ---
deallocate(int_fct_long_range)
deallocate(int_fct_long_range_gpu)
call wall_time(time1)
print*, ' wall time for deb_int_long_range_gpu (min) = ', (time1-time0) / 60.d0
return
end
! ---
subroutine deb_int_bh_kernel_gpu()
use gpu_module
implicit none
integer :: m
integer :: ipoint, jpoint
integer :: nBlocks, blockSize
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision :: time0, time1
double precision :: cuda_time0, cuda_time1
double precision :: cpu_time0, cpu_time1
double precision, allocatable :: r1(:,:), r2(:,:)
double precision, allocatable :: grad1_u12(:,:,:)
double precision, allocatable :: grad1_u12_gpu(:,:,:)
call wall_time(time0)
print*, ' start deb_int_bh_kernel_gpu'
! ---
allocate(r1(n_points_final_grid,3))
allocate(r2(n_points_extra_final_grid,3))
do ipoint = 1, n_points_final_grid
r1(ipoint,1) = final_grid_points(1,ipoint)
r1(ipoint,2) = final_grid_points(2,ipoint)
r1(ipoint,3) = final_grid_points(3,ipoint)
enddo
do ipoint = 1, n_points_extra_final_grid
r2(ipoint,1) = final_grid_points_extra(1,ipoint)
r2(ipoint,2) = final_grid_points_extra(2,ipoint)
r2(ipoint,3) = final_grid_points_extra(3,ipoint)
enddo
! ---
nBlocks = 256
blockSize = 32
allocate(grad1_u12_gpu(n_points_extra_final_grid,n_points_final_grid,4))
call wall_time(cuda_time0)
call deb_int_bh_kernel(nBlocks, blockSize, &
n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, &
r1, r2, nucl_coord, jBH_c, jBH_m, jBH_n, jBH_o, &
grad1_u12_gpu)
call wall_time(cuda_time1)
print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0
! ---
allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4))
call wall_time(cpu_time0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12)
!$OMP DO
do ipoint = 1, n_points_final_grid
call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) &
, grad1_u12(1,ipoint,2) &
, grad1_u12(1,ipoint,3) &
, grad1_u12(1,ipoint,4) )
enddo
!$OMP END DO
!$OMP END PARALLEL
call wall_time(cpu_time1)
print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0
! ---
acc_thr = 1d-12
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 4
do ipoint = 1, n_points_final_grid
do jpoint = 1, n_points_extra_final_grid
err_loc = dabs(grad1_u12(jpoint,ipoint,m) - grad1_u12_gpu(jpoint,ipoint,m))
if(err_loc > acc_thr) then
print*, " error on", jpoint, ipoint, m
print*, " CPU res", grad1_u12 (jpoint,ipoint,m)
print*, " GPU res", grad1_u12_gpu(jpoint,ipoint,m)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(grad1_u12(jpoint,ipoint,m))
enddo
enddo
enddo
print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot
! ---
deallocate(r1, r2)
deallocate(grad1_u12)
deallocate(grad1_u12_gpu)
call wall_time(time1)
print*, ' wall time for deb_int_bh_kernel_gpu (min) = ', (time1-time0) / 60.d0
return
end
! ---
subroutine deb_int2_grad1_u12_ao_gpu()
use gpu_module
implicit none
integer :: m
integer :: i, j, k
integer :: ipoint, jpoint
integer :: nBlocks, blockSize
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision :: time0, time1
double precision :: cuda_time0, cuda_time1
double precision :: cpu_time0, cpu_time1
double precision, allocatable :: r1(:,:), r2(:,:), aos_data2(:,:,:)
double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:)
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:)
call wall_time(time0)
print*, ' start deb_int2_grad1_u12_ao_gpu'
! ---
allocate(r1(n_points_final_grid,3))
allocate(r2(n_points_extra_final_grid,3))
allocate(aos_data2(n_points_extra_final_grid,ao_num,4))
do ipoint = 1, n_points_final_grid
r1(ipoint,1) = final_grid_points(1,ipoint)
r1(ipoint,2) = final_grid_points(2,ipoint)
r1(ipoint,3) = final_grid_points(3,ipoint)
enddo
do ipoint = 1, n_points_extra_final_grid
r2(ipoint,1) = final_grid_points_extra(1,ipoint)
r2(ipoint,2) = final_grid_points_extra(2,ipoint)
r2(ipoint,3) = final_grid_points_extra(3,ipoint)
enddo
do k = 1, ao_num
do ipoint = 1, n_points_extra_final_grid
aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint)
aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1)
aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2)
aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3)
enddo
enddo
! ---
nBlocks = 256
blockSize = 32
allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,4))
call wall_time(cuda_time0)
call deb_int2_grad1_u12_ao(nBlocks, blockSize, &
n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, &
r1, r2, final_weight_at_r_vector_extra, nucl_coord, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, &
int2_grad1_u12_ao_gpu)
call wall_time(cuda_time1)
print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0
! ---
allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num))
allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4))
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4))
call wall_time(cpu_time0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (j, i, jpoint) &
!$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp)
!$OMP DO SCHEDULE (static)
do j = 1, ao_num
do i = 1, ao_num
do jpoint = 1, n_points_extra_final_grid
int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12)
!$OMP DO
do ipoint = 1, n_points_final_grid
call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) &
, grad1_u12(1,ipoint,2) &
, grad1_u12(1,ipoint,3) &
, grad1_u12(1,ipoint,4) )
enddo
!$OMP END DO
!$OMP END PARALLEL
do m = 1, 4
call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 &
, int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid &
, 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num)
enddo
call wall_time(cpu_time1)
print*, ' wall time on CPU (min) = ', (cpu_time1-cpu_time0) / 60.d0
! ---
acc_thr = 1d-12
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 4
do ipoint = 1, n_points_final_grid
do j = 1, ao_num
do i = 1, ao_num
err_loc = dabs(int2_grad1_u12_ao(i,j,ipoint,m) - int2_grad1_u12_ao_gpu(i,j,ipoint,m))
if(err_loc > acc_thr) then
print*, " error on", i, j, ipoint, m
print*, " CPU res", int2_grad1_u12_ao (i,j,ipoint,m)
print*, " GPU res", int2_grad1_u12_ao_gpu(i,j,ipoint,m)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(int2_grad1_u12_ao(i,j,ipoint,m))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot
! ---
deallocate(r1, r2, aos_data2)
deallocate(int_fct_long_range, grad1_u12)
deallocate(int2_grad1_u12_ao)
deallocate(int2_grad1_u12_ao_gpu)
call wall_time(time1)
print*, ' wall time for deb_int2_grad1_u12_ao_gpu (min) = ', (time1-time0) / 60.d0
return
end
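
Each deb_* routine validates the CUDA kernels the same way: stop at the first element whose absolute deviation from the CPU reference exceeds acc_thr, otherwise report the accumulated deviation normalized by the 1-norm of the reference. A self-contained sketch of that pattern (not qp2 code):

! ---
! Sketch of the CPU-vs-GPU validation pattern used by the deb_* routines:
! element-wise check against a hard threshold, plus an aggregate error
! normalized by the 1-norm of the reference values.
program compare_sketch
  implicit none
  integer, parameter :: n = 1000
  double precision   :: ref(n), res(n)
  double precision   :: acc_thr, err_loc, err_tot, nrm_tot
  integer            :: i
  call random_number(ref)
  res = ref * (1.d0 + 1.d-14)        ! stand-in for the GPU result
  acc_thr = 1d-12
  err_tot = 0.d0
  nrm_tot = 0.d0
  do i = 1, n
    err_loc = dabs(ref(i) - res(i))
    if(err_loc > acc_thr) then
      print *, ' error on', i
      stop
    endif
    err_tot = err_tot + err_loc
    nrm_tot = nrm_tot + dabs(ref(i))
  enddo
  print *, ' absolute accuracy (%) =', 100.d0 * err_tot / nrm_tot
end program compare_sketch
! ---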

View File

@@ -1,2 +0,0 @@

View File

@@ -1,38 +1,111 @@
+! ---
 module gpu_module
-  use iso_c_binding
+  use, intrinsic :: iso_c_binding
  implicit none
  interface
-    subroutine tc_int_bh(n_grid1, n_grid2, ao_num, n_nuc, &
-                         size_bh, m_bh, n_bh, o_bh, c_bh, &
-                         r1, r2, rn, wr1, wr2, aos_data1, &
-                         aos_data2, int2_grad1_u12, tc_int_2e_ao) bind(C)
-      import c_int, c_double
-      integer(c_int), intent(in), value :: n_grid1, n_grid2, ao_num, n_nuc, size_bh
+    ! ---
+    subroutine tc_int_c(nBlocks, blockSize, &
+                        n_grid1, n_grid2, n_ao, n_nuc, size_bh, &
+                        r1, wr1, r2, wr2, rn, &
+                        aos_data1, aos_data2, &
+                        c_bh, m_bh, n_bh, o_bh, &
+                        int2_grad1_u12_ao, int_2e_ao) bind(C, name = "tc_int_c")
+      import c_int, c_double, c_ptr
+      integer(c_int), intent(in), value :: nBlocks, blockSize
+      integer(c_int), intent(in), value :: n_grid1, n_grid2
+      integer(c_int), intent(in), value :: n_ao
+      integer(c_int), intent(in), value :: n_nuc
+      integer(c_int), intent(in), value :: size_bh
+      real(c_double), intent(in)        :: r1(n_grid1,3), wr1(n_grid1)
+      real(c_double), intent(in)        :: r2(n_grid2,3), wr2(n_grid2)
+      real(c_double), intent(in)        :: rn(n_nuc,3)
+      real(c_double), intent(in)        :: aos_data1(n_grid1,n_ao,4)
+      real(c_double), intent(in)        :: aos_data2(n_grid2,n_ao,4)
+      real(c_double), intent(in)        :: c_bh(size_bh,n_nuc)
       integer(c_int), intent(in)        :: m_bh(size_bh,n_nuc)
       integer(c_int), intent(in)        :: n_bh(size_bh,n_nuc)
       integer(c_int), intent(in)        :: o_bh(size_bh,n_nuc)
-      real(c_double), intent(in)        :: c_bh(size_bh,n_nuc)
-      real(c_double), intent(in)        :: r1(n_grid1,3), r2(n_grid2,3)
-      real(c_double), intent(in)        :: rn(n_nuc,3)
-      real(c_double), intent(in)        :: wr1(n_grid1), wr2(n_grid2)
-      real(c_double), intent(in)        :: aos_data1(n_grid1,ao_num,4), aos_data2(n_grid2,ao_num,4)
-      real(c_double), intent(out)       :: int2_grad1_u12(n_grid1,ao_num,ao_num,4)
-      real(c_double), intent(out)       :: tc_int_2e_ao(ao_num,ao_num,ao_num,ao_num)
-    end subroutine
+      real(c_double), intent(out)       :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4)
+      real(c_double), intent(out)       :: int_2e_ao(n_ao,n_ao,n_ao,n_ao)
+    end subroutine tc_int_c
+    ! ---
+    subroutine deb_int_long_range(nBlocks, blockSize, &
+                                  n_grid2, n_ao, wr2, aos_data2, &
+                                  int_fct_long_range) bind(C, name = "deb_int_long_range")
+      import c_int, c_double
+      integer(c_int), intent(in), value :: nBlocks, blockSize
+      integer(c_int), intent(in), value :: n_grid2
+      integer(c_int), intent(in), value :: n_ao
+      real(c_double), intent(in)        :: wr2(n_grid2)
+      real(c_double), intent(in)        :: aos_data2(n_grid2,n_ao,4)
+      real(c_double), intent(out)       :: int_fct_long_range(n_grid2,n_ao,n_ao)
+    end subroutine deb_int_long_range
+    ! ---
+    subroutine deb_int_bh_kernel(nBlocks, blockSize, &
+                                 n_grid1, n_grid2, n_ao, n_nuc, size_bh, &
+                                 r1, r2, rn, c_bh, m_bh, n_bh, o_bh, &
+                                 grad1_u12) bind(C, name = "deb_int_bh_kernel")
+      import c_int, c_double, c_ptr
+      integer(c_int), intent(in), value :: nBlocks, blockSize
+      integer(c_int), intent(in), value :: n_grid1, n_grid2
+      integer(c_int), intent(in), value :: n_ao
+      integer(c_int), intent(in), value :: n_nuc
+      integer(c_int), intent(in), value :: size_bh
+      real(c_double), intent(in)        :: r1(n_grid1,3)
+      real(c_double), intent(in)        :: r2(n_grid2,3)
+      real(c_double), intent(in)        :: rn(n_nuc,3)
+      real(c_double), intent(in)        :: c_bh(size_bh,n_nuc)
+      integer(c_int), intent(in)        :: m_bh(size_bh,n_nuc)
+      integer(c_int), intent(in)        :: n_bh(size_bh,n_nuc)
+      integer(c_int), intent(in)        :: o_bh(size_bh,n_nuc)
+      real(c_double), intent(out)       :: grad1_u12(n_grid2,n_grid1,4)
+    end subroutine deb_int_bh_kernel
+    ! ---
+    subroutine deb_int2_grad1_u12_ao(nBlocks, blockSize, &
+                                     n_grid1, n_grid2, n_ao, n_nuc, size_bh, &
+                                     r1, r2, wr2, rn, aos_data2, c_bh, m_bh, n_bh, o_bh, &
+                                     int2_grad1_u12_ao) bind(C, name ="deb_int2_grad1_u12_ao")
+      import c_int, c_double, c_ptr
+      integer(c_int), intent(in), value :: nBlocks, blockSize
+      integer(c_int), intent(in), value :: n_grid1, n_grid2
+      integer(c_int), intent(in), value :: n_ao
+      integer(c_int), intent(in), value :: n_nuc
+      integer(c_int), intent(in), value :: size_bh
+      real(c_double), intent(in)        :: r1(n_grid1,3)
+      real(c_double), intent(in)        :: r2(n_grid2,3)
+      real(c_double), intent(in)        :: wr2(n_grid2)
+      real(c_double), intent(in)        :: rn(n_nuc,3)
+      real(c_double), intent(in)        :: aos_data2(n_grid2,n_ao,4)
+      real(c_double), intent(in)        :: c_bh(size_bh,n_nuc)
+      integer(c_int), intent(in)        :: m_bh(size_bh,n_nuc)
+      integer(c_int), intent(in)        :: n_bh(size_bh,n_nuc)
+      integer(c_int), intent(in)        :: o_bh(size_bh,n_nuc)
+      real(c_double), intent(out)       :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4)
+    end subroutine deb_int2_grad1_u12_ao
+    ! ---
  end interface
-end module
+end module gpu_module
+! ---

View File

@@ -0,0 +1,194 @@
! ---
program write_tc_int_cuda
implicit none
print *, ' j2e_type = ', j2e_type
print *, ' j1e_type = ', j1e_type
print *, ' env_type = ', env_type
my_grid_becke = .True.
PROVIDE tc_grid1_a tc_grid1_r
my_n_pt_r_grid = tc_grid1_r
my_n_pt_a_grid = tc_grid1_a
touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
my_extra_grid_becke = .True.
PROVIDE tc_grid2_a tc_grid2_r
my_n_pt_r_extra_grid = tc_grid2_r
my_n_pt_a_extra_grid = tc_grid2_a
touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
call write_int(6, my_n_pt_r_grid, 'radial external grid over')
call write_int(6, my_n_pt_a_grid, 'angular external grid over')
call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
call main()
end
! ---
subroutine main()
implicit none
PROVIDE io_tc_integ
print*, 'io_tc_integ = ', io_tc_integ
if(io_tc_integ .ne. "Write") then
print*, 'io_tc_integ != Write'
print*, io_tc_integ
stop
endif
call do_work_on_gpu()
call ezfio_set_tc_keywords_io_tc_integ('Read')
end
! ---
subroutine do_work_on_gpu()
use gpu_module
implicit none
integer :: k, ipoint
integer :: nBlocks, blockSize
integer :: n_grid1, n_grid2
integer :: n_ao
integer :: n_nuc
integer :: size_bh
double precision, allocatable :: r1(:,:), wr1(:), r2(:,:), wr2(:), rn(:,:)
double precision, allocatable :: aos_data1(:,:,:), aos_data2(:,:,:)
double precision, allocatable :: c_bh(:,:)
integer, allocatable :: m_bh(:,:), n_bh(:,:), o_bh(:,:)
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: int_2e_ao(:,:,:,:)
double precision :: time0, time1
double precision :: cuda_time0, cuda_time1
call wall_time(time0)
print*, ' start calculation of TC-integrals'
nBlocks = 100
blockSize = 32
n_grid1 = n_points_final_grid
n_grid2 = n_points_extra_final_grid
n_ao = ao_num
n_nuc = nucl_num
size_bh = jBH_size
print*, " nBlocks =", nBlocks
print*, " blockSize =", blockSize
print*, " n_grid1 =", n_grid1
print*, " n_grid2 =", n_grid2
print*, " n_ao =", n_ao
print*, " n_nuc =", n_nuc
print *, " size_bh =", size_bh
allocate(r1(n_grid1,3), wr1(n_grid1))
allocate(r2(n_grid2,3), wr2(n_grid2))
allocate(rn(n_nuc,3))
allocate(aos_data1(n_grid1,n_ao,4))
allocate(aos_data2(n_grid2,n_ao,4))
allocate(c_bh(size_bh,n_nuc), m_bh(size_bh,n_nuc), n_bh(size_bh,n_nuc), o_bh(size_bh,n_nuc))
allocate(int2_grad1_u12_ao(n_ao,n_ao,n_grid1,4))
allocate(int_2e_ao(n_ao,n_ao,n_ao,n_ao))
do ipoint = 1, n_points_final_grid
r1(ipoint,1) = final_grid_points(1,ipoint)
r1(ipoint,2) = final_grid_points(2,ipoint)
r1(ipoint,3) = final_grid_points(3,ipoint)
wr1(ipoint) = final_weight_at_r_vector(ipoint)
enddo
do ipoint = 1, n_points_extra_final_grid
r2(ipoint,1) = final_grid_points_extra(1,ipoint)
r2(ipoint,2) = final_grid_points_extra(2,ipoint)
r2(ipoint,3) = final_grid_points_extra(3,ipoint)
wr2(ipoint) = final_weight_at_r_vector_extra(ipoint)
enddo
do k = 1, ao_num
do ipoint = 1, n_points_final_grid
aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint)
aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1)
aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2)
aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3)
enddo
do ipoint = 1, n_points_extra_final_grid
aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint)
aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1)
aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2)
aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3)
enddo
enddo
rn(:,:) = nucl_coord(:,:)
c_bh(:,:) = jBH_c(:,:)
m_bh(:,:) = jBH_m(:,:)
n_bh(:,:) = jBH_n(:,:)
o_bh(:,:) = jBH_o(:,:)
call wall_time(cuda_time0)
print*, ' start CUDA kernel'
int2_grad1_u12_ao = 0.d0
int_2e_ao = 0.d0
call tc_int_c(nBlocks, blockSize, &
n_grid1, n_grid2, n_ao, n_nuc, size_bh, &
r1, wr1, r2, wr2, rn, aos_data1, aos_data2, &
c_bh, m_bh, n_bh, o_bh, &
int2_grad1_u12_ao, int_2e_ao)
call wall_time(cuda_time1)
print*, ' wall time for CUDA kernel (min) = ', (cuda_time1-cuda_time0) / 60.d0
deallocate(r1, wr1, r2, wr2, rn)
deallocate(aos_data1, aos_data2)
deallocate(c_bh, m_bh, n_bh, o_bh)
! ---
print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write")
call ezfio_set_work_empty(.False.)
write(11) int2_grad1_u12_ao(:,:,:,1:3)
close(11)
deallocate(int2_grad1_u12_ao)
print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot'
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/ao_two_e_tc_tot', action="write")
call ezfio_set_work_empty(.False.)
do k = 1, ao_num
write(11) int_2e_ao(:,:,:,k)
enddo
close(11)
deallocate(int_2e_ao)
! ----
call wall_time(time1)
print*, ' wall time for TC-integrals (min) = ', (time1-time0) / 60.d0
return
end
! ---
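
The work/ files written above are plain Fortran unformatted records: one record for the three gradient components of int2_grad1_u12_ao, then one record per k slice of int_2e_ao, so any reader must mirror that record structure. A self-contained round-trip sketch (not qp2 code; the reader side is an assumption mirroring the writer):

! ---
! Sketch of the unformatted record layout used above: whatever record
! structure write(11) produces must be read back with matching shapes.
program unformatted_sketch
  implicit none
  integer, parameter :: n = 2
  double precision   :: a(n,n,n,4), b(n,n,n,3)
  a = 1.d0
  open(unit=11, form="unformatted", file="sketch.bin", action="write")
  write(11) a(:,:,:,1:3)              ! one record, three components
  close(11)
  open(unit=11, form="unformatted", file="sketch.bin", action="read")
  read(11) b(:,:,:,1:3)
  close(11)
  print *, 'roundtrip ok:', all(a(:,:,:,1:3) == b)
end program unformatted_sketch
! ---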