Mirror of https://github.com/QuantumPackage/qp2.git (synced 2024-11-09 06:53:38 +01:00)

GPU acceleration compute_tc_int

Anthony Scemama 2024-07-12 17:32:41 +02:00
parent d1ef3e178f
commit 4a9a11c630
2 changed files with 69 additions and 49 deletions

Changed file 1 of 2 (module dependency list):

@@ -1,3 +1,4 @@
+gpu
 tc_keywords
 jastrow
 qmckl
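Each line of this file names a qp2 module the plugin is built against, and the commit adds the gpu module to the list. Assuming the standard qp2 dependency-file format, the complete file after the change reads:

gpu
tc_keywords
jastrow
qmckl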

Changed file 2 of 2 (source of provide_int2_grad1_u12_ao):

@@ -2,7 +2,7 @@
 ! ---

 subroutine provide_int2_grad1_u12_ao()

+  use gpu

   BEGIN_DOC
   !
   ! int2_grad1_u12_ao(i,j,ipoint,1) = \int dr2 [\grad1 u(r1,r2)]_x1 \chi_i(r2) \chi_j(r2)
@@ -35,8 +35,9 @@ subroutine provide_int2_grad1_u12_ao()
   double precision              :: weight1, ao_k_r, ao_i_r
   double precision              :: der_envsq_x, der_envsq_y, der_envsq_z, lap_envsq
   double precision              :: time0, time1, time2, tc1, tc2, tc
-  double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:), tc_int_2e_ao(:,:,:,:)
-  double precision, allocatable :: tmp(:,:,:), c_mat(:,:,:), tmp_grad1_u12(:,:,:)
+  type(gpu_double4)             :: int2_grad1_u12_ao
+  type(gpu_double3)             :: tmp_grad1_u12, tmp_grad1_u12p, tmp
+  double precision, allocatable :: c_mat(:,:,:), tc_int_2e_ao(:,:,:,:)
   double precision, external    :: get_ao_two_e_integral
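These declarations replace host allocatables with the gpu module's container types, whose %f component is indexed throughout the rest of the diff. As a rough sketch of what such a container might look like (the c handle and its use of iso_c_binding are assumptions, not taken from this commit):

module gpu_containers_sketch
  use iso_c_binding, only: c_ptr
  implicit none

  ! Hypothetical layout: an opaque device/pinned-memory handle plus a
  ! Fortran pointer giving host code normal array syntax via %f,
  ! e.g. tmp%f(jpoint,i,j) below.
  type gpu_double3
    type(c_ptr)               :: c
    double precision, pointer :: f(:,:,:)
  end type

  type gpu_double4
    type(c_ptr)               :: c
    double precision, pointer :: f(:,:,:,:)
  end type
end module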
@@ -51,6 +52,7 @@ subroutine provide_int2_grad1_u12_ao()
   call total_memory(mem)
   mem = max(1.d0, qp_max_mem - mem)
+  mem = 6
   n_double = mem * 1.d8
   n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), 1.d0*n_points_final_grid))
   n_rest = int(mod(n_points_final_grid, n_blocks))
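This hunk sizes the blocks from the memory budget: mem appears to be a number of GB (the hard-coded mem = 6 overrides the measured budget, consistent with the work-in-progress markers later in the diff), mem * 1.d8 converts it to a conservative count of 8-byte doubles, and each r1 point of a block costs n_points_extra_final_grid * 4 doubles (three gradient components plus one extra slot). A self-contained sketch of the same arithmetic with illustrative sizes (all values made up; n_pass is recomputed here the way the surrounding code presumably does):

program blocking_sketch
  implicit none
  ! Stand-in grid sizes, not real qp2 values
  integer, parameter :: n_points_final_grid       = 10000
  integer, parameter :: n_points_extra_final_grid = 50000
  double precision   :: mem, n_double
  integer            :: n_blocks, n_rest, n_pass

  mem      = 6.d0        ! budget in GB, as hard-coded above
  n_double = mem * 1.d8  ! ~number of doubles fitting in the budget

  ! 4 vectors of length n_points_extra_final_grid per r1 point of a block
  n_blocks = int(min(n_double / (n_points_extra_final_grid * 4.d0), &
                     1.d0*n_points_final_grid))
  n_rest   = int(mod(n_points_final_grid, n_blocks))
  n_pass   = (n_points_final_grid - n_rest) / n_blocks

  print *, 'n_blocks =', n_blocks, ' n_pass =', n_pass, ' n_rest =', n_rest
end program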
@@ -64,9 +66,9 @@ subroutine provide_int2_grad1_u12_ao()
   ! ---
   ! ---

-  allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4))
-  allocate(tmp(n_points_extra_final_grid,ao_num,ao_num))
+  call gpu_allocate(int2_grad1_u12_ao, ao_num,ao_num,n_points_final_grid,4)
+  call gpu_allocate(tmp,n_points_extra_final_grid,ao_num,ao_num)

   !$OMP PARALLEL               &
   !$OMP DEFAULT (NONE)         &
   !$OMP PRIVATE (j, i, jpoint) &
@@ -75,21 +77,28 @@ subroutine provide_int2_grad1_u12_ao()
   do j = 1, ao_num
     do i = 1, ao_num
       do jpoint = 1, n_points_extra_final_grid
-        tmp(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
+        tmp%f(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
       enddo
     enddo
   enddo
   !$OMP END DO
   !$OMP END PARALLEL

-  allocate(tmp_grad1_u12(n_points_extra_final_grid,n_blocks,4))
+  call gpu_allocate(tmp_grad1_u12,n_points_extra_final_grid,n_blocks,4)
+  call gpu_allocate(tmp_grad1_u12p,n_points_extra_final_grid,n_blocks,4)

   tc = 0.d0

+  type(gpu_stream) :: stream(4)
+  do i=1,4
+    call gpu_stream_create(stream(i))
+  enddo
+
   do i_pass = 1, n_pass
     ii = (i_pass-1)*n_blocks + 1

     call wall_time(tc1)
     !$OMP PARALLEL                   &
     !$OMP DEFAULT (NONE)             &
     !$OMP PRIVATE (i_blocks, ipoint) &
@@ -97,27 +106,26 @@ subroutine provide_int2_grad1_u12_ao()
     !$OMP DO
     do i_blocks = 1, n_blocks
       ipoint = ii - 1 + i_blocks ! r1
-      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_blocks,1), tmp_grad1_u12(1,i_blocks,2), tmp_grad1_u12(1,i_blocks,3), tmp_grad1_u12(1,i_blocks,4))
+      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_blocks,1), tmp_grad1_u12%f(1,i_blocks,2), &
+                                tmp_grad1_u12%f(1,i_blocks,3), tmp_grad1_u12%f(1,i_blocks,4))
     enddo
     !$OMP END DO
     !$OMP END PARALLEL
     call wall_time(tc2)
     tc = tc + tc2 - tc1

+    call gpu_synchronize()
+    call gpu_copy(tmp_grad1_u12,tmp_grad1_u12p)
+
     do m = 1, 4
-      call dgemm( "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0                     &
-                , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid &
-                , 0.d0, int2_grad1_u12_ao(1,1,ii,m), ao_num*ao_num)
+      call gpu_set_stream(blas_handle, stream(m))
+      call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_blocks, n_points_extra_final_grid, 1.d0          &
+                , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12p%f(1,1,m), n_points_extra_final_grid &
+                , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num)
     enddo
   enddo

-  deallocate(tmp_grad1_u12)
-
   if(n_rest .gt. 0) then

-    allocate(tmp_grad1_u12(n_points_extra_final_grid,n_rest,4))
-
     ii = n_pass*n_blocks + 1

     call wall_time(tc1)
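The restructured pass loop pipelines host and device work: OpenMP threads fill tmp_grad1_u12 on the host, gpu_synchronize drains the gemms still reading the previous block, gpu_copy snapshots the data into tmp_grad1_u12p, and the four gemms (one per component, each on its own stream) then run against the snapshot while the next block can be filled. A CPU-only mock of that control flow, with the gpu calls replaced by stubs so it compiles and runs (only the ordering is the point; sizes are toy values):

program pipeline_sketch
  implicit none
  integer, parameter :: n_pass = 3
  double precision   :: tmp_grad1_u12(4), tmp_grad1_u12p(4)
  integer            :: i_pass, m

  do i_pass = 1, n_pass
    tmp_grad1_u12 = dble(i_pass)    ! host fill (the OpenMP loop above)
    call fake_gpu_synchronize()     ! don't overwrite data gemms may still read
    tmp_grad1_u12p = tmp_grad1_u12  ! gpu_copy: snapshot for the gemms
    do m = 1, 4
      call fake_gpu_dgemm(m, tmp_grad1_u12p(m))  ! one stream per component
    enddo
  enddo
  call fake_gpu_synchronize()       ! final drain before using the results

contains

  subroutine fake_gpu_synchronize()
    ! no-op stand-in for gpu_synchronize()
  end subroutine

  subroutine fake_gpu_dgemm(m, x)
    integer,          intent(in) :: m
    double precision, intent(in) :: x
    print *, 'gemm on stream', m, ', input from pass', int(x)
  end subroutine

end program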
@@ -128,7 +136,8 @@ subroutine provide_int2_grad1_u12_ao()
     !$OMP DO
     do i_rest = 1, n_rest
       ipoint = ii - 1 + i_rest ! r1
-      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12(1,i_rest,1), tmp_grad1_u12(1,i_rest,2), tmp_grad1_u12(1,i_rest,3), tmp_grad1_u12(1,i_rest,4))
+      call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, tmp_grad1_u12%f(1,i_rest,1), tmp_grad1_u12%f(1,i_rest,2), &
+                                tmp_grad1_u12%f(1,i_rest,3), tmp_grad1_u12%f(1,i_rest,4))
     enddo
     !$OMP END DO
     !$OMP END PARALLEL
@@ -136,15 +145,23 @@ subroutine provide_int2_grad1_u12_ao()
     tc = tc + tc2 - tc1

     do m = 1, 4
-      call dgemm( "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0                       &
-                , tmp(1,1,1), n_points_extra_final_grid, tmp_grad1_u12(1,1,m), n_points_extra_final_grid &
-                , 0.d0, int2_grad1_u12_ao(1,1,ii,m), ao_num*ao_num)
+      call gpu_set_stream(blas_handle, stream(m))
+      call gpu_dgemm(blas_handle, "T", "N", ao_num*ao_num, n_rest, n_points_extra_final_grid, 1.d0           &
+                , tmp%f(1,1,1), n_points_extra_final_grid, tmp_grad1_u12%f(1,1,m), n_points_extra_final_grid &
+                , 0.d0, int2_grad1_u12_ao%f(1,1,ii,m), ao_num*ao_num)
     enddo

-    deallocate(tmp_grad1_u12)
   endif

+  call gpu_synchronize()
+  call gpu_deallocate(tmp_grad1_u12)
+  call gpu_deallocate(tmp_grad1_u12p)

-  deallocate(tmp)
+  do i=1,4
+    call gpu_stream_destroy(stream(i))
+  enddo
+  call gpu_deallocate(tmp)

   call wall_time(time1)
@@ -152,6 +169,8 @@ subroutine provide_int2_grad1_u12_ao()
   print*, ' wall time Jastrow derivatives (min) = ', tc / 60.d0
   call print_memory_usage()

+!TODO
+stop
+
   ! ---
   ! ---
   ! ---
@@ -177,7 +196,7 @@ subroutine provide_int2_grad1_u12_ao()
   !$OMP END DO
   !$OMP END PARALLEL

   call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0            &
-            , int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
+            , int2_grad1_u12_ao%f(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
             , 0.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num)

   deallocate(c_mat)
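Read as a tensor contraction, and with the index conventions inferred from the leading dimensions passed to dgemm (so treat the exact index names as an assumption), this call computes

  tc_int_2e_ao(i,j,k,l) = \sum_{ipoint} int2_grad1_u12_ao(i,j,ipoint,4) * c_mat(ipoint,k,l)

i.e. the fourth component of int2_grad1_u12_ao is contracted over the r1 grid against the weighted AO-pair values in c_mat. The dgemm in the next hunk accumulates the remaining components into the same tc_int_2e_ao with coefficient -1.d0.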
@@ -213,7 +232,7 @@ subroutine provide_int2_grad1_u12_ao()
   !$OMP END PARALLEL

   call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0           &
-            , int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
+            , int2_grad1_u12_ao%f(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
             , 1.d0, tc_int_2e_ao(1,1,1,1), ao_num*ao_num)
   enddo

   deallocate(c_mat)
@@ -263,7 +282,7 @@ subroutine provide_int2_grad1_u12_ao()
   print*, ' Writing int2_grad1_u12_ao in ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
   open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="write")
   call ezfio_set_work_empty(.False.)
-  write(11) int2_grad1_u12_ao(:,:,:,1:3)
+  write(11) int2_grad1_u12_ao%f(:,:,:,1:3)
   close(11)

   print*, ' Saving tc_int_2e_ao in ', trim(ezfio_filename) // '/work/ao_two_e_tc_tot'
@@ -276,7 +295,7 @@ subroutine provide_int2_grad1_u12_ao()
   ! ----

-  deallocate(int2_grad1_u12_ao)
+  call gpu_deallocate(int2_grad1_u12_ao)
   deallocate(tc_int_2e_ao)

   call wall_time(time2)