mirror of https://github.com/QuantumPackage/qp2.git synced 2024-09-13 06:18:30 +02:00

cleaning in cpu vs gpu comparison

AbdAmmar 2024-08-13 12:19:30 +02:00
parent bf15b68b0b
commit 2e6df70e03
10 changed files with 640 additions and 1154 deletions


@@ -0,0 +1,233 @@
! ---
program compute_int_2e_ao_cpu
implicit none
print *, ' j2e_type = ', j2e_type
print *, ' j1e_type = ', j1e_type
print *, ' env_type = ', env_type
my_grid_becke = .True.
PROVIDE tc_grid1_a tc_grid1_r
my_n_pt_r_grid = tc_grid1_r
my_n_pt_a_grid = tc_grid1_a
touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
my_extra_grid_becke = .True.
PROVIDE tc_grid2_a tc_grid2_r
my_n_pt_r_extra_grid = tc_grid2_r
my_n_pt_a_extra_grid = tc_grid2_a
touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
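! note: PROVIDE and touch are IRPF90 keywords, not standard Fortran. PROVIDE
! forces the named quantities to be built before use, and touch marks the grid
! variables as modified so everything depending on them is re-provided with
! the grid sizes requested here.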
call write_int(6, my_n_pt_r_grid, 'radial external grid over')
call write_int(6, my_n_pt_a_grid, 'angular external grid over')
call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
call main()
end
! ---
subroutine main()
use cutc_module
implicit none
integer :: m
integer :: i, j, k, l
integer :: ipoint, jpoint
double precision :: weight1, ao_i_r, ao_k_r
double precision :: time0, time1
double precision :: wall_time0, wall_time1
double precision :: wall_ttime0, wall_ttime1
double precision :: tt1, tt2
double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:)
double precision, allocatable :: grad1_u12(:,:,:), int_fct_long_range(:,:,:), c_mat(:,:,:)
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: int_2e_ao(:,:,:,:)
call wall_time(time0)
print*, ' start compute_int_2e_ao_cpu'
! ---
allocate(rn(3,nucl_num))
allocate(aos_data1(n_points_final_grid,ao_num,4))
allocate(aos_data2(n_points_extra_final_grid,ao_num,4))
do k = 1, nucl_num
rn(1,k) = nucl_coord(k,1)
rn(2,k) = nucl_coord(k,2)
rn(3,k) = nucl_coord(k,3)
enddo
do k = 1, ao_num
do ipoint = 1, n_points_final_grid
aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint)
aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1)
aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2)
aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3)
enddo
enddo
do k = 1, ao_num
do ipoint = 1, n_points_extra_final_grid
aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint)
aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1)
aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2)
aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3)
enddo
enddo
allocate(int_fct_long_range(n_points_extra_final_grid,ao_num,ao_num))
allocate(grad1_u12(n_points_extra_final_grid,n_points_final_grid,4))
allocate(c_mat(n_points_final_grid,ao_num,ao_num))
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,4))
allocate(int_2e_ao(ao_num,ao_num,ao_num,ao_num))
call wall_time(wall_time0)
call wall_time(wall_ttime0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (j, i, jpoint) &
!$OMP SHARED (int_fct_long_range, ao_num, n_points_extra_final_grid, final_weight_at_r_vector_extra, aos_in_r_array_extra_transp)
!$OMP DO SCHEDULE (static)
do j = 1, ao_num
do i = 1, ao_num
do jpoint = 1, n_points_extra_final_grid
int_fct_long_range(jpoint,i,j) = final_weight_at_r_vector_extra(jpoint) * aos_in_r_array_extra_transp(jpoint,i) * aos_in_r_array_extra_transp(jpoint,j)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
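! int_fct_long_range(jpoint,i,j) = w2(r2_jpoint) * AO_i(r2_jpoint) * AO_j(r2_jpoint):
! the weighted AO pair on the extra (r2) grid, contracted over r2 below.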
call wall_time(wall_ttime1)
write(*,"(A,2X,F15.7)") ' wall time for int_long_range (sec) = ', (wall_ttime1 - wall_ttime0)
call wall_time(wall_ttime0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (n_points_final_grid, n_points_extra_final_grid, grad1_u12)
!$OMP DO
do ipoint = 1, n_points_final_grid
call get_grad1_u12_for_tc(ipoint, n_points_extra_final_grid, grad1_u12(1,ipoint,1) &
, grad1_u12(1,ipoint,2) &
, grad1_u12(1,ipoint,3) &
, grad1_u12(1,ipoint,4) )
enddo
!$OMP END DO
!$OMP END PARALLEL
call wall_time(wall_ttime1)
write(*,"(A,2X,F15.7)") ' wall time for tc_int_bh (sec) = ', (wall_ttime1 - wall_ttime0)
call wall_time(wall_ttime0)
do m = 1, 4
call dgemm("T", "N", ao_num*ao_num, n_points_final_grid, n_points_extra_final_grid, 1.d0 &
, int_fct_long_range(1,1,1), n_points_extra_final_grid, grad1_u12(1,1,m), n_points_extra_final_grid &
, 0.d0, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num)
enddo
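! each DGEMM performs the whole quadrature over r2 in one shot:
! int2_grad1_u12_ao(i,j,ipoint,m) = sum_jpoint int_fct_long_range(jpoint,i,j) * grad1_u12(jpoint,ipoint,m)
! m = 1,2,3 are the components of grad1 u12; m = 4 is presumably the
! squared-gradient combination consumed by the Hermitian part below.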
call wall_time(wall_ttime1)
write(*,"(A,2X,F15.7)") ' wall time for DGEMM of integ over r2 (sec) = ', (wall_ttime1 - wall_ttime0)
call wall_time(wall_ttime0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, k, ipoint) &
!$OMP SHARED (aos_in_r_array_transp, c_mat, ao_num, n_points_final_grid, final_weight_at_r_vector)
!$OMP DO SCHEDULE (static)
do i = 1, ao_num
do k = 1, ao_num
do ipoint = 1, n_points_final_grid
c_mat(ipoint,k,i) = final_weight_at_r_vector(ipoint) * aos_in_r_array_transp(ipoint,i) * aos_in_r_array_transp(ipoint,k)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
call wall_time(wall_ttime1)
write(*,"(A,2X,F15.7)") ' wall time of Hermitian part (sec) = ', (wall_ttime1 - wall_ttime0)
call wall_time(wall_ttime0)
call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 &
, int2_grad1_u12_ao(1,1,1,4), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
, 0.d0, int_2e_ao(1,1,1,1), ao_num*ao_num)
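! Hermitian part as a single DGEMM over the r1 grid:
! int_2e_ao(i,j,k,l) = sum_ipoint int2_grad1_u12_ao(i,j,ipoint,4) * c_mat(ipoint,k,l)
! with c_mat(ipoint,k,l) = w1(r1) * AO_k(r1) * AO_l(r1) built in the loop above.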
call wall_time(wall_ttime1)
write(*,"(A,2X,F15.7)") ' wall time for DGEMM of Hermitian part (sec) = ', (wall_ttime1 - wall_ttime0)
tt1 = 0.d0
tt2 = 0.d0
do m = 1, 3
call wall_time(wall_ttime0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, k, ipoint, weight1, ao_i_r, ao_k_r) &
!$OMP SHARED (aos_in_r_array_transp, aos_grad_in_r_array_transp_bis, c_mat, &
!$OMP ao_num, n_points_final_grid, final_weight_at_r_vector, m)
!$OMP DO SCHEDULE (static)
do i = 1, ao_num
do k = 1, ao_num
do ipoint = 1, n_points_final_grid
weight1 = final_weight_at_r_vector(ipoint)
ao_i_r = aos_in_r_array_transp(ipoint,i)
ao_k_r = aos_in_r_array_transp(ipoint,k)
c_mat(ipoint,k,i) = weight1 * (ao_k_r * aos_grad_in_r_array_transp_bis(ipoint,i,m) - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,m))
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
call wall_time(wall_ttime1)
tt1 += wall_ttime1 - wall_ttime0
call wall_time(wall_ttime0)
call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -0.5d0 &
, int2_grad1_u12_ao(1,1,1,m), ao_num*ao_num, c_mat(1,1,1), n_points_final_grid &
, 1.d0, int_2e_ao(1,1,1,1), ao_num*ao_num)
call wall_time(wall_ttime1)
tt2 += wall_ttime1 - wall_ttime0
enddo
write(*,"(A,2X,F15.7)") ' wall time of non-Hermitian part (sec) = ', tt1
write(*,"(A,2X,F15.7)") ' wall time for DGEMM of non Hermitian part (sec) = ', tt2
call wall_time(wall_ttime0)
call sum_A_At(int_2e_ao(1,1,1,1), ao_num*ao_num)
call wall_time(wall_ttime1)
write(*,"(A,2X,F15.7)") ' wall time of A + A.T (sec) = ', wall_ttime1 - wall_ttime0
call wall_time(wall_time1)
write(*,"(A,2X,F15.7)") ' wall time on cpu (sec) = ', (wall_time1 - wall_time0)
deallocate(int_fct_long_range, grad1_u12, c_mat)
deallocate(int_2e_ao, int2_grad1_u12_ao)
deallocate(rn, aos_data1, aos_data2)
call wall_time(time1)
write(*,"(A,2X,F15.7)") ' wall time for compute_int_2e_ao_cpu (sec) = ', (time1 - time0)
return
end


@@ -0,0 +1,117 @@
! ---
program compute_int_2e_ao_gpu
implicit none
print *, ' j2e_type = ', j2e_type
print *, ' j1e_type = ', j1e_type
print *, ' env_type = ', env_type
my_grid_becke = .True.
PROVIDE tc_grid1_a tc_grid1_r
my_n_pt_r_grid = tc_grid1_r
my_n_pt_a_grid = tc_grid1_a
touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
my_extra_grid_becke = .True.
PROVIDE tc_grid2_a tc_grid2_r
my_n_pt_r_extra_grid = tc_grid2_r
my_n_pt_a_extra_grid = tc_grid2_a
touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
call write_int(6, my_n_pt_r_grid, 'radial external grid over')
call write_int(6, my_n_pt_a_grid, 'angular external grid over')
call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
call main()
end
! ---
subroutine main()
use cutc_module
implicit none
integer :: i, j, k, l
integer :: ipoint
double precision :: time0, time1
double precision, allocatable :: rn(:,:), aos_data1(:,:,:), aos_data2(:,:,:)
double precision, allocatable :: int2_grad1_u12_ao_gpu(:,:,:,:)
double precision, allocatable :: int_2e_ao_gpu(:,:,:,:)
call wall_time(time0)
print*, ' start compute_int_2e_ao_gpu'
! ---
allocate(rn(3,nucl_num))
allocate(aos_data1(n_points_final_grid,ao_num,4))
allocate(aos_data2(n_points_extra_final_grid,ao_num,4))
do k = 1, nucl_num
rn(1,k) = nucl_coord(k,1)
rn(2,k) = nucl_coord(k,2)
rn(3,k) = nucl_coord(k,3)
enddo
do k = 1, ao_num
do ipoint = 1, n_points_final_grid
aos_data1(ipoint,k,1) = aos_in_r_array(k,ipoint)
aos_data1(ipoint,k,2) = aos_grad_in_r_array(k,ipoint,1)
aos_data1(ipoint,k,3) = aos_grad_in_r_array(k,ipoint,2)
aos_data1(ipoint,k,4) = aos_grad_in_r_array(k,ipoint,3)
enddo
enddo
do k = 1, ao_num
do ipoint = 1, n_points_extra_final_grid
aos_data2(ipoint,k,1) = aos_in_r_array_extra(k,ipoint)
aos_data2(ipoint,k,2) = aos_grad_in_r_array_extra(k,ipoint,1)
aos_data2(ipoint,k,3) = aos_grad_in_r_array_extra(k,ipoint,2)
aos_data2(ipoint,k,4) = aos_grad_in_r_array_extra(k,ipoint,3)
enddo
enddo
! ---
integer :: nB
integer :: sB
PROVIDE nxBlocks nyBlocks nzBlocks
PROVIDE blockxSize blockySize blockzSize
sB = 32
nB = (n_points_final_grid + sB - 1) / sB
call ezfio_set_tc_int_blockxSize(sB)
call ezfio_set_tc_int_nxBlocks(nB)
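! minimal 1D launch configuration, assuming a CUDA-style kernel: sB = 32
! threads per block and nB = ceil(n_points_final_grid / sB) blocks along x.
! The values are stored in the EZFIO tree; the y/z block and grid dimensions
! keep their provided defaults.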
allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3))
allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num))
call cutc_int(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, &
n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, &
final_grid_points, final_weight_at_r_vector, &
final_grid_points_extra, final_weight_at_r_vector_extra, &
rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, &
int2_grad1_u12_ao_gpu, int_2e_ao_gpu)
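! cutc_int is the single C-bound entry point of the CUDA library (see
! cutc_module): it returns both int2_grad1_u12_ao and int_2e_ao computed on
! the GPU, mirroring the steps of compute_int_2e_ao_cpu above.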
deallocate(int_2e_ao_gpu, int2_grad1_u12_ao_gpu)
deallocate(rn, aos_data1, aos_data2)
call wall_time(time1)
write(*,"(A,2X,F15.7)") ' wall time for compute_int_2e_ao_gpu (sec) = ', (time1 - time0)
return
end


@@ -0,0 +1,143 @@
! ---
program compute_no_cpu
implicit none
print *, ' j2e_type = ', j2e_type
print *, ' j1e_type = ', j1e_type
print *, ' env_type = ', env_type
my_grid_becke = .True.
PROVIDE tc_grid1_a tc_grid1_r
my_n_pt_r_grid = tc_grid1_r
my_n_pt_a_grid = tc_grid1_a
touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
my_extra_grid_becke = .True.
PROVIDE tc_grid2_a tc_grid2_r
my_n_pt_r_extra_grid = tc_grid2_r
my_n_pt_a_extra_grid = tc_grid2_a
touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
call write_int(6, my_n_pt_r_grid, 'radial external grid over')
call write_int(6, my_n_pt_a_grid, 'angular external grid over')
call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
call main()
end
! ---
subroutine main()
use cutc_module
implicit none
integer :: i, j, k, l, ipoint
double precision :: time0, time1
double precision :: tt0, tt1
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision :: noL_0e
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: tmp(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
double precision, allocatable :: noL_1e (:,:)
double precision, allocatable :: noL_2e (:,:,:,:)
PROVIDE mo_l_coef mo_r_coef
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
call wall_time(time0)
print*, ' start compute_no_cpu'
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
call wall_time(tt0)
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
call wall_time(tt1)
write(*,"(A,2X,F15.7)") ' wall time for reading (sec) = ', (tt1 - tt0)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
call wall_time(tt0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp)
!$OMP DO SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num)
enddo
!$OMP END DO
!$OMP END PARALLEL
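! ao_to_mo_bi_ortho transforms each AO-basis matrix to the bi-orthogonal MO
! basis, presumably tmp(:,:,ipoint,m) = mo_l_coef^T * X * mo_r_coef, one grid
! point at a time so the loop parallelizes trivially.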
deallocate(int2_grad1_u12_ao)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, j, ipoint) &
!$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t)
!$OMP DO COLLAPSE(2) SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
do i = 1, mo_num
do j = 1, mo_num
int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1)
int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2)
int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
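! the explicit transpose puts the grid index first so the hot loops in the
! provide_no_* routines below run with stride-1 memory access.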
call wall_time(tt1)
write(*,"(A,2X,F15.7)") ' wall time for 3e-tensor (sec) = ', (tt1 - tt0)
deallocate(tmp)
allocate(noL_2e(mo_num,mo_num,mo_num,mo_num))
allocate(noL_1e(mo_num,mo_num))
call provide_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_2e(1,1,1,1))
call provide_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_1e(1,1))
call provide_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_0e)
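! noL_2e / noL_1e / noL_0e are (presumably) the normal-ordered two-, one- and
! zero-body contributions of the three-body TC term, reduced over the r1 grid
! from the bi-orthogonal MO quantities prepared above.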
deallocate(int2_grad1_u12_bimo_t)
deallocate(noL_2e)
deallocate(noL_1e)
call wall_time(time1)
write(*,"(A,2X,F15.7)") ' wall time for compute_no_cpu (sec) = ', (time1 - time0)
return
end
! ---


@@ -1,34 +1,77 @@
! ---
-subroutine deb_no_0e_gpu()
+program compute_no_gpu
+implicit none
+print *, ' j2e_type = ', j2e_type
+print *, ' j1e_type = ', j1e_type
+print *, ' env_type = ', env_type
+my_grid_becke = .True.
+PROVIDE tc_grid1_a tc_grid1_r
+my_n_pt_r_grid = tc_grid1_r
+my_n_pt_a_grid = tc_grid1_a
+touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
+my_extra_grid_becke = .True.
+PROVIDE tc_grid2_a tc_grid2_r
+my_n_pt_r_extra_grid = tc_grid2_r
+my_n_pt_a_extra_grid = tc_grid2_a
+touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
+call write_int(6, my_n_pt_r_grid, 'radial external grid over')
+call write_int(6, my_n_pt_a_grid, 'angular external grid over')
+call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
+call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
+call main()
+end
+! ---
+subroutine main()
use cutc_module
implicit none
integer :: i, j, k, l, ipoint
+double precision :: time0, time1
+double precision :: tt0, tt1
double precision :: acc_thr, err_tot, nrm_tot, err_loc
-double precision :: noL_0e
double precision :: noL_0e_gpu(1)
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: tmp(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
+double precision, allocatable :: noL_1e_gpu(:,:)
+double precision, allocatable :: noL_2e_gpu(:,:,:,:)
PROVIDE mo_l_coef mo_r_coef
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
+call wall_time(time0)
+print*, ' start compute_no_gpu'
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
+call wall_time(tt0)
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
+call wall_time(tt1)
+write(*,"(A,2X,F15.7)") ' wall time for reading (sec) = ', (tt1 - tt0)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
+call wall_time(tt0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
@@ -60,37 +103,30 @@ subroutine deb_no_0e_gpu()
enddo
!$OMP END DO
!$OMP END PARALLEL
+call wall_time(tt1)
+write(*,"(A,2X,F15.7)") ' wall time for 3e-tensor (sec) = ', (tt1 - tt0)
deallocate(tmp)
-! ---
-call cutc_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
-final_weight_at_r_vector(1), &
-mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
-int2_grad1_u12_bimo_t(1,1,1,1), noL_0e_gpu(1))
-! ---
-call provide_no_0e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
-final_weight_at_r_vector(1), &
-mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
-int2_grad1_u12_bimo_t(1,1,1,1), noL_0e)
-! ---
+allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num))
+allocate(noL_1e_gpu(mo_num,mo_num))
+call cutc_no(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
+final_weight_at_r_vector(1), &
+mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
+int2_grad1_u12_bimo_t(1,1,1,1), noL_2e_gpu(1,1,1,1), noL_1e_gpu(1,1), noL_0e_gpu(1))
deallocate(int2_grad1_u12_bimo_t)
-print *, 'noL_0e CPU = ', noL_0e
-print *, 'noL_0e GPU = ', noL_0e_gpu(1)
-err_tot = dabs(noL_0e - noL_0e_gpu(1))
-nrm_tot = dabs(noL_0e)
-print *, ' absolute accuracy on noL_0e (%) =', 100.d0 * err_tot / nrm_tot
+deallocate(noL_2e_gpu)
+deallocate(noL_1e_gpu)
+call wall_time(time1)
+write(*,"(A,2X,F15.7)") ' wall time for compute_no_gpu (sec) = ', (time1 - time0)
return
end
! ---


@@ -41,155 +41,6 @@ module cutc_module
! ---
subroutine deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, &
blockxSize, blockySize, blockzSize, &
n_grid1, n_grid2, n_ao, n_nuc, size_bh, &
r1, wr1, r2, wr2, rn, &
aos_data1, aos_data2, &
c_bh, m_bh, n_bh, o_bh, &
int2_grad1_u12_ao, int_2e_ao) bind(C, name = "deb_int_2e_ao")
import c_int, c_double, c_ptr
integer(c_int), intent(in), value :: nxBlocks, blockxSize
integer(c_int), intent(in), value :: nyBlocks, blockySize
integer(c_int), intent(in), value :: nzBlocks, blockzSize
integer(c_int), intent(in), value :: n_grid1, n_grid2
integer(c_int), intent(in), value :: n_ao
integer(c_int), intent(in), value :: n_nuc
integer(c_int), intent(in), value :: size_bh
real(c_double), intent(in) :: r1(3,n_grid1), wr1(n_grid1)
real(c_double), intent(in) :: r2(3,n_grid2), wr2(n_grid2)
real(c_double), intent(in) :: rn(3,n_nuc)
real(c_double), intent(in) :: aos_data1(n_grid1,n_ao,4)
real(c_double), intent(in) :: aos_data2(n_grid2,n_ao,4)
real(c_double), intent(in) :: c_bh(size_bh,n_nuc)
integer(c_int), intent(in) :: m_bh(size_bh,n_nuc)
integer(c_int), intent(in) :: n_bh(size_bh,n_nuc)
integer(c_int), intent(in) :: o_bh(size_bh,n_nuc)
real(c_double), intent(out) :: int2_grad1_u12_ao(n_ao,n_ao,n_grid1,3)
real(c_double), intent(out) :: int_2e_ao(n_ao,n_ao,n_ao,n_ao)
end subroutine deb_int_2e_ao
! ---
subroutine cutc_no_2e(n_grid1, n_mo, ne_a, ne_b, &
wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, &
no_2e) bind(C, name = "cutc_no_2e")
import c_int, c_double, c_ptr
integer(c_int), intent(in), value :: n_grid1
integer(c_int), intent(in), value :: n_mo
integer(c_int), intent(in), value :: ne_a
integer(c_int), intent(in), value :: ne_b
real(c_double), intent(in) :: wr1(n_grid1)
real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo)
real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo)
end subroutine cutc_no_2e
! ---
subroutine deb_no_2e(n_grid1, n_mo, ne_a, ne_b, &
wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, &
tmpO, tmpJ, tmpA, tmpB, tmpC, tmpD, tmpE, &
no_2e) bind(C, name = "deb_no_2e")
import c_int, c_double, c_ptr
integer(c_int), intent(in), value :: n_grid1
integer(c_int), intent(in), value :: n_mo
integer(c_int), intent(in), value :: ne_a
integer(c_int), intent(in), value :: ne_b
real(c_double), intent(in) :: wr1(n_grid1)
real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo)
real(c_double), intent(out) :: tmpO(n_grid1), tmpJ(n_grid1,3)
real(c_double), intent(out) :: tmpA(n_grid1,3,n_mo), tmpB(n_grid1,3,n_mo)
real(c_double), intent(out) :: tmpC(n_grid1,4,n_mo,n_mo), tmpD(n_grid1,4,n_mo,n_mo)
real(c_double), intent(out) :: tmpE(n_mo,n_mo,n_mo,n_mo)
real(c_double), intent(out) :: no_2e(n_mo,n_mo,n_mo,n_mo)
end subroutine deb_no_2e
! ---
subroutine cutc_no_1e(n_grid1, n_mo, ne_a, ne_b, &
wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, &
no_1e) bind(C, name = "cutc_no_1e")
import c_int, c_double, c_ptr
integer(c_int), intent(in), value :: n_grid1
integer(c_int), intent(in), value :: n_mo
integer(c_int), intent(in), value :: ne_a
integer(c_int), intent(in), value :: ne_b
real(c_double), intent(in) :: wr1(n_grid1)
real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo)
real(c_double), intent(out) :: no_1e(n_mo,n_mo)
end subroutine cutc_no_1e
! ---
subroutine deb_no_1e(n_grid1, n_mo, ne_a, ne_b, &
wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, &
tmpO, tmpJ, tmpM, tmpS, tmpC, tmpD, tmpL, tmpR, tmpE, tmpF, &
no_1e) bind(C, name = "deb_no_1e")
import c_int, c_double, c_ptr
integer(c_int), intent(in), value :: n_grid1
integer(c_int), intent(in), value :: n_mo
integer(c_int), intent(in), value :: ne_a
integer(c_int), intent(in), value :: ne_b
real(c_double), intent(in) :: wr1(n_grid1)
real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo)
real(c_double), intent(out) :: tmpO(n_grid1)
real(c_double), intent(out) :: tmpJ(n_grid1,3)
real(c_double), intent(out) :: tmpM(n_grid1,3)
real(c_double), intent(out) :: tmpS(n_grid1)
real(c_double), intent(out) :: tmpC(n_grid1,4,n_mo,n_mo)
real(c_double), intent(out) :: tmpD(n_grid1,4)
real(c_double), intent(out) :: tmpL(n_grid1,3,n_mo)
real(c_double), intent(out) :: tmpR(n_grid1,3,n_mo)
real(c_double), intent(out) :: tmpE(n_grid1,5,n_mo)
real(c_double), intent(out) :: tmpF(n_grid1,5,n_mo)
real(c_double), intent(out) :: no_1e(n_mo,n_mo)
end subroutine deb_no_1e
! ---
subroutine cutc_no_0e(n_grid1, n_mo, ne_a, ne_b, &
wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, &
no_0e) bind(C, name = "cutc_no_0e")
import c_int, c_double, c_ptr
integer(c_int), intent(in), value :: n_grid1
integer(c_int), intent(in), value :: n_mo
integer(c_int), intent(in), value :: ne_a
integer(c_int), intent(in), value :: ne_b
real(c_double), intent(in) :: wr1(n_grid1)
real(c_double), intent(in) :: mos_l_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: mos_r_in_r(n_grid1,n_mo)
real(c_double), intent(in) :: int2_grad1_u12(n_grid1,3,n_mo,n_mo)
real(c_double), intent(out) :: no_0e(1)
end subroutine cutc_no_0e
! ---
subroutine cutc_no(n_grid1, n_mo, ne_a, ne_b, &
wr1, mos_l_in_r, mos_r_in_r, int2_grad1_u12, &
no_2e, no_1e, no_0e) bind(C, name = "cutc_no")


@@ -1,7 +1,39 @@
! ---
-subroutine deb_int_2e_ao_gpu()
+program deb_int_2e_ao_gpu
+implicit none
+print *, ' j2e_type = ', j2e_type
+print *, ' j1e_type = ', j1e_type
+print *, ' env_type = ', env_type
+my_grid_becke = .True.
+PROVIDE tc_grid1_a tc_grid1_r
+my_n_pt_r_grid = tc_grid1_r
+my_n_pt_a_grid = tc_grid1_a
+touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
+my_extra_grid_becke = .True.
+PROVIDE tc_grid2_a tc_grid2_r
+my_n_pt_r_extra_grid = tc_grid2_r
+my_n_pt_a_extra_grid = tc_grid2_a
+touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
+call write_int(6, my_n_pt_r_grid, 'radial external grid over')
+call write_int(6, my_n_pt_a_grid, 'angular external grid over')
+call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
+call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
+call main()
+end
+! ---
+subroutine main()
use cutc_module
@@ -28,7 +60,6 @@ subroutine deb_int_2e_ao_gpu()
double precision, allocatable :: int_2e_ao_gpu(:,:,:,:)
call wall_time(time0)
print*, ' start deb_int_2e_ao_gpu'
@@ -80,12 +111,12 @@ subroutine deb_int_2e_ao_gpu()
allocate(int2_grad1_u12_ao_gpu(ao_num,ao_num,n_points_final_grid,3))
allocate(int_2e_ao_gpu(ao_num,ao_num,ao_num,ao_num))
-call deb_int_2e_ao(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, &
+call cutc_int(nxBlocks, nyBlocks, nzBlocks, blockxSize, blockySize, blockzSize, &
n_points_final_grid, n_points_extra_final_grid, ao_num, nucl_num, jBH_size, &
final_grid_points, final_weight_at_r_vector, &
final_grid_points_extra, final_weight_at_r_vector_extra, &
rn, aos_data1, aos_data2, jBH_c, jBH_m, jBH_n, jBH_o, &
int2_grad1_u12_ao_gpu, int_2e_ao_gpu)
! ---
@@ -223,7 +254,6 @@ subroutine deb_int_2e_ao_gpu()
acc_thr = 1d-12
-print *, ' precision on int2_grad1_u12_ao '
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 3
@@ -246,7 +276,6 @@ subroutine deb_int_2e_ao_gpu()
print *, ' absolute accuracy on int2_grad1_u12_ao (%) =', 100.d0 * err_tot / nrm_tot
-print *, ' precision on int_2e_ao '
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, ao_num


@@ -1,499 +0,0 @@
! ---
subroutine deb_no_1e_gpu()
use cutc_module
implicit none
integer :: i, j, k, l, ipoint
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: tmp(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
double precision, allocatable :: noL_1e(:,:)
double precision, allocatable :: noL_1e_gpu(:,:)
PROVIDE mo_l_coef mo_r_coef
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp)
!$OMP DO SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num)
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(int2_grad1_u12_ao)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, j, ipoint) &
!$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t)
!$OMP DO COLLAPSE(2) SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
do i = 1, mo_num
do j = 1, mo_num
int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1)
int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2)
int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(tmp)
! ---
allocate(noL_1e_gpu(mo_num,mo_num))
call cutc_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_1e_gpu(1,1))
! ---
allocate(noL_1e(mo_num,mo_num))
call provide_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_1e(1,1))
! ---
deallocate(int2_grad1_u12_bimo_t)
acc_thr = 1d-12
err_tot = 0.d0
nrm_tot = 0.d0
do k = 1, mo_num
do l = 1, mo_num
err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k))
if(err_loc > acc_thr) then
print*, " error on", l, k
print*, " CPU res", noL_1e (l,k)
print*, " GPU res", noL_1e_gpu(l,k)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(noL_1e(l,k))
enddo
enddo
print *, ' absolute accuracy on noL_1e (%) =', 100.d0 * err_tot / nrm_tot
deallocate(noL_1e)
deallocate(noL_1e_gpu)
return
end
! ---
subroutine deb_no_1e_gpu_tmp()
use cutc_module
implicit none
integer :: i, j, k, l, m, ipoint
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: tmp(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
double precision, allocatable :: tmpO(:), tmpO_gpu(:)
double precision, allocatable :: tmpJ(:,:), tmpJ_gpu(:,:)
double precision, allocatable :: tmpM(:,:), tmpM_gpu(:,:)
double precision, allocatable :: tmpS(:), tmpS_gpu(:)
double precision, allocatable :: tmpC(:,:,:,:), tmpC_gpu(:,:,:,:)
double precision, allocatable :: tmpD(:,:), tmpD_gpu(:,:)
double precision, allocatable :: tmpL(:,:,:), tmpL_gpu(:,:,:)
double precision, allocatable :: tmpR(:,:,:), tmpR_gpu(:,:,:)
double precision, allocatable :: tmpE(:,:,:), tmpE_gpu(:,:,:)
double precision, allocatable :: tmpF(:,:,:), tmpF_gpu(:,:,:)
double precision, allocatable :: noL_1e(:,:), noL_1e_gpu(:,:)
! ---
PROVIDE mo_l_coef mo_r_coef
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp)
!$OMP DO SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num)
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(int2_grad1_u12_ao)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, j, ipoint) &
!$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t)
!$OMP DO COLLAPSE(2) SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
do i = 1, mo_num
do j = 1, mo_num
int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1)
int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2)
int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(tmp)
! ---
allocate(tmpO_gpu(n_points_final_grid))
allocate(tmpJ_gpu(n_points_final_grid,3))
allocate(tmpM_gpu(n_points_final_grid,3))
allocate(tmpS_gpu(n_points_final_grid))
allocate(tmpC_gpu(n_points_final_grid,4,mo_num,mo_num))
allocate(tmpD_gpu(n_points_final_grid,4))
allocate(tmpL_gpu(n_points_final_grid,3,mo_num))
allocate(tmpR_gpu(n_points_final_grid,3,mo_num))
allocate(tmpE_gpu(n_points_final_grid,5,mo_num))
allocate(tmpF_gpu(n_points_final_grid,5,mo_num))
allocate(noL_1e_gpu(mo_num,mo_num))
call deb_no_1e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), int2_grad1_u12_bimo_t(1,1,1,1), &
tmpO_gpu(1), tmpJ_gpu(1,1), tmpM_gpu(1,1), tmpS_gpu(1), tmpC_gpu(1,1,1,1), tmpD_gpu(1,1), &
tmpL_gpu(1,1,1), tmpR_gpu(1,1,1), tmpE_gpu(1,1,1), tmpF_gpu(1,1,1), noL_1e_gpu(1,1))
! ---
allocate(tmpO(n_points_final_grid))
allocate(tmpJ(n_points_final_grid,3))
allocate(tmpM(n_points_final_grid,3))
allocate(tmpS(n_points_final_grid))
allocate(tmpC(n_points_final_grid,4,mo_num,mo_num))
allocate(tmpD(n_points_final_grid,4))
allocate(tmpL(n_points_final_grid,3,mo_num))
allocate(tmpR(n_points_final_grid,3,mo_num))
allocate(tmpE(n_points_final_grid,5,mo_num))
allocate(tmpF(n_points_final_grid,5,mo_num))
allocate(noL_1e(mo_num,mo_num))
call provide_no_1e_tmp(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), int2_grad1_u12_bimo_t(1,1,1,1), &
tmpO(1), tmpJ(1,1), tmpM(1,1), tmpS(1), tmpC(1,1,1,1), tmpD(1,1), tmpL(1,1,1), tmpR(1,1,1), &
tmpE(1,1,1), tmpF(1,1,1), noL_1e(1,1))
! ---
deallocate(int2_grad1_u12_bimo_t)
acc_thr = 1d-12
! ---
! tmpO(n_points_final_grid))
err_tot = 0.d0
nrm_tot = 0.d0
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpO(ipoint) - tmpO_gpu(ipoint))
if(err_loc > acc_thr) then
print*, " error on", ipoint
print*, " CPU res", tmpO (ipoint)
print*, " GPU res", tmpO_gpu(ipoint)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpO(ipoint))
enddo
print *, ' absolute accuracy on tmpO (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpJ(n_points_final_grid,3))
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpJ(ipoint,m) - tmpJ_gpu(ipoint,m))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m
print*, " CPU res", tmpJ (ipoint,m)
print*, " GPU res", tmpJ_gpu(ipoint,m)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpJ(ipoint,m))
enddo
enddo
print *, ' absolute accuracy on tmpJ (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpM(n_points_final_grid,3))
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpM(ipoint,m) - tmpM_gpu(ipoint,m))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m
print*, " CPU res", tmpM (ipoint,m)
print*, " GPU res", tmpM_gpu(ipoint,m)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpM(ipoint,m))
enddo
enddo
print *, ' absolute accuracy on tmpM (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpS(n_points_final_grid))
err_tot = 0.d0
nrm_tot = 0.d0
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpS(ipoint) - tmpS_gpu(ipoint))
if(err_loc > acc_thr) then
print*, " error on", ipoint
print*, " CPU res", tmpS (ipoint)
print*, " GPU res", tmpS_gpu(ipoint)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpS(ipoint))
enddo
print *, ' absolute accuracy on tmpS (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpC(n_points_final_grid,4,mo_num,mo_num))
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do j = 1, mo_num
do m = 1, 4
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpC(ipoint,m,j,i) - tmpC_gpu(ipoint,m,j,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, j, i
print*, " CPU res", tmpC (ipoint,m,j,i)
print*, " GPU res", tmpC_gpu(ipoint,m,j,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpC(ipoint,m,j,i))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy on tmpC (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpD(n_points_final_grid,4))
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 4
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpD(ipoint,m) - tmpD_gpu(ipoint,m))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m
print*, " CPU res", tmpD (ipoint,m)
print*, " GPU res", tmpD_gpu(ipoint,m)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpD(ipoint,m))
enddo
enddo
print *, ' absolute accuracy on tmpD (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpL(n_points_final_grid,3,mo_num))
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpL(ipoint,m,i) - tmpL_gpu(ipoint,m,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i
print*, " CPU res", tmpL (ipoint,m,i)
print*, " GPU res", tmpL_gpu(ipoint,m,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpL(ipoint,m,i))
enddo
enddo
enddo
print *, ' absolute accuracy on tmpL (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpR(n_points_final_grid,3,mo_num))
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpR(ipoint,m,i) - tmpR_gpu(ipoint,m,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i
print*, " CPU res", tmpR (ipoint,m,i)
print*, " GPU res", tmpR_gpu(ipoint,m,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpR(ipoint,m,i))
enddo
enddo
enddo
print *, ' absolute accuracy on tmpR (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpE(n_points_final_grid,5,mo_num))
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do m = 1, 5
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpE(ipoint,m,i) - tmpE_gpu(ipoint,m,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i
print*, " CPU res", tmpE (ipoint,m,i)
print*, " GPU res", tmpE_gpu(ipoint,m,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpE(ipoint,m,i))
enddo
enddo
enddo
print *, ' absolute accuracy on tmpE (%) =', 100.d0 * err_tot / nrm_tot
! ---
! tmpF(n_points_final_grid,5,mo_num))
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do m = 1, 5
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpF(ipoint,m,i) - tmpF_gpu(ipoint,m,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i
print*, " CPU res", tmpF (ipoint,m,i)
print*, " GPU res", tmpF_gpu(ipoint,m,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpF(ipoint,m,i))
enddo
enddo
enddo
print *, ' absolute accuracy on tmpF (%) =', 100.d0 * err_tot / nrm_tot
! ---
! noL_1e(mo_num,mo_num))
err_tot = 0.d0
nrm_tot = 0.d0
do k = 1, mo_num
do l = 1, mo_num
err_loc = dabs(noL_1e(l,k) - noL_1e_gpu(l,k))
if(err_loc > acc_thr) then
print*, " error on", l, k
print*, " CPU res", noL_1e (l,k)
print*, " GPU res", noL_1e_gpu(l,k)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(noL_1e(l,k))
enddo
enddo
print *, ' absolute accuracy on noL_1e (%) =', 100.d0 * err_tot / nrm_tot
! ---
deallocate(tmpO)
deallocate(tmpJ)
deallocate(tmpM)
deallocate(tmpS)
deallocate(tmpC)
deallocate(tmpD)
deallocate(tmpL)
deallocate(tmpR)
deallocate(tmpE)
deallocate(tmpF)
deallocate(noL_1e)
deallocate(tmpO_gpu)
deallocate(tmpJ_gpu)
deallocate(tmpM_gpu)
deallocate(tmpS_gpu)
deallocate(tmpC_gpu)
deallocate(tmpD_gpu)
deallocate(tmpL_gpu)
deallocate(tmpR_gpu)
deallocate(tmpE_gpu)
deallocate(tmpF_gpu)
deallocate(noL_1e_gpu)
return
end


@@ -1,417 +0,0 @@
! ---
subroutine deb_no_2e_gpu()
use cutc_module
implicit none
integer :: i, j, k, l, ipoint
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: tmp(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
double precision, allocatable :: noL_2e(:,:,:,:)
double precision, allocatable :: noL_2e_gpu(:,:,:,:)
PROVIDE mo_l_coef mo_r_coef
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp)
!$OMP DO SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num)
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(int2_grad1_u12_ao)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, j, ipoint) &
!$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t)
!$OMP DO COLLAPSE(2) SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
do i = 1, mo_num
do j = 1, mo_num
int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1)
int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2)
int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(tmp)
! ---
allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num))
call cutc_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_2e_gpu(1,1,1,1))
! ---
allocate(noL_2e(mo_num,mo_num,mo_num,mo_num))
call provide_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), noL_2e(1,1,1,1))
! ---
deallocate(int2_grad1_u12_bimo_t)
acc_thr = 1d-12
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do j = 1, mo_num
do k = 1, mo_num
do l = 1, mo_num
err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i))
if(err_loc > acc_thr) then
print*, " error on", l, k, j, i
print*, " CPU res", noL_2e (l,k,j,i)
print*, " GPU res", noL_2e_gpu(l,k,j,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy on noL_2e (%) =', 100.d0 * err_tot / nrm_tot
deallocate(noL_2e)
deallocate(noL_2e_gpu)
return
end
! ---
subroutine deb_no_2e_gpu_tmp()
use cutc_module
implicit none
integer :: i, j, k, l, m, ipoint
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision, allocatable :: int2_grad1_u12_ao(:,:,:,:)
double precision, allocatable :: tmp(:,:,:,:)
double precision, allocatable :: int2_grad1_u12_bimo_t(:,:,:,:)
double precision, allocatable :: tmpO(:), tmpO_gpu(:)
double precision, allocatable :: tmpJ(:,:), tmpJ_gpu(:,:)
double precision, allocatable :: tmpA(:,:,:), tmpA_gpu(:,:,:)
double precision, allocatable :: tmpB(:,:,:), tmpB_gpu(:,:,:)
double precision, allocatable :: tmpC(:,:,:,:), tmpC_gpu(:,:,:,:)
double precision, allocatable :: tmpD(:,:,:,:), tmpD_gpu(:,:,:,:)
double precision, allocatable :: tmpE(:,:,:,:), tmpE_gpu(:,:,:,:)
double precision, allocatable :: noL_2e(:,:,:,:), noL_2e_gpu(:,:,:,:)
PROVIDE mo_l_coef mo_r_coef
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
!$OMP SHARED (ao_num, mo_num, n_points_final_grid, int2_grad1_u12_ao, tmp)
!$OMP DO SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,1), ao_num, tmp(1,1,ipoint,1), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,2), ao_num, tmp(1,1,ipoint,2), mo_num)
call ao_to_mo_bi_ortho(int2_grad1_u12_ao(1,1,ipoint,3), ao_num, tmp(1,1,ipoint,3), mo_num)
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(int2_grad1_u12_ao)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (i, j, ipoint) &
!$OMP SHARED (mo_num, n_points_final_grid, tmp, int2_grad1_u12_bimo_t)
!$OMP DO COLLAPSE(2) SCHEDULE (dynamic)
do ipoint = 1, n_points_final_grid
do i = 1, mo_num
do j = 1, mo_num
int2_grad1_u12_bimo_t(ipoint,1,j,i) = tmp(j,i,ipoint,1)
int2_grad1_u12_bimo_t(ipoint,2,j,i) = tmp(j,i,ipoint,2)
int2_grad1_u12_bimo_t(ipoint,3,j,i) = tmp(j,i,ipoint,3)
enddo
enddo
enddo
!$OMP END DO
!$OMP END PARALLEL
deallocate(tmp)
! ---
allocate(tmpO_gpu(n_points_final_grid))
allocate(tmpJ_gpu(n_points_final_grid,3))
allocate(tmpA_gpu(n_points_final_grid,3,mo_num))
allocate(tmpB_gpu(n_points_final_grid,3,mo_num))
allocate(tmpC_gpu(n_points_final_grid,4,mo_num,mo_num))
allocate(tmpD_gpu(n_points_final_grid,4,mo_num,mo_num))
allocate(tmpE_gpu(mo_num,mo_num,mo_num,mo_num))
allocate(noL_2e_gpu(mo_num,mo_num,mo_num,mo_num))
call deb_no_2e(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), &
tmpO_gpu(1), tmpJ_gpu(1,1), tmpA_gpu(1,1,1), tmpB_gpu(1,1,1), &
tmpC_gpu(1,1,1,1), tmpD_gpu(1,1,1,1), tmpE_gpu(1,1,1,1), &
noL_2e_gpu(1,1,1,1))
! ---
allocate(tmpO(n_points_final_grid))
allocate(tmpJ(n_points_final_grid,3))
allocate(tmpA(n_points_final_grid,3,mo_num))
allocate(tmpB(n_points_final_grid,3,mo_num))
allocate(tmpC(n_points_final_grid,4,mo_num,mo_num))
allocate(tmpD(n_points_final_grid,4,mo_num,mo_num))
allocate(tmpE(mo_num,mo_num,mo_num,mo_num))
allocate(noL_2e(mo_num,mo_num,mo_num,mo_num))
call provide_no_2e_tmp(n_points_final_grid, mo_num, elec_alpha_num, elec_beta_num, &
final_weight_at_r_vector(1), &
mos_l_in_r_array_transp(1,1), mos_r_in_r_array_transp(1,1), &
int2_grad1_u12_bimo_t(1,1,1,1), &
tmpO(1), tmpJ(1,1), tmpA(1,1,1), tmpB(1,1,1), &
tmpC(1,1,1,1), tmpD(1,1,1,1), tmpE(1,1,1,1), &
noL_2e(1,1,1,1))
! ---
deallocate(int2_grad1_u12_bimo_t)
acc_thr = 1d-12
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpO(ipoint) - tmpO_gpu(ipoint))
if(err_loc > acc_thr) then
print*, " error on", ipoint
print*, " CPU res", tmpO (ipoint)
print*, " GPU res", tmpO_gpu(ipoint)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpO(ipoint))
enddo
print *, ' absolute accuracy on tmpO (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpJ(ipoint,m) - tmpJ_gpu(ipoint,m))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m
print*, " CPU res", tmpJ (ipoint,m)
print*, " GPU res", tmpJ_gpu(ipoint,m)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpJ(ipoint,m))
enddo
enddo
print *, ' absolute accuracy on tmpJ (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpA(ipoint,m,i) - tmpA_gpu(ipoint,m,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i
print*, " CPU res", tmpA (ipoint,m,i)
print*, " GPU res", tmpA_gpu(ipoint,m,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpA(ipoint,m,i))
enddo
enddo
enddo
print *, ' absolute accuracy on tmpA (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpB(ipoint,m,i) - tmpB_gpu(ipoint,m,i))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i
print*, " CPU res", tmpB (ipoint,m,i)
print*, " GPU res", tmpB_gpu(ipoint,m,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpB(ipoint,m,i))
enddo
enddo
enddo
print *, ' absolute accuracy on tmpB (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do j = 1, mo_num
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpC(ipoint,m,i,j) - tmpC_gpu(ipoint,m,i,j))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i, j
print*, " CPU res", tmpC (ipoint,m,i,j)
print*, " GPU res", tmpC_gpu(ipoint,m,i,j)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpC(ipoint,m,i,j))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy on tmpC (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do j = 1, mo_num
do m = 1, 3
do ipoint = 1, n_points_final_grid
err_loc = dabs(tmpD(ipoint,m,i,j) - tmpD_gpu(ipoint,m,i,j))
if(err_loc > acc_thr) then
print*, " error on", ipoint, m, i, j
print*, " CPU res", tmpD (ipoint,m,i,j)
print*, " GPU res", tmpD_gpu(ipoint,m,i,j)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpD(ipoint,m,i,j))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy on tmpD (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do j = 1, mo_num
do k = 1, mo_num
do l = 1, mo_num
err_loc = dabs(tmpE(l,k,j,i) - tmpE_gpu(l,k,j,i))
if(err_loc > acc_thr) then
print*, " error on", l, k, j, i
print*, " CPU res", tmpE (l,k,j,i)
print*, " GPU res", tmpE_gpu(l,k,j,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(tmpE(l,k,j,i))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy on tmpE (%) =', 100.d0 * err_tot / nrm_tot
! ---
err_tot = 0.d0
nrm_tot = 0.d0
do i = 1, mo_num
do j = 1, mo_num
do k = 1, mo_num
do l = 1, mo_num
err_loc = dabs(noL_2e(l,k,j,i) - noL_2e_gpu(l,k,j,i))
if(err_loc > acc_thr) then
print*, " error on", l, k, j, i
print*, " CPU res", noL_2e (l,k,j,i)
print*, " GPU res", noL_2e_gpu(l,k,j,i)
stop
endif
err_tot = err_tot + err_loc
nrm_tot = nrm_tot + dabs(noL_2e(l,k,j,i))
enddo
enddo
enddo
enddo
print *, ' absolute accuracy on noL_2e (%) =', 100.d0 * err_tot / nrm_tot
! ---
deallocate(tmpO, tmpO_gpu)
deallocate(tmpJ, tmpJ_gpu)
deallocate(tmpA, tmpA_gpu)
deallocate(tmpB, tmpB_gpu)
deallocate(tmpC, tmpC_gpu)
deallocate(tmpD, tmpD_gpu)
deallocate(tmpE, tmpE_gpu)
deallocate(noL_2e, noL_2e_gpu)
return
end


@@ -1,13 +1,47 @@
! ---
-subroutine deb_no_gpu()
+program deb_no_gpu
+implicit none
+print *, ' j2e_type = ', j2e_type
+print *, ' j1e_type = ', j1e_type
+print *, ' env_type = ', env_type
+my_grid_becke = .True.
+PROVIDE tc_grid1_a tc_grid1_r
+my_n_pt_r_grid = tc_grid1_r
+my_n_pt_a_grid = tc_grid1_a
+touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
+my_extra_grid_becke = .True.
+PROVIDE tc_grid2_a tc_grid2_r
+my_n_pt_r_extra_grid = tc_grid2_r
+my_n_pt_a_extra_grid = tc_grid2_a
+touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
+call write_int(6, my_n_pt_r_grid, 'radial external grid over')
+call write_int(6, my_n_pt_a_grid, 'angular external grid over')
+call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
+call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
+call main()
+end
+! ---
+subroutine main()
use cutc_module
implicit none
integer :: i, j, k, l, ipoint
+double precision :: time0, time1
+double precision :: tt0, tt1
double precision :: acc_thr, err_tot, nrm_tot, err_loc
double precision :: noL_0e
double precision :: noL_0e_gpu(1)
@@ -24,15 +58,24 @@ subroutine deb_no_gpu()
PROVIDE mos_l_in_r_array_transp mos_r_in_r_array_transp
+call wall_time(time0)
+print*, ' start deb_no_gpu'
allocate(int2_grad1_u12_ao(ao_num,ao_num,n_points_final_grid,3))
print*, ' Reading int2_grad1_u12_ao from ', trim(ezfio_filename) // '/work/int2_grad1_u12_ao'
+call wall_time(tt0)
open(unit=11, form="unformatted", file=trim(ezfio_filename)//'/work/int2_grad1_u12_ao', action="read")
read(11) int2_grad1_u12_ao
close(11)
+call wall_time(tt1)
+write(*,"(A,2X,F15.7)") ' wall time for reading (sec) = ', (tt1 - tt0)
allocate(tmp(mo_num,mo_num,n_points_final_grid,3))
allocate(int2_grad1_u12_bimo_t(n_points_final_grid,3,mo_num,mo_num))
+call wall_time(tt0)
!$OMP PARALLEL &
!$OMP DEFAULT (NONE) &
!$OMP PRIVATE (ipoint) &
@@ -64,6 +107,8 @@ subroutine deb_no_gpu()
enddo
!$OMP END DO
!$OMP END PARALLEL
+call wall_time(tt1)
+write(*,"(A,2X,F15.7)") ' wall time for 3e-tensor (sec) = ', (tt1 - tt0)
deallocate(tmp)
@@ -161,6 +206,9 @@ subroutine deb_no_gpu()
print *, ' absolute accuracy on noL_0e (%) =', 100.d0 * err_tot / nrm_tot
+call wall_time(time1)
+write(*,"(A,2X,F15.7)") ' wall time for deb_no_gpu (sec) = ', (time1 - time0)
return
end


@@ -1,55 +0,0 @@
! ---
program write_tc_int_cuda
implicit none
print *, ' j2e_type = ', j2e_type
print *, ' j1e_type = ', j1e_type
print *, ' env_type = ', env_type
my_grid_becke = .True.
PROVIDE tc_grid1_a tc_grid1_r
my_n_pt_r_grid = tc_grid1_r
my_n_pt_a_grid = tc_grid1_a
touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid
my_extra_grid_becke = .True.
PROVIDE tc_grid2_a tc_grid2_r
my_n_pt_r_extra_grid = tc_grid2_r
my_n_pt_a_extra_grid = tc_grid2_a
touch my_extra_grid_becke my_n_pt_r_extra_grid my_n_pt_a_extra_grid
call write_int(6, my_n_pt_r_grid, 'radial external grid over')
call write_int(6, my_n_pt_a_grid, 'angular external grid over')
call write_int(6, my_n_pt_r_extra_grid, 'radial internal grid over')
call write_int(6, my_n_pt_a_extra_grid, 'angular internal grid over')
call main()
end
! ---
subroutine main()
implicit none
!call deb_int_2e_ao_gpu()
!call deb_no_2e_gpu_tmp()
!call deb_no_2e_gpu()
!call deb_no_1e_gpu_tmp()
!call deb_no_1e_gpu()
!call deb_no_0e_gpu()
call deb_no_gpu()
return
end
! ---