Compare commits

...

2 Commits

Author SHA1 Message Date
Anthony Scemama 8c6fb17a8d H_vo on GPU 2023-08-04 12:33:52 +02:00
Anthony Scemama 98948f1947 Added H_vv 2023-08-04 12:09:07 +02:00
6 changed files with 228 additions and 219 deletions

View File

@@ -123,25 +123,22 @@ subroutine run_ccsd_space_orb
cc_space_v_oovo, cc_space_v_ovvo, cc_space_v_ovov, cc_space_v_ovoo, &
cc_space_f_oo, cc_space_f_vo, cc_space_f_vv)
if (.not.do_ao_cholesky) then
print *, 'ao_cholesky is required'
stop -1
endif
do while (not_converged)
! Residual
if (do_ao_cholesky) then
call compute_H_vv_chol(nO,nV,tau_x,H_vv)
call compute_H_vo_chol(nO,nV,t1,H_vo)
call gpu_upload(gpu_data, nO, nV, t1, t2, tau, tau_x, H_vv);
call compute_H_oo_chol_gpu(gpu_data,nO,nV,0,H_oo)
call gpu_upload(gpu_data, nO, nV, t1, t2, tau, tau_x);
call compute_H_oo_chol_gpu(gpu_data,nO,nV,0,H_oo)
call compute_H_vo_chol_gpu(gpu_data,nO,nV,1,H_vo)
call compute_H_vv_chol_gpu(gpu_data,nO,nV,2,H_vv)
call compute_r1_space_chol(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
call compute_r2_space_chol_gpu(gpu_data, nO, nV, t1, r2, max_r2)
else
call compute_H_oo(nO,nV,t1,t2,tau,H_oo)
call compute_H_vv(nO,nV,t1,t2,tau,H_vv)
call compute_H_vo(nO,nV,t1,t2,H_vo)
call compute_r1_space_chol(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
call compute_r2_space_chol_gpu(gpu_data, nO, nV, t1, r2, max_r2)
call compute_r1_space(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
call compute_r2_space(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r2,max_r2)
endif
max_r = max(max_r1,max_r2)
! Update

View File

@@ -290,190 +290,3 @@ subroutine compute_r1_space_chol(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
end
! H_oo
subroutine compute_H_oo_chol(nO,nV,tau_x,H_oo)
implicit none
integer, intent(in) :: nO,nV
double precision, intent(in) :: tau_x(nO, nO, nV, nV)
double precision, intent(out) :: H_oo(nO, nO)
integer :: a,b,i,j,u,k
double precision, allocatable :: tau_kau(:,:,:), tmp_vov(:,:,:)
allocate(tau_kau(cholesky_mo_num,nV,nO))
!$omp parallel &
!$omp default(shared) &
!$omp private(i,u,j,k,a,b,tmp_vov)
allocate(tmp_vov(nV,nO,nV) )
!$omp do
do u = 1, nO
do b=1,nV
do j=1,nO
do a=1,nV
tmp_vov(a,j,b) = tau_x(u,j,a,b)
enddo
enddo
enddo
call dgemm('N','T',cholesky_mo_num,nV,nO*nV,1.d0, &
cc_space_v_ov_chol, cholesky_mo_num, tmp_vov, nV, &
0.d0, tau_kau(1,1,u), cholesky_mo_num)
enddo
!$omp end do nowait
deallocate(tmp_vov)
!$omp do
do i = 1, nO
do u = 1, nO
H_oo(u,i) = cc_space_f_oo(u,i)
enddo
enddo
!$omp end do nowait
!$omp barrier
!$omp end parallel
call dgemm('T', 'N', nO, nO, cholesky_mo_num*nV, 1.d0, &
tau_kau, cholesky_mo_num*nV, cc_space_v_vo_chol, cholesky_mo_num*nV, &
1.d0, H_oo, nO)
end
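! Note: assuming the usual layouts cc_space_v_ov_chol = L_ov(k,i,a) and cc_space_v_vo_chol = L_vo(k,a,i)
! (Cholesky index k first), the two dgemm calls above implement
!   H_oo(u,i) = f_oo(u,i) + sum_{k,a,j,b} L_ov(k,j,b) * tau_x(u,j,a,b) * L_vo(k,a,i)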
! H_vv
subroutine compute_H_vv_chol(nO,nV,tau_x,H_vv)
implicit none
integer, intent(in) :: nO,nV
double precision, intent(in) :: tau_x(nO, nO, nV, nV)
double precision, intent(out) :: H_vv(nV, nV)
integer :: a,b,i,j,u,k, beta
double precision, allocatable :: tau_kia(:,:,:), tmp_oov(:,:,:)
allocate(tau_kia(cholesky_mo_num,nO,nV))
!$omp parallel &
!$omp default(shared) &
!$omp private(i,beta,j,k,a,b,tmp_oov)
allocate(tmp_oov(nO,nO,nV) )
!$omp do
do a = 1, nV
do b=1,nV
do j=1,nO
do i=1,nO
tmp_oov(i,j,b) = tau_x(i,j,a,b)
enddo
enddo
enddo
call dgemm('N','T',cholesky_mo_num,nO,nO*nV,1.d0, &
cc_space_v_ov_chol, cholesky_mo_num, tmp_oov, nO, &
0.d0, tau_kia(1,1,a), cholesky_mo_num)
enddo
!$omp end do nowait
deallocate(tmp_oov)
!$omp do
do beta = 1, nV
do a = 1, nV
H_vv(a,beta) = cc_space_f_vv(a,beta)
enddo
enddo
!$omp end do nowait
!$omp barrier
!$omp end parallel
call dgemm('T', 'N', nV, nV, cholesky_mo_num*nO, -1.d0, &
tau_kia, cholesky_mo_num*nO, cc_space_v_ov_chol, cholesky_mo_num*nO, &
1.d0, H_vv, nV)
end
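! Note: assuming the same L_ov(k,i,a) layout, the contraction above is
!   H_vv(a,beta) = f_vv(a,beta) - sum_{k,i,j,b} L_ov(k,j,b) * tau_x(i,j,a,b) * L_ov(k,i,beta)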
! H_vo
subroutine compute_H_vo_chol(nO,nV,t1,H_vo)
implicit none
integer, intent(in) :: nO,nV
double precision, intent(in) :: t1(nO, nV)
double precision, intent(out) :: H_vo(nV, nO)
integer :: a,b,i,j,u,k
double precision, allocatable :: tmp_k(:), tmp(:,:,:), tmp2(:,:,:)
do i=1,nO
do a=1,nV
H_vo(a,i) = cc_space_f_vo(a,i)
enddo
enddo
allocate(tmp_k(cholesky_mo_num))
call dgemm('N', 'N', cholesky_mo_num, 1, nO*nV, 2.d0, &
cc_space_v_ov_chol, cholesky_mo_num, &
t1, nO*nV, 0.d0, tmp_k, cholesky_mo_num)
call dgemm('T','N',nV*nO,1,cholesky_mo_num,1.d0, &
cc_space_v_vo_chol, cholesky_mo_num, tmp_k, cholesky_mo_num, 1.d0, &
H_vo, nV*nO)
deallocate(tmp_k)
allocate(tmp(cholesky_mo_num,nO,nO))
allocate(tmp2(cholesky_mo_num,nO,nO))
call dgemm('N','T', cholesky_mo_num*nO, nO, nV, 1.d0, &
cc_space_v_ov_chol, cholesky_mo_num*nO, t1, nO, 0.d0, tmp, cholesky_mo_num*nO)
do i=1,nO
do j=1,nO
do k=1,cholesky_mo_num
tmp2(k,j,i) = tmp(k,i,j)
enddo
enddo
enddo
deallocate(tmp)
call dgemm('T','N', nV, nO, cholesky_mo_num*nO, -1.d0, &
cc_space_v_ov_chol, cholesky_mo_num*nO, tmp2, cholesky_mo_num*nO, &
1.d0, H_vo, nV)
end
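! Note: assuming the L_ov(k,i,a) and L_vo(k,a,i) layouts, the three dgemm calls above implement
!   H_vo(a,i) = f_vo(a,i) + sum_{k,j,b} ( 2*L_vo(k,a,i)*L_ov(k,j,b) - L_ov(k,j,a)*L_ov(k,i,b) ) * t1(j,b)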
subroutine compute_H_oo_chol2(nO,nV,tau_x,H_oo)
implicit none
integer, intent(in) :: nO,nV
double precision, intent(in) :: tau_x(nO, nO, nV, nV)
double precision, intent(out) :: H_oo(nO, nO)
integer :: a,b,i,j,u,k
double precision, allocatable :: tau_kau(:,:,:), tmp_vov(:,:,:), tmp_ovv(:,:,:)
allocate(tau_kau(cholesky_mo_num,nV,nO))
allocate(tmp_vov(nV,nO,nV) )
allocate(tmp_ovv(nO,nV,nV) )
do u = 1, nO
call dcopy(nO*nV*nV, tau_x(u,1,1,1), nO, tmp_ovv, 1)
print *, u
print *, tmp_ovv
do b=1,nV
do j=1,nO
do a=1,nV
tmp_vov(a,j,b) = tmp_ovv(j,a,b)
enddo
enddo
enddo
call dgemm('N','T',cholesky_mo_num,nV,nO*nV,1.d0, &
cc_space_v_ov_chol, cholesky_mo_num, tmp_vov, nV, &
0.d0, tau_kau(1,1,u), cholesky_mo_num)
enddo
deallocate(tmp_vov)
call dcopy(nO*nO, cc_space_f_oo, 1, H_oo, 1);
call dgemm('T', 'N', nO, nO, cholesky_mo_num*nV, 1.d0, &
tau_kau, cholesky_mo_num*nV, cc_space_v_vo_chol, cholesky_mo_num*nV, &
1.d0, H_oo, nO)
end

View File

@@ -11,8 +11,7 @@ void gpu_upload(gpu_data* data,
double* t1,
double* t2,
double* tau,
double* tau_x,
double* H_vv)
double* tau_x)
{
int lda;
const int cholesky_mo_num = data->cholesky_mo_num;
@@ -40,10 +39,6 @@ void gpu_upload(gpu_data* data,
double* d_t2 = data[igpu].t2;
lda = nO*nO;
cublasSetMatrix(nO*nO, nV*nV, sizeof(double), t2, lda, d_t2, lda);
double* d_H_vv = data[igpu].H_vv;
lda = nV;
cublasSetMatrix(nV, nV, sizeof(double), H_vv, lda, d_H_vv, lda);
}
}
@@ -1353,6 +1348,7 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_o
{
int ngpus = 1;
if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
igpu = igpu % ngpus;
const int cholesky_mo_num = data[igpu].cholesky_mo_num;
cudaSetDevice(igpu);
@@ -1382,13 +1378,13 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_o
double* d_tmp_vov;
cudaMalloc((void **)&d_tmp_vov, nV*nO*nV * sizeof(double));
for (int i=0 ; i<nV ; ++i) {
cudaStreamCreate(&(stream[i]));
}
alpha = 1.0;
beta = 0.0;
for (int u=0 ; u<nO ; ++u) {
cublasDcopy(handle, nO*nV*nV, &(d_tau_x[u]), nO, d_tmp_ovv, 1);
for (int i=0 ; i<nV ; ++i) {
cudaStreamCreate(&(stream[i]));
}
for (int b=0 ; b<nV ; ++b) {
cublasSetStream(handle, stream[b]);
A = &(d_tmp_ovv[nO*nV*b]); lda = nO;
@@ -1396,9 +1392,7 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_o
C = &(d_tmp_vov[nV*nO*b]); ldc = nV;
cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
for (int i=0 ; i<nV ; ++i) {
cudaStreamDestroy(stream[i]);
}
cudaDeviceSynchronize();
cublasSetStream(handle, NULL);
alpha = 1.0;
beta = 0.0;
@@ -1408,6 +1402,9 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_o
C=&(d_tau_kau[cholesky_mo_num*nV*u]); ldc=cholesky_mo_num;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
}
for (int i=0 ; i<nV ; ++i) {
cudaStreamDestroy(stream[i]);
}
cudaFree(d_tmp_vov);
cudaFree(d_tmp_ovv);
@@ -1437,3 +1434,191 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_o
cublasDestroy(handle);
}
void compute_h_vv_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_vv)
{
int ngpus = 1;
if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
igpu = igpu % ngpus;
const int cholesky_mo_num = data[igpu].cholesky_mo_num;
cudaSetDevice(igpu);
int m,n,k, lda, ldb, ldc;
double alpha, beta;
double* A;
double* B;
double* C;
cudaStream_t stream[nV];
cublasHandle_t handle;
cublasCreate(&handle);
double* d_H_vv = data[igpu].H_vv;
double* d_tau_x = data[igpu].tau_x;
double* d_cc_space_f_vv = data[igpu].cc_space_f_vv;
double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;
double* d_tau_kia;
cudaMalloc((void **)&d_tau_kia, cholesky_mo_num*nO*nV * sizeof(double));
double* d_tmp_oov;
cudaMalloc((void **)&d_tmp_oov, nO*nO*nV * sizeof(double));
alpha = 1.0;
beta = 0.0;
for (int i=0 ; i<nV ; ++i) {
cudaStreamCreate(&(stream[i]));
}
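// For each virtual index a: gather tau_x(:,:,a,:) into tmp_oov (one nO*nO slice per stream), then
// tau_kia(k,i,a) = sum_{j,b} cc_space_v_ov_chol(k,j,b) * tau_x(i,j,a,b) via the Dgemm below.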
for (int a=0 ; a<nV ; ++a) {
for (int b=0 ; b<nV ; ++b) {
cublasSetStream(handle, stream[b]);
cublasDcopy(handle, nO*nO, &(d_tau_x[nO*nO*(a+nV*b)]), 1, &(d_tmp_oov[nO*nO*b]), 1);
}
cudaDeviceSynchronize();
cublasSetStream(handle, NULL);
alpha = 1.0;
beta = 0.0;
m=cholesky_mo_num; n=nO; k=nO*nV;
A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
B=d_tmp_oov; ldb=nO;
C=&(d_tau_kia[cholesky_mo_num*nO*a]); ldc=cholesky_mo_num;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
}
for (int i=0 ; i<nV ; ++i) {
cudaStreamDestroy(stream[i]);
}
cudaFree(d_tmp_oov);
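// H_vv starts from cc_space_f_vv; the Dgemm below then subtracts
// sum_{k,i} tau_kia(k,i,a) * cc_space_v_ov_chol(k,i,beta) from H_vv(a,beta).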
cublasDcopy(handle, nV*nV, d_cc_space_f_vv, 1, d_H_vv, 1);
alpha = -1.0;
beta = 1.0;
m=nV; n=nV; k=cholesky_mo_num*nO;
A=d_tau_kia; lda=cholesky_mo_num*nO;
B=d_cc_space_v_ov_chol; ldb=cholesky_mo_num*nO;
C=d_H_vv; ldc=nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_tau_kia);
// double* H_vv = malloc(nO*nO*sizeof(double));
cublasGetMatrix(nV, nV, sizeof(double), d_H_vv, nV, H_vv, nV);
for (int i=0 ; i<ngpus ; ++i) {
if (i != igpu) {
double* d_H_vv = data[i].H_vv;
cudaSetDevice(i);
cublasSetMatrix(nV, nV, sizeof(double), H_vv, nV, d_H_vv, nV);
}
}
// free(H_vv);
cublasDestroy(handle);
}
void compute_h_vo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_vo)
{
int ngpus = 1;
if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
igpu = igpu % ngpus;
const int cholesky_mo_num = data[igpu].cholesky_mo_num;
cudaSetDevice(igpu);
int m,n,k, lda, ldb, ldc;
double alpha, beta;
double* A;
double* B;
double* C;
cudaStream_t stream[nV];
cublasHandle_t handle;
cublasCreate(&handle);
double* d_t1 = data[igpu].t1;
double* d_H_vo = data[igpu].H_vo;
double* d_tau_x = data[igpu].tau_x;
double* d_cc_space_f_vo = data[igpu].cc_space_f_vo;
double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;
double* d_cc_space_v_vo_chol = data[igpu].cc_space_v_vo_chol;
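// H_vo starts from cc_space_f_vo; the two Dgemms below add
// 2 * sum_{k,j,b} cc_space_v_vo_chol(k,a,i) * cc_space_v_ov_chol(k,j,b) * t1(j,b) to H_vo(a,i).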
cublasDcopy(handle, nV*nO, d_cc_space_f_vo, 1, d_H_vo, 1);
double* d_tmp_k;
cudaMalloc((void **)&d_tmp_k, cholesky_mo_num * sizeof(double));
alpha = 2.0;
beta = 0.0;
m=cholesky_mo_num; n=1; k=nO*nV;
A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
B=d_t1; ldb=nO*nV;
C=d_tmp_k; ldc=cholesky_mo_num;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
alpha = 1.0;
beta = 1.0;
m=nV*nO; n=1; k=cholesky_mo_num;
A=d_cc_space_v_vo_chol; lda=cholesky_mo_num;
B=d_tmp_k; ldb=cholesky_mo_num;
C=d_H_vo; ldc=nV*nO;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_tmp_k);
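// tmp(k,i,j) = sum_b cc_space_v_ov_chol(k,i,b) * t1(j,b); tmp2 then swaps the two occupied indices.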
double* d_tmp;
cudaMalloc((void **)&d_tmp, cholesky_mo_num*nO*nO * sizeof(double));
alpha = 1.0;
beta = 0.0;
m=cholesky_mo_num*nO; n=nO; k=nV;
A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
B=d_t1; ldb=nO;
C=d_tmp; ldc=cholesky_mo_num*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
double* d_tmp2;
cudaMalloc((void **)&d_tmp2, cholesky_mo_num*nO*nO * sizeof(double));
for (int i=0 ; i<nV ; ++i) {
cudaStreamCreate(&(stream[i]));
}
for (int i=0 ; i<nO ; ++i) {
for (int j=0 ; j<nO ; ++j) {
cublasSetStream(handle, stream[j]);
cublasDcopy(handle, cholesky_mo_num, &(d_tmp [cholesky_mo_num*(i+nO*j)]), 1,
&(d_tmp2[cholesky_mo_num*(j+nO*i)]), 1);
}
}
for (int i=0 ; i<nV ; ++i) {
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
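// H_vo(a,i) -= sum_{k,j} cc_space_v_ov_chol(k,j,a) * tmp2(k,j,i)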
alpha = -1.0;
beta = 1.0;
m=nV; n=nO; k=cholesky_mo_num*nO;
A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
B=d_tmp2; ldb=cholesky_mo_num*nO;
C=d_H_vo; ldc=nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
// double* H_vo = malloc(nO*nO*sizeof(double));
cublasGetMatrix(nV, nO, sizeof(double), d_H_vo, nV, H_vo, nV);
for (int i=0 ; i<ngpus ; ++i) {
if (i != igpu) {
double* d_H_vo = data[i].H_vo;
cudaSetDevice(i);
cublasSetMatrix(nV, nO, sizeof(double), H_vo, nV, d_H_vo, nV);
}
}
// free(H_vo);
cublasDestroy(handle);
}

View File

@@ -19,6 +19,7 @@ typedef struct {
double* t1;
double* t2;
double* H_oo;
double* H_vo;
double* H_vv;
int nO;
int nV;

View File

@@ -108,19 +108,18 @@ gpu_data* gpu_init(
cudaMalloc((void **)&d_tau_x, lda * nV * nV * sizeof(double));
double* d_t1;
lda = nO;
cudaMalloc((void **)&d_t1, nO * nV * sizeof(double));
double* d_t2;
lda = nO*nO;
cudaMalloc((void **)&d_t2, nO*nO*nV*nV * sizeof(double));
double* d_H_oo;
lda = nO;
cudaMalloc((void **)&d_H_oo, nO * nO * sizeof(double));
double* d_H_vo;
cudaMalloc((void **)&d_H_vo, nV * nO * sizeof(double));
double* d_H_vv;
lda = nV;
cudaMalloc((void **)&d_H_vv, nV * nV * sizeof(double));
data[igpu].cc_space_v_oo_chol = d_cc_space_v_oo_chol;
@@ -143,6 +142,7 @@ gpu_data* gpu_init(
data[igpu].t1 = d_t1;
data[igpu].t2 = d_t2;
data[igpu].H_oo = d_H_oo;
data[igpu].H_vo = d_H_vo;
data[igpu].H_vv = d_H_vv;
data[igpu].nO = nO;

View File

@@ -28,7 +28,7 @@ module gpu_module
real(c_double), intent(in) :: cc_space_f_vv(nV,nV)
end function
subroutine gpu_upload(gpu_data, nO, nV, t1, t2, tau, tau_x, H_vv) bind(C)
subroutine gpu_upload(gpu_data, nO, nV, t1, t2, tau, tau_x) bind(C)
import c_int, c_double, c_ptr
type(c_ptr), value :: gpu_data
integer(c_int), intent(in), value :: nO, nV
@@ -36,7 +36,6 @@ module gpu_module
real(c_double), intent(in) :: t2(nO,nO,nV,nV)
real(c_double), intent(in) :: tau(nO,nO,nV,nV)
real(c_double), intent(in) :: tau_x(nO,nO,nV,nV)
real(c_double), intent(in) :: H_vv(nV,nV)
end subroutine
subroutine compute_H_oo_chol_gpu(gpu_data, nO, nV, igpu, H_oo) bind(C)
@@ -46,6 +45,20 @@ module gpu_module
real(c_double), intent(out) :: H_oo(nO,nO)
end subroutine
subroutine compute_H_vo_chol_gpu(gpu_data, nO, nV, igpu, H_vo) bind(C)
import c_int, c_double, c_ptr
type(c_ptr), value :: gpu_data
integer(c_int), intent(in), value :: nO, nV, igpu
real(c_double), intent(out) :: H_vo(nV,nO)
end subroutine
subroutine compute_H_vv_chol_gpu(gpu_data, nO, nV, igpu, H_vv) bind(C)
import c_int, c_double, c_ptr
type(c_ptr), value :: gpu_data
integer(c_int), intent(in), value :: nO, nV, igpu
real(c_double), intent(out) :: H_vv(nV,nV)
end subroutine
subroutine compute_r2_space_chol_gpu(gpu_data, nO, nV, t1, r2, max_r2) bind(C)
import c_int, c_double, c_ptr
type(c_ptr), value :: gpu_data