mirror of https://gitlab.com/scemama/qp_plugins_scemama.git synced 2024-11-07 06:33:40 +01:00

H_vo on GPU

Anthony Scemama 2023-08-04 12:33:52 +02:00
parent 98948f1947
commit 8c6fb17a8d
6 changed files with 142 additions and 83 deletions


@@ -123,25 +123,22 @@ subroutine run_ccsd_space_orb
                       cc_space_v_oovo, cc_space_v_ovvo, cc_space_v_ovov, cc_space_v_ovoo, &
                       cc_space_f_oo, cc_space_f_vo, cc_space_f_vv)
 
+  if (.not.do_ao_cholesky) then
+    print *, 'ao_choleky is required'
+    stop -1
+  endif
+
   do while (not_converged)
 
     ! Residue
-    if (do_ao_cholesky) then
-      call compute_H_vo_chol(nO,nV,t1,H_vo)
-      call gpu_upload(gpu_data, nO, nV, t1, t2, tau, tau_x);
-      call compute_H_oo_chol_gpu(gpu_data,nO,nV,0,H_oo)
-      call compute_H_vv_chol_gpu(gpu_data,nO,nV,0,H_vv)
+    call gpu_upload(gpu_data, nO, nV, t1, t2, tau, tau_x);
+    call compute_H_oo_chol_gpu(gpu_data,nO,nV,0,H_oo)
+    call compute_H_vo_chol_gpu(gpu_data,nO,nV,1,H_vo)
+    call compute_H_vv_chol_gpu(gpu_data,nO,nV,2,H_vv)
     call compute_r1_space_chol(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
     call compute_r2_space_chol_gpu(gpu_data, nO, nV, t1, r2, max_r2)
-    else
-      call compute_H_oo(nO,nV,t1,t2,tau,H_oo)
-      call compute_H_vv(nO,nV,t1,t2,tau,H_vv)
-      call compute_H_vo(nO,nV,t1,t2,H_vo)
-      call compute_r1_space(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
-      call compute_r2_space(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r2,max_r2)
-    endif
 
     max_r = max(max_r1,max_r2)
 
     ! Update
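The third argument of the compute_H_*_chol_gpu calls above (0, 1, 2) is the igpu slot. On the C side of this commit, each routine first maps that slot onto a physical device with a modulo, so the lines below are roughly what runs before any cuBLAS work (MULTIGPU and the gpu_data layout are taken from the C file in this commit):

    int ngpus = 1;
    if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);  /* count devices only in multi-GPU builds */
    igpu = igpu % ngpus;                            /* wrap the requested slot onto an existing device */
    cudaSetDevice(igpu);                            /* subsequent allocations and cuBLAS calls target this GPU */

On a single-GPU machine all three slots therefore fold back onto device 0.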


@@ -290,53 +290,3 @@ subroutine compute_r1_space_chol(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r1,max_r1)
 end
 
-! H_vo
-subroutine compute_H_vo_chol(nO,nV,t1,H_vo)
-
-  implicit none
-
-  integer, intent(in)           :: nO,nV
-  double precision, intent(in)  :: t1(nO, nV)
-  double precision, intent(out) :: H_vo(nV, nO)
-
-  integer                       :: a,b,i,j,u,k
-  double precision, allocatable :: tmp_k(:), tmp(:,:,:), tmp2(:,:,:)
-
-  do i=1,nO
-    do a=1,nV
-      H_vo(a,i) = cc_space_f_vo(a,i)
-    enddo
-  enddo
-
-  allocate(tmp_k(cholesky_mo_num))
-  call dgemm('N', 'N', cholesky_mo_num, 1, nO*nV, 2.d0, &
-    cc_space_v_ov_chol, cholesky_mo_num, &
-    t1, nO*nV, 0.d0, tmp_k, cholesky_mo_num)
-
-  call dgemm('T','N',nV*nO,1,cholesky_mo_num,1.d0, &
-    cc_space_v_vo_chol, cholesky_mo_num, tmp_k, cholesky_mo_num, 1.d0, &
-    H_vo, nV*nO)
-  deallocate(tmp_k)
-
-  allocate(tmp(cholesky_mo_num,nO,nO))
-  allocate(tmp2(cholesky_mo_num,nO,nO))
-
-  call dgemm('N','T', cholesky_mo_num*nO, nO, nV, 1.d0, &
-    cc_space_v_ov_chol, cholesky_mo_num*nO, t1, nO, 0.d0, tmp, cholesky_mo_num*nO)
-
-  do i=1,nO
-    do j=1,nO
-      do k=1,cholesky_mo_num
-        tmp2(k,j,i) = tmp(k,i,j)
-      enddo
-    enddo
-  enddo
-  deallocate(tmp)
-
-  call dgemm('T','N', nV, nO, cholesky_mo_num*nO, -1.d0, &
-    cc_space_v_ov_chol, cholesky_mo_num*nO, tmp2, cholesky_mo_num*nO, &
-    1.d0, H_vo, nV)
-
-end
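For reference, the contraction evaluated by this removed CPU routine (and reproduced on the GPU by the new compute_h_vo_chol_gpu further down) can be read off its three dgemm calls. Writing L^k_{pq} for the Cholesky vectors held in cc_space_v_ov_chol / cc_space_v_vo_chol (the index labels here are only illustrative, not from the source):

    H_{ai} = f_{ai} + \sum_{k} L^{k}_{ai}\Bigl(2\sum_{jb} L^{k}_{jb}\,t_{jb}\Bigr) - \sum_{kjb} L^{k}_{ja}\,L^{k}_{ib}\,t_{jb}
           = f_{ai} + \sum_{jb}\bigl[\,2\,(ai|jb) - (ja|ib)\,\bigr]\,t_{jb}

assuming the usual factorization (pq|rs) = \sum_k L^{k}_{pq} L^{k}_{rs} of the MO two-electron integrals.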


@@ -1378,13 +1378,13 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_oo)
     double* d_tmp_vov;
     cudaMalloc((void **)&d_tmp_vov, nV*nO*nV * sizeof(double));
 
+    for (int i=0 ; i<nV ; ++i) {
+      cudaStreamCreate(&(stream[i]));
+    }
     alpha = 1.0;
     beta = 0.0;
     for (int u=0 ; u<nO ; ++u) {
       cublasDcopy(handle, nO*nV*nV, &(d_tau_x[u]), nO, d_tmp_ovv, 1);
-      for (int i=0 ; i<nV ; ++i) {
-        cudaStreamCreate(&(stream[i]));
-      }
       for (int b=0 ; b<nV ; ++b) {
         cublasSetStream(handle, stream[b]);
         A = &(d_tmp_ovv[nO*nV*b]); lda = nO;
@@ -1392,9 +1392,7 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_oo)
         C = &(d_tmp_vov[nV*nO*b]); ldc = nV;
         cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
       }
-      for (int i=0 ; i<nV ; ++i) {
-        cudaStreamDestroy(stream[i]);
-      }
+      cudaDeviceSynchronize();
       cublasSetStream(handle, NULL);
       alpha = 1.0;
       beta = 0.0;
@@ -1404,6 +1402,9 @@ void compute_h_oo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_oo)
       C=&(d_tau_kau[cholesky_mo_num*nV*u]); ldc=cholesky_mo_num;
       cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
     }
+    for (int i=0 ; i<nV ; ++i) {
+      cudaStreamDestroy(stream[i]);
+    }
 
     cudaFree(d_tmp_vov);
     cudaFree(d_tmp_ovv);
@@ -1469,16 +1470,16 @@ void compute_h_vv_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_vv)
     alpha = 1.0;
     beta = 0.0;
-    // for (int i=0 ; i<nV ; ++i) {
-    //   cudaStreamCreate(&(stream[i]));
-    // }
+    for (int i=0 ; i<nV ; ++i) {
+      cudaStreamCreate(&(stream[i]));
+    }
     for (int a=0 ; a<nV ; ++a) {
       for (int b=0 ; b<nV ; ++b) {
-        // cublasSetStream(handle, stream[b]);
+        cublasSetStream(handle, stream[b]);
         cublasDcopy(handle, nO*nO, &(d_tau_x[nO*nO*(a+nV*b)]), 1, &(d_tmp_oov[nO*nO*b]), 1);
       }
-      // cudaDeviceSynchronize();
-      // cublasSetStream(handle, NULL);
+      cudaDeviceSynchronize();
+      cublasSetStream(handle, NULL);
       alpha = 1.0;
       beta = 0.0;
       m=cholesky_mo_num; n=nO; k=nO*nV;
@@ -1487,9 +1488,9 @@ void compute_h_vv_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_vv)
       C=&(d_tau_kia[cholesky_mo_num*nO*a]); ldc=cholesky_mo_num;
       cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
     }
-    // for (int i=0 ; i<nV ; ++i) {
-    //   cudaStreamDestroy(stream[i]);
-    // }
+    for (int i=0 ; i<nV ; ++i) {
+      cudaStreamDestroy(stream[i]);
+    }
     cudaFree(d_tmp_oov);
 
     cublasDcopy(handle, nV*nV, d_cc_space_f_vv, 1, d_H_vv, 1);
@@ -1518,3 +1519,106 @@ void compute_h_vv_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_vv)
 }
+
+
+void compute_h_vo_chol_gpu(gpu_data* data, int nO, int nV, int igpu, double* H_vo)
+{
+  int ngpus = 1;
+  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
+  igpu = igpu % ngpus;
+
+  const int cholesky_mo_num = data[igpu].cholesky_mo_num;
+  cudaSetDevice(igpu);
+
+  int m,n,k, lda, ldb, ldc;
+  double alpha, beta;
+  double* A;
+  double* B;
+  double* C;
+  cudaStream_t stream[nV];
+
+  cublasHandle_t handle;
+  cublasCreate(&handle);
+
+  double* d_t1 = data[igpu].t1;
+  double* d_H_vo = data[igpu].H_vo;
+  double* d_tau_x = data[igpu].tau_x;
+  double* d_cc_space_f_vo = data[igpu].cc_space_f_vo;
+  double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;
+  double* d_cc_space_v_vo_chol = data[igpu].cc_space_v_vo_chol;
+
+  cublasDcopy(handle, nV*nO, d_cc_space_f_vo, 1, d_H_vo, 1);
+
+  double* d_tmp_k;
+  cudaMalloc((void **)&d_tmp_k, cholesky_mo_num * sizeof(double));
+
+  alpha = 2.0;
+  beta = 0.0;
+  m=cholesky_mo_num; n=1; k=nO*nV;
+  A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
+  B=d_t1; ldb=nO*nV;
+  C=d_tmp_k; ldc=cholesky_mo_num;
+  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+
+  alpha = 1.0;
+  beta = 1.0;
+  m=nV*nO; n=1; k=cholesky_mo_num;
+  A=d_cc_space_v_vo_chol; lda=cholesky_mo_num;
+  B=d_tmp_k; ldb=cholesky_mo_num;
+  C=d_H_vo; ldc=nV*nO;
+  cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+  cudaFree(d_tmp_k);
+
+  double* d_tmp;
+  cudaMalloc((void **)&d_tmp, cholesky_mo_num*nO*nO * sizeof(double));
+
+  alpha = 1.0;
+  beta = 0.0;
+  m=cholesky_mo_num*nO; n=nO; k=nV;
+  A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
+  B=d_t1; ldb=nO;
+  C=d_tmp; ldc=cholesky_mo_num*nO;
+  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+
+  double* d_tmp2;
+  cudaMalloc((void **)&d_tmp2, cholesky_mo_num*nO*nO * sizeof(double));
+
+  for (int i=0 ; i<nV ; ++i) {
+    cudaStreamCreate(&(stream[i]));
+  }
+  for (int i=0 ; i<nO ; ++i) {
+    for (int j=0 ; j<nO ; ++j) {
+      cublasSetStream(handle, stream[j]);
+      cublasDcopy(handle, cholesky_mo_num, &(d_tmp [cholesky_mo_num*(i+nO*j)]), 1,
+                                           &(d_tmp2[cholesky_mo_num*(j+nO*i)]), 1);
+    }
+  }
+  for (int i=0 ; i<nV ; ++i) {
+    cudaStreamDestroy(stream[i]);
+  }
+  cublasSetStream(handle, NULL);
+
+  alpha = -1.0;
+  beta = 1.0;
+  m=nV; n=nO; k=cholesky_mo_num*nO;
+  A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
+  B=d_tmp2; ldb=cholesky_mo_num*nO;
+  C=d_H_vo; ldc=nV;
+  cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+
+//  double* H_vo = malloc(nO*nO*sizeof(double));
+  cublasGetMatrix(nV, nO, sizeof(double), d_H_vo, nV, H_vo, nV);
+  for (int i=0 ; i<ngpus ; ++i) {
+    if (i != igpu) {
+      double* d_H_vo = data[i].H_vo;
+      cudaSetDevice(i);
+      cublasSetMatrix(nV, nO, sizeof(double), H_vo, nV, d_H_vo, nV);
+    }
+  }
+//  free(H_vo);
+
+  cublasDestroy(handle);
+}
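The double loop over (i,j) above permutes the two occupied indices of d_tmp with one strided cublasDcopy per pair, spread over the CUDA streams (note that the stream array is dimensioned nV but indexed by the occupied index j, which relies on nV >= nO, as is usual in CCSD). A minimal sketch, not part of the commit, of the same permutation written as a single CUDA kernel; the kernel name and launch configuration are illustrative:

    /* tmp2(:,j,i) = tmp(:,i,j) for column-major (nchol, nO, nO) arrays */
    __global__ void permute_oo(const double* tmp, double* tmp2, int nchol, int nO)
    {
        int k = blockIdx.x * blockDim.x + threadIdx.x;   /* Cholesky index, contiguous in memory */
        int i = blockIdx.y;
        int j = blockIdx.z;
        if (k < nchol) {
            tmp2[k + (size_t)nchol * (j + (size_t)nO * i)] =
             tmp[k + (size_t)nchol * (i + (size_t)nO * j)];
        }
    }

    /* possible launch:
       dim3 grid((cholesky_mo_num + 255) / 256, nO, nO);
       permute_oo<<<grid, 256>>>(d_tmp, d_tmp2, cholesky_mo_num, nO);  */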


@@ -19,6 +19,7 @@ typedef struct {
   double* t1;
   double* t2;
   double* H_oo;
+  double* H_vo;
   double* H_vv;
   int nO;
   int nV;


@@ -108,19 +108,18 @@ gpu_data* gpu_init(
     cudaMalloc((void **)&d_tau_x, lda * nV * nV * sizeof(double));
 
     double* d_t1;
-    lda = nO;
     cudaMalloc((void **)&d_t1, nO * nV * sizeof(double));
 
     double* d_t2;
-    lda = nO*nO;
     cudaMalloc((void **)&d_t2, nO*nO*nV*nV * sizeof(double));
 
     double* d_H_oo;
-    lda = nO;
     cudaMalloc((void **)&d_H_oo, nO * nO * sizeof(double));
 
+    double* d_H_vo;
+    cudaMalloc((void **)&d_H_vo, nV * nO * sizeof(double));
+
     double* d_H_vv;
-    lda = nV;
     cudaMalloc((void **)&d_H_vv, nV * nV * sizeof(double));
 
     data[igpu].cc_space_v_oo_chol = d_cc_space_v_oo_chol;
@@ -143,6 +142,7 @@ gpu_data* gpu_init(
     data[igpu].t1 = d_t1;
     data[igpu].t2 = d_t2;
     data[igpu].H_oo = d_H_oo;
+    data[igpu].H_vo = d_H_vo;
     data[igpu].H_vv = d_H_vv;
     data[igpu].nO = nO;


@@ -45,6 +45,13 @@ module gpu_module
       real(c_double), intent(out)       :: H_oo(nO,nO)
     end subroutine
 
+    subroutine compute_H_vo_chol_gpu(gpu_data, nO, nV, igpu, H_vo) bind(C)
+      import c_int, c_double, c_ptr
+      type(c_ptr), value                :: gpu_data
+      integer(c_int), intent(in), value :: nO, nV, igpu
+      real(c_double), intent(out)       :: H_vo(nV,nO)
+    end subroutine
+
     subroutine compute_H_vv_chol_gpu(gpu_data, nO, nV, igpu, H_vv) bind(C)
       import c_int, c_double, c_ptr
       type(c_ptr), value                :: gpu_data