diff --git a/devel/ccsd_gpu/ccsd_space_orb_sub_chol.irp.f b/devel/ccsd_gpu/ccsd_space_orb_sub_chol.irp.f
index ea8ba70..2c55310 100644
--- a/devel/ccsd_gpu/ccsd_space_orb_sub_chol.irp.f
+++ b/devel/ccsd_gpu/ccsd_space_orb_sub_chol.irp.f
@@ -456,71 +456,29 @@ subroutine compute_r2_space_chol(nO,nV,t1,t2,tau,H_oo,H_vv,H_vo,r2,max_r2)
   ! internal
   integer                       :: u,v,i,j,beta,gam,a,b
   double precision              :: max_r2_local
+  double precision, allocatable :: Y_oovv(:,:,:,:)
   integer :: block_size, iblock, k
   block_size = 16
 
   call set_multiple_levels_omp(.False.)
 
-  call compute_r2_space_chol_gpu(nO,nV,cholesky_mo_num,t1,tau, &
-      cc_space_v_vo_chol, cc_space_v_vv_chol, &
+  call compute_r2_space_chol_gpu(nO,nV,cholesky_mo_num,t1,t2,tau, &
+      cc_space_v_ov_chol, cc_space_v_vo_chol, cc_space_v_vv_chol, &
       cc_space_v_oooo, cc_space_v_vooo, cc_space_v_oovv, cc_space_v_vvoo, &
-      r2)
+      cc_space_f_vo, H_vv, r2)
 
-  double precision, allocatable :: X_oovv(:,:,:,:)
-  allocate(X_oovv(nO,nO,nV,nV))
-  !$omp parallel &
-  !$omp shared(nO,nV,t2,X_oovv) &
-  !$omp private(u,v,gam,a) &
-  !$omp default(none)
-  !$omp do
-  do a = 1, nV
-    do gam = 1, nV
-      do v = 1, nO
-        do u = 1, nO
-          X_oovv(u,v,gam,a) = t2(u,v,gam,a)
-        enddo
-      enddo
-    enddo
-  enddo
-  !$omp end do
-  !$omp end parallel
+! call compute_g_vir_chol_gpu(nO,nV,cholesky_mo_num,t1,t2,t2, &
+!     cc_space_v_ov_chol, cc_space_v_vo_chol, cc_space_v_vv_chol, &
+!     cc_space_v_oooo, cc_space_v_vooo, cc_space_v_oovv, cc_space_v_vvoo, &
+!     cc_space_f_vo, H_vv, r2)
 
-  double precision, allocatable :: g_vir(:,:)
-  allocate(g_vir(nV,nV))
-  call compute_g_vir_chol(nO,nV,t1,t2,H_vv,g_vir)
-
-  double precision, allocatable :: Y_oovv(:,:,:,:)
-  allocate(Y_oovv(nO,nO,nV,nV))
-
-  call dgemm('N','N',nO*nO*nV,nV,nV, &
-             1d0, X_oovv, size(X_oovv,1) * size(X_oovv,2) * size(X_oovv,3), &
-                  g_vir, size(g_vir,1), &
-             0d0, Y_oovv, size(Y_oovv,1) * size(Y_oovv,2) * size(Y_oovv,3))
-  deallocate(g_vir)
-  deallocate(X_oovv)
-
-  !$omp parallel &
-  !$omp shared(nO,nV,r2,Y_oovv) &
-  !$omp private(u,v,gam,beta) &
-  !$omp default(none)
-  !$omp do
-  do gam = 1, nV
-    do beta = 1, nV
-      do v = 1, nO
-        do u = 1, nO
-          r2(u,v,beta,gam) = r2(u,v,beta,gam) + Y_oovv(u,v,beta,gam) + Y_oovv(v,u,gam,beta)
-        enddo
-      enddo
-    enddo
-  enddo
-  !$omp end do
-  !$omp end parallel
-  deallocate(Y_oovv)
+!---
 
   double precision, allocatable :: g_occ(:,:)
   allocate(g_occ(nO,nO))
   call compute_g_occ_chol(nO,nV,t1,t2,H_oo,g_occ)
 
+  double precision, allocatable :: X_oovv(:,:,:,:)
   allocate(X_oovv(nO,nO,nV,nV))
   call dgemm('N','N',nO,nO*nV*nV,nO, &
              1d0, g_occ , size(g_occ,1), &
@@ -982,7 +940,7 @@ end
 ! g_vir
 
 subroutine compute_g_vir_chol(nO,nV,t1,t2,H_vv,g_vir)
-
+  use gpu_module
   implicit none
 
   integer, intent(in)           :: nO,nV
@@ -992,44 +950,44 @@ subroutine compute_g_vir_chol(nO,nV,t1,t2,H_vv,g_vir)
 
   integer                       :: a,tmp_a,b,k,l,c,d,tmp_c,tmp_d,i,j,u,v, beta, gam
 
-  call dgemm('N','N',nV,nV,nO, &
-            -1d0, cc_space_f_vo , size(cc_space_f_vo,1), &
-                  t1 , size(t1,1), &
-             0d0, g_vir, size(g_vir,1))
+!  do beta = 1, nV
+!    do a = 1, nV
+!      g_vir(a,beta) = H_vv(a,beta)
+!    enddo
+!  enddo
+
+!  call dgemm('N','N',nV,nV,nO, &
+!            -1d0, cc_space_f_vo , size(cc_space_f_vo,1), &
+!                  t1 , size(t1,1), &
+!             1d0, g_vir, size(g_vir,1))
 
   double precision, allocatable :: tmp_k(:), tmp_vo(:,:,:), tmp_vo2(:,:,:)
 
-  allocate(tmp_k(cholesky_mo_num))
-  call dgemm('N','N', cholesky_mo_num, 1, nO*nV, 1.d0, &
-    cc_space_v_ov_chol, cholesky_mo_num, t1, nO*nV, 0.d0, tmp_k, cholesky_mo_num)
+!  allocate(tmp_k(cholesky_mo_num))
+!  call dgemm('N','N', cholesky_mo_num, 1, nO*nV, 1.d0, &
+!    cc_space_v_ov_chol, cholesky_mo_num, t1, nO*nV, 0.d0, tmp_k, cholesky_mo_num)
+!
+!  call dgemm('T','N', nV*nV, 1, cholesky_mo_num, 2.d0, &
+!    cc_space_v_vv_chol, cholesky_mo_num, tmp_k, cholesky_mo_num, 1.d0, &
+!    g_vir, nV*nV)
+!  deallocate(tmp_k)
-  call dgemm('T','N', nV*nV, 1, cholesky_mo_num, 2.d0, &
-    cc_space_v_vv_chol, cholesky_mo_num, tmp_k, cholesky_mo_num, 1.d0, &
-    g_vir, nV*nV)
-  deallocate(tmp_k)
-
-  allocate(tmp_vo(cholesky_mo_num,nV,nO))
-  call dgemm('N','T',cholesky_mo_num*nV, nO, nV, 1.d0, &
-    cc_space_v_vv_chol, cholesky_mo_num*nV, t1, nO, 0.d0, tmp_vo, cholesky_mo_num*nV)
-
-  allocate(tmp_vo2(cholesky_mo_num,nO,nV))
-  do beta=1,nV
-    do i=1,nO
-      do k=1,cholesky_mo_num
-        tmp_vo2(k,i,beta) = -tmp_vo(k,beta,i)
-      enddo
-    enddo
-  enddo
-  deallocate(tmp_vo)
-
-  do beta = 1, nV
-    do a = 1, nV
-      g_vir(a,beta) = g_vir(a,beta) + H_vv(a,beta)
-    enddo
-  enddo
-
-  call dgemm('T','N', nV, nV, nO*cholesky_mo_num, 1.d0, &
-    cc_space_v_ov_chol, cholesky_mo_num*nO, &
-    tmp_vo2, cholesky_mo_num*nO, 1.d0, g_vir, nV)
+!  allocate(tmp_vo(cholesky_mo_num,nV,nO))
+!  call dgemm('N','T',cholesky_mo_num*nV, nO, nV, 1.d0, &
+!    cc_space_v_vv_chol, cholesky_mo_num*nV, t1, nO, 0.d0, tmp_vo, cholesky_mo_num*nV)
+!
+!  allocate(tmp_vo2(cholesky_mo_num,nO,nV))
+!  do beta=1,nV
+!    do i=1,nO
+!      do k=1,cholesky_mo_num
+!        tmp_vo2(k,i,beta) = -tmp_vo(k,beta,i)
+!      enddo
+!    enddo
+!  enddo
+!  deallocate(tmp_vo)
+!
+!  call dgemm('T','N', nV, nV, nO*cholesky_mo_num, 1.d0, &
+!    cc_space_v_ov_chol, cholesky_mo_num*nO, &
+!    tmp_vo2, cholesky_mo_num*nO, 1.d0, g_vir, nV)
 
 end
diff --git a/devel/ccsd_gpu/gpu.c b/devel/ccsd_gpu/gpu.c
index 0b89ab5..b2a835b 100644
--- a/devel/ccsd_gpu/gpu.c
+++ b/devel/ccsd_gpu/gpu.c
@@ -61,25 +61,22 @@ void gpu_dgemm(char transa, char transb, int m, int n, int k, double alpha,
 
 void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo_num,
                                double* t1,
+                               double* t2,
                                double* tau,
+                               double* cc_space_v_ov_chol,
                                double* cc_space_v_vo_chol,
                                double* cc_space_v_vv_chol,
                                double* cc_space_v_oooo,
                                double* cc_space_v_vooo,
                                double* cc_space_v_oovv,
                                double* cc_space_v_vvoo,
+                               double* cc_space_f_vo,
+                               double* H_vv,
                                double* r2)
 {
-  double* d_tau;
-  double* d_r2;
-  double* d_cc_space_v_vv_chol;
-  double* d_cc_space_v_vo_chol;
-  double* d_t1;
-  double* d_tmp_cc;
 
   int ngpus;
   cudaGetDeviceCount(&ngpus);
-
   #pragma omp parallel num_threads(ngpus)
   {
     int m,n,k, lda, ldb, ldc;
@@ -100,7 +97,9 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
     double* d_r2;
     double* d_cc_space_v_vv_chol;
     double* d_cc_space_v_vo_chol;
+    double* d_cc_space_v_ov_chol;
     double* d_t1;
+    double* d_t2;
     double* d_tmp_cc;
 
     lda = nO * nO;
@@ -116,6 +115,10 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
     cudaMalloc((void **)&d_cc_space_v_vv_chol, lda * nV * sizeof(double));
     cublasSetMatrix(cholesky_mo_num*nV, nV, sizeof(double), cc_space_v_vv_chol, lda, d_cc_space_v_vv_chol, lda);
 
+    lda = cholesky_mo_num * nO;
+    cudaMalloc((void **)&d_cc_space_v_ov_chol, lda * nV * sizeof(double));
+    cublasSetMatrix(cholesky_mo_num*nO, nV, sizeof(double), cc_space_v_ov_chol, lda, d_cc_space_v_ov_chol, lda);
+
     lda = cholesky_mo_num * nV;
     cudaMalloc((void **)&d_cc_space_v_vo_chol, lda * nO * sizeof(double));
     cublasSetMatrix(cholesky_mo_num*nV, nO, sizeof(double), cc_space_v_vo_chol, lda, d_cc_space_v_vo_chol, lda);
@@ -124,6 +127,10 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
     cudaMalloc((void **)&d_t1, nO * nV * sizeof(double));
     cublasSetMatrix(nO, nV, sizeof(double), t1, lda, d_t1, lda);
 
+    lda = nO*nO;
+    cudaMalloc((void **)&d_t2, nO*nO*nV*nV * sizeof(double));
+    cublasSetMatrix(nO*nO, nV*nV, sizeof(double), t2, lda, d_t2, lda);
+
     #pragma omp sections
     {
 
@@ -192,8 +199,109 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
       C = d_r2; ldc = nO*nO;
       cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
       cudaFree(d_A1);
-
     }
+
+    // g_vir
+    #pragma omp section
+    {
+      double* d_g_vir;
+      cudaMalloc((void**)&d_g_vir, nV*nV*sizeof(double));
+      cublasSetMatrix(nV, nV, sizeof(double), H_vv, nV, d_g_vir, nV);
+
+      double* d_cc_space_f_vo;
+      cudaMalloc((void**)&d_cc_space_f_vo, nV*nO*sizeof(double));
+      cublasSetMatrix(nV, nO, sizeof(double), cc_space_f_vo, nV, d_cc_space_f_vo, nV);
+
+      alpha = -1.0;
+      beta  = 1.0;
+      m=nV ; n=nV; k=nO;
+      A = d_cc_space_f_vo ; lda = nV;
+      B = d_t1 ; ldb = nO;
+      C = d_g_vir; ldc = nV;
+      cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+      cudaFree(d_cc_space_f_vo);
+
+      double* d_tmp_k;
+      cudaMalloc((void**)&d_tmp_k, cholesky_mo_num*sizeof(double));
+      alpha = 1.0;
+      beta  = 0.0;
+      m=cholesky_mo_num ; n=1; k=nO*nV;
+      A = d_cc_space_v_ov_chol; lda = cholesky_mo_num;
+      B = d_t1 ; ldb = nO*nV;
+      C = d_tmp_k; ldc = cholesky_mo_num;
+      cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+
+      alpha = 2.0;
+      beta  = 1.0;
+      m=nV*nV; n=1; k=cholesky_mo_num;
+      A = d_cc_space_v_vv_chol; lda = cholesky_mo_num;
+      B = d_tmp_k ; ldb = cholesky_mo_num;
+      C = d_g_vir; ldc = nV*nV;
+      cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+      cudaFree(d_tmp_k);
+
+      double* d_tmp_vo;
+      cudaMalloc((void**)&d_tmp_vo, cholesky_mo_num*nV*nO*sizeof(double));
+      alpha = 1.0;
+      beta  = 0.0;
+      m=cholesky_mo_num*nV ; n=nO; k=nV;
+      A = d_cc_space_v_vv_chol; lda = cholesky_mo_num*nV;
+      B = d_t1 ; ldb = nO;
+      C = d_tmp_vo; ldc = cholesky_mo_num*nV;
+      cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
+
+      double* d_tmp_vo2;
+      cudaMalloc((void**)&d_tmp_vo2, cholesky_mo_num*nV*nO*sizeof(double));
+      for (int i=0 ; i