mirror of https://gitlab.com/scemama/qp_plugins_scemama.git synced 2024-06-02 11:25:23 +02:00

Multi-GPU

Anthony Scemama 2023-07-16 18:05:37 +02:00
parent ed440c16a2
commit c57f940eb3


@@ -4,7 +4,6 @@
#include <cublas_v2.h>
#include <cuda_runtime.h>
#define NGPUS 2
#define BLOCK_SIZE 16
void dgemm_(char*, char*, int*, int*, int*, double*, double*, int*, double*, int*,
@@ -66,34 +65,31 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
double* cc_space_v_vv_chol,
double* r2)
{
double* d_tau;
double* d_r2;
double* d_cc_space_v_vv_chol;
double* d_cc_space_v_vo_chol;
double* d_t1;
double* d_tmp_cc;
int ngpus;
cudaGetDeviceCount(&ngpus);
#pragma omp parallel num_threads(ngpus)
{
int m,n,k, lda, ldb, ldc;
double alpha, beta;
double* A;
double* B;
double* C;
double* d_taus[NGPUS];
double* d_r2s[NGPUS];
double* d_cc_space_v_vv_chols[NGPUS];
double* d_cc_space_v_vo_chols[NGPUS];
double* d_t1s[NGPUS];
double* d_tmp_ccs[NGPUS];
cublasHandle_t handles[NGPUS];
#pragma omp parallel
{
int ithread = omp_get_thread_num();
int igpu = ithread % NGPUS;
int igpu = ithread ;
cudaSetDevice(igpu);
cublasHandle_t handle;
if (ithread < NGPUS) {
cublasCreate(&handles[ithread]);
}
#pragma omp barrier
cublasHandle_t handle = handles[igpu];
cublasCreate(&handle);
double* d_tau;
double* d_r2;
@@ -102,49 +98,32 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
double* d_t1;
double* d_tmp_cc;
if (ithread < NGPUS) {
lda = nO * nO;
cudaMalloc((void **)&d_tau, lda * nV * nV * sizeof(double));
cublasSetMatrix(nO*nO, nV*nV, sizeof(double), tau, lda, d_tau, lda);
d_taus[igpu] = d_tau;
lda = nO * nO;
cudaMalloc((void **)&d_r2, lda * nV * nV * sizeof(double));
d_r2s[igpu] = d_r2;
lda = cholesky_mo_num * nV;
cudaMalloc((void **)&d_cc_space_v_vv_chol, lda * nV * sizeof(double));
cublasSetMatrix(cholesky_mo_num*nV, nV, sizeof(double), cc_space_v_vv_chol, lda, d_cc_space_v_vv_chol, lda);
d_cc_space_v_vv_chols[igpu] = d_cc_space_v_vv_chol;
lda = cholesky_mo_num * nV;
cudaMalloc((void **)&d_cc_space_v_vo_chol, lda * nO * sizeof(double));
cublasSetMatrix(cholesky_mo_num*nV, nO, sizeof(double), cc_space_v_vo_chol, lda, d_cc_space_v_vo_chol, lda);
d_cc_space_v_vo_chols[igpu] = d_cc_space_v_vo_chol;
lda = nO;
cudaMalloc((void **)&d_t1, nO * nV * sizeof(double));
cublasSetMatrix(nO, nV, sizeof(double), t1, lda, d_t1, lda);
d_t1s[igpu] = d_t1;
lda = cholesky_mo_num * nV;
cudaMalloc((void **)&d_tmp_cc, lda * nV * sizeof(double));
d_tmp_ccs[igpu] = d_tmp_cc;
alpha=1.0; beta=0.0;
m=cholesky_mo_num*nV; n=nV; k=nO;
A = d_cc_space_v_vo_chol; B = d_t1; C = d_tmp_cc;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, m, B, k, &beta, C, m);
}
#pragma omp barrier
d_tau = d_taus[igpu] ;
d_r2 = d_r2s[igpu] ;
d_cc_space_v_vv_chol = d_cc_space_v_vv_chols[igpu] ;
d_cc_space_v_vo_chol = d_cc_space_v_vo_chols[igpu] ;
d_t1 = d_t1s[igpu] ;
d_tmp_cc = d_tmp_ccs[igpu] ;
double* d_tmp_cc2;
cudaMalloc((void **)&d_tmp_cc2, cholesky_mo_num*nV*sizeof(double));
@@ -213,7 +192,6 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
cudaFree(d_B1);
cudaFree(d_tmp_cc2);
if (igpu < NGPUS) {
cudaFree(d_cc_space_v_vo_chol);
cudaFree(d_cc_space_v_vv_chol);
cudaFree(d_tau);
@@ -233,8 +211,6 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
cudaFree(d_r2);
cublasDestroy(handle);
}
}
}
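
For readers skimming the diff, the overall pattern it implements is: one OpenMP thread per visible GPU, each thread binding its device, creating its own cuBLAS handle, uploading private device copies of the input matrices, and running its share of the DGEMMs before freeing its buffers. The standalone program below is a minimal, illustrative sketch of that pattern only, not the actual r2 residual code: the matrix names, sizes, and the single GEMM are placeholders, and error checking is omitted.

/* Minimal sketch (not the actual r2 code): one OpenMP thread per GPU,
 * each with its own cuBLAS handle and device buffers.  Matrix contents,
 * sizes and the single DGEMM are placeholders; error checks are omitted. */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main(void)
{
    const int m = 512, n = 512, k = 512;
    double *A = (double *) malloc((size_t) m * k * sizeof(double));
    double *B = (double *) malloc((size_t) k * n * sizeof(double));
    double *C = (double *) malloc((size_t) m * n * sizeof(double));
    for (int i = 0; i < m * k; i++) A[i] = 1.0;
    for (int i = 0; i < k * n; i++) B[i] = 2.0;

    int ngpus = 0;
    cudaGetDeviceCount(&ngpus);            /* use every visible device */
    if (ngpus < 1) return 1;

    /* One host thread per GPU: bind the device, create a handle,
     * upload private copies of the inputs, run the GEMM, clean up. */
    #pragma omp parallel num_threads(ngpus)
    {
        int igpu = omp_get_thread_num();
        cudaSetDevice(igpu);

        cublasHandle_t handle;
        cublasCreate(&handle);

        double *d_A, *d_B, *d_C;
        cudaMalloc((void **) &d_A, (size_t) m * k * sizeof(double));
        cudaMalloc((void **) &d_B, (size_t) k * n * sizeof(double));
        cudaMalloc((void **) &d_C, (size_t) m * n * sizeof(double));

        /* cublasSetMatrix(rows, cols, elemSize, src, lda, dst, ldb):
         * column-major host-to-device copy, as used in the diff above. */
        cublasSetMatrix(m, k, sizeof(double), A, m, d_A, m);
        cublasSetMatrix(k, n, sizeof(double), B, k, d_B, k);

        double alpha = 1.0, beta = 0.0;
        /* C = alpha*A*B + beta*C; leading dimensions = number of rows */
        cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                    m, n, k, &alpha, d_A, m, d_B, k, &beta, d_C, m);

        /* In the real code each GPU handles a different slice of the
         * work; here every GPU computes the same product, so only one
         * thread copies the result back. */
        if (igpu == 0)
            cublasGetMatrix(m, n, sizeof(double), d_C, m, C, m);

        cudaFree(d_A);
        cudaFree(d_B);
        cudaFree(d_C);
        cublasDestroy(handle);
    }

    printf("C[0] = %f (expected %f)\n", C[0], 2.0 * k);
    free(A); free(B); free(C);
    return 0;
}

Built with something along the lines of nvcc -Xcompiler -fopenmp multi_gpu_sketch.c -lcublas (the file name is arbitrary), this prints 1024.0 on any machine with at least one CUDA device; with several devices, each runs the same GEMM independently, which is the dispatch structure the commit applies to the actual tensor contractions.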