GPU acceleration
Commit 7847ccc674 (parent 5c1d3987a3)
@@ -24,8 +24,6 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
  double* B;
  double* C;

  double* tmp_cc = malloc(cholesky_mo_num*nV*nV*sizeof(double));

  cublasHandle_t handle;
  cublasCreate(&handle);

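Note (not part of the commit): cublasCreate returns a cublasStatus_t, so a guard like the hedged sketch below can catch a failed cuBLAS initialization before any GEMM is issued. The error-handling style is an assumption, not code from this repository.

  /* Hedged sketch: check cuBLAS handle creation (assumes <stdio.h> and <stdlib.h> are included). */
  cublasHandle_t handle;
  cublasStatus_t st = cublasCreate(&handle);
  if (st != CUBLAS_STATUS_SUCCESS) {
    fprintf(stderr, "cublasCreate failed with status %d\n", (int) st);
    abort();
  }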
@@ -62,30 +60,15 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
  m=cholesky_mo_num*nV; n=nV; k=nO;
  A = d_cc_space_v_vo_chol; B = d_t1; C = d_tmp_cc;
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, m, B, k, &beta, C, m);
  cublasGetMatrix(m, n, sizeof(double), d_tmp_cc, lda, tmp_cc, lda);

  // ---

  /*
  m=cholesky_mo_num*nV; n=nV; k=nO;
  alpha=1.0; beta=0.0;
  lda=m ; ldb=k ; ldc=m;
  A=cc_space_v_vo_chol; B=t1; C=tmp_cc;
  dgemm_("N","N", &m, &n, &k, &alpha, A, &lda, B, &ldb, &beta, C, &ldc);
  */

  #pragma omp parallel
  {
    double* tmp_cc2 = malloc(cholesky_mo_num*nV*sizeof(double));
    double* d_tmp_cc2;
    cudaMalloc((void **)&d_tmp_cc2, cholesky_mo_num*nV*sizeof(double));

    double* B1 = malloc(nV*nV*BLOCK_SIZE*sizeof(double));
    double* d_B1;
    cudaMalloc((void**)&d_B1, nV*nV*BLOCK_SIZE*sizeof(double));

    double* tmpB1 = malloc(nV*BLOCK_SIZE*nV*sizeof(double));
    double* d_tmpB1;
    cudaMalloc((void**)&d_tmpB1, nV*BLOCK_SIZE*nV*sizeof(double));

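For orientation (not from the commit): with cuBLAS's column-major convention, the active cublasDgemm above computes tmp_cc(cholesky_mo_num*nV, nV) as C = alpha*A*B + beta*C, where A is d_cc_space_v_vo_chol viewed as (cholesky_mo_num*nV) x nO and B is d_t1 of shape nO x nV, with lda = ldc = m and ldb = k. A self-contained hedged sketch of that call pattern, with hypothetical sizes, is:

  /* Hedged sketch of the column-major GEMM convention used above:
   * C(m,n) = alpha * A(m,k) * B(k,n) + beta * C(m,n). Sizes are stand-ins. */
  const int m = 4, n = 3, k = 2;
  const double alpha = 1.0, beta = 0.0;
  double *d_A, *d_B, *d_C;
  cudaMalloc((void**)&d_A, m*k*sizeof(double));
  cudaMalloc((void**)&d_B, k*n*sizeof(double));
  cudaMalloc((void**)&d_C, m*n*sizeof(double));
  /* ... fill d_A and d_B, e.g. with cublasSetMatrix ... */
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
              &alpha, d_A, m, d_B, k, &beta, d_C, m);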
@@ -121,8 +104,6 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
      B=d_tmp_cc2; ldb=cholesky_mo_num;
      C=d_tmpB1 ; ldc=nV*BLOCK_SIZE;
      cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, lda, &beta, C, ldc);
      // cublasGetMatrix(m, n, sizeof(double), d_tmpB1, ldc, tmpB1, ldc);

      for (size_t bet=iblock ; bet<(nV < iblock+BLOCK_SIZE ? nV : iblock+BLOCK_SIZE) ; ++bet) {

@@ -132,20 +113,6 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
        B = d_tmpB1; ldb = nV;
        C = &(d_B1[nV*nV*(bet-iblock)]) ; ldc = nV;
        cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nV, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
        // for (size_t b=0 ; b<nV ; ++b) {
        /*
        cublasDcopy(handle, nV, &(d_tmpB1[nV*(bet-iblock + BLOCK_SIZE*b)]), 1,
                    &(d_B1[nV*(b + nV*(bet-iblock))]), 1);
        */
        /*
        for (size_t a=0 ; a<nV ; ++a) {
          B1[a + nV*(b + nV*(bet-iblock))] =
            tmpB1[a + nV*(bet-iblock + BLOCK_SIZE*b)];
        }
        */
        // }
      }
      // cublasSetMatrix(nV*nV, mbs, sizeof(double), B1, nV*nV, d_B1, nV*nV);

      alpha=1.0; beta=1.0;
      m=nO*nO; n=mbs; k=nV*nV;
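Side note (not part of the commit): cublasDgeam evaluates C = alpha*op(A) + beta*op(B) entirely on the device, which is why it can stand in for the commented-out per-row cublasDcopy loop and the host-side reordering above. A hedged, self-contained sketch of the common Dgeam use case, an out-of-place transpose, with hypothetical names and sizes:

  /* Hedged sketch: out-of-place transpose via cublasDgeam, d_At = transpose of d_A.
   * d_A is rows x cols in column-major order; sizes are stand-ins, not from the commit. */
  const int rows = 8, cols = 5;
  const double one = 1.0, zero = 0.0;
  double *d_A, *d_At;
  cudaMalloc((void**)&d_A,  rows*cols*sizeof(double));
  cudaMalloc((void**)&d_At, cols*rows*sizeof(double));
  cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, cols, rows,
              &one, d_A, rows, &zero, d_At, cols, d_At, cols);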
@@ -158,13 +125,11 @@ void compute_r2_space_chol_gpu(const int nO, const int nV, const int cholesky_mo
      }
    }

    free(tmp_cc2);
    free(B1);
    free(tmpB1);
  }
  lda=nO*nO;
  cublasGetMatrix(nO*nO, nV*nV, sizeof(double), d_r2, lda, r2, lda);
  free(tmp_cc);

  cudaFree(d_cc_space_v_vo_chol);
  cudaFree(d_cc_space_v_vv_chol);
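As a reference (not from the commit): cublasSetMatrix and cublasGetMatrix copy column-major matrices between host and device memory, which is how d_r2 is returned into r2 above. A minimal hedged round-trip sketch with hypothetical names and sizes:

  /* Hedged sketch: host <-> device round trip of a column-major matrix. */
  const int rows = 4, cols = 4;
  double* h_A = malloc(rows*cols*sizeof(double));
  double* d_A;
  cudaMalloc((void**)&d_A, rows*cols*sizeof(double));
  cublasSetMatrix(rows, cols, sizeof(double), h_A, rows, d_A, rows);  /* host -> device */
  cublasGetMatrix(rows, cols, sizeof(double), d_A, rows, h_A, rows);  /* device -> host */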