1
0
mirror of https://gitlab.com/scemama/qp_plugins_scemama.git synced 2024-11-07 14:43:41 +01:00
qp_plugins_scemama/devel/ccsd_gpu/gpu.c

2119 lines
73 KiB
C
Raw Normal View History

2023-07-16 09:54:58 +02:00
#include <stdio.h>
#include <stdlib.h>
2023-07-16 20:34:35 +02:00
#include <string.h>
2023-07-16 17:27:14 +02:00
#include <omp.h>
2023-07-16 09:54:58 +02:00
#include <cublas_v2.h>
2023-07-16 11:42:42 +02:00
#include <cuda_runtime.h>
2023-08-02 16:17:43 +02:00
#include "gpu.h"
2023-08-21 12:34:55 +02:00
#include "assert.h"
2023-07-16 11:42:42 +02:00
2023-08-03 17:29:57 +02:00
/* Upload the t1 and t2 amplitudes to every GPU and build the tau and
 * tau_x intermediates on the devices:
 *
 *   tau  (i,j,a,b) = t2(i,j,a,b) + t1(j,b) * t1(i,a)   (first dgeam loop)
 *   tau_x(:,:,a,b) = 2*tau(:,:,a,b) - tau(:,:,b,a)     (second dgeam loop)
 *
 * tau/tau_x are computed once on GPU 0, copied back to the host, and
 * broadcast to the remaining GPUs after the barrier.
 *
 * data : per-GPU buffers (device pointers t1, t2, tau, tau_x)
 * nO   : number of occupied orbitals
 * nV   : number of virtual orbitals
 * t1   : host t1 amplitudes, nO x nV
 * t2   : host t2 amplitudes, (nO*nO) x (nV*nV)
 */
void gpu_upload(gpu_data* data,
                int nO, int nV,
                double* t1,
                double* t2)
{
  int ngpus = 1;
  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);

  /* Host staging buffers used to broadcast tau/tau_x to all devices. */
  double * tau = malloc(nO*nO*nV*nV * sizeof(double));
  assert (tau != NULL);
  double * tau_x = malloc(nO*nO*nV*nV * sizeof(double));
  assert (tau_x != NULL);

  #pragma omp parallel num_threads(ngpus)
  {
    /* Leading dimensions are thread-local.  (A previous version kept a
     * single `lda` at function scope, which every device thread wrote
     * concurrently -- a data race.) */
    size_t lda, ldb, ldc;
    /* cublasSetMatrix returns cublasStatus_t; keep the status in the
     * matching type (CUBLAS_STATUS_SUCCESS == 0, so the check is the
     * same as before). */
    cublasStatus_t blasStat = CUBLAS_STATUS_SUCCESS;
    size_t igpu = omp_get_thread_num();
    cudaSetDevice(igpu);

    /* Upload t1 (nO x nV) and t2 ((nO*nO) x (nV*nV)) to this device. */
    double* d_t1 = data[igpu].t1;
    lda = nO;
    blasStat = cublasSetMatrix(nO, nV, sizeof(double), t1, lda, d_t1, lda);
    assert (blasStat == CUBLAS_STATUS_SUCCESS);

    double* d_t2 = data[igpu].t2;
    lda = nO*nO;
    blasStat = cublasSetMatrix(nO*nO, nV*nV, sizeof(double), t2, lda, d_t2, lda);
    assert (blasStat == CUBLAS_STATUS_SUCCESS);

    double alpha, beta;
    double* A;
    double* B;
    double* C;
    cublasHandle_t handle;
    cublasCreate(&handle);
    cudaStream_t stream[nV];
    double* d_tau = data[igpu].tau;
    double* d_tau_x = data[igpu].tau_x;

    /* NOTE: a previous version uploaded the still-uninitialized host
     * tau_x buffer to every device at this point.  That read of
     * uninitialized memory served no purpose: d_tau_x is fully
     * overwritten below on GPU 0 and re-uploaded after the barrier on
     * the other GPUs.  The upload has been removed. */

    if (igpu == 0) {
      for (size_t i=0 ; i<nV ; ++i) {
        cudaStreamCreate(&(stream[i]));
      }
      /* tau(:,j,:,b) = t2(:,j,:,b) + t1(j,b) * t1 ; one (j,b) slice per
       * dgeam, round-robined over the streams. */
      alpha = 1.0;
      for (size_t j=0 ; j<nO ; ++j) {
        for (size_t b=0 ; b<nV ; ++b) {
          cublasSetStream(handle, stream[b]);
          beta = t1[j+b*nO];
          A = &(d_t2[nO*(j + nO*nV*b)]); lda = nO*nO;
          B = d_t1; ldb = nO;
          C = &(d_tau[nO*(j + nO*nV*b)]); ldc = nO*nO;
          cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
        }
      }
      /* All of d_tau must be ready before the tau_x pass reads it. */
      cudaDeviceSynchronize();

      /* tau_x(:,:,a,b) = 2*tau(:,:,a,b) - tau(:,:,b,a) */
      alpha = 2.0;
      beta = -1.0;
      for (size_t b=0 ; b<nV ; ++b) {
        for (size_t a=0 ; a<nV ; ++a) {
          cublasSetStream(handle, stream[a]);
          A = &(d_tau[nO*nO*(a + nV*b)]); lda = nO;
          B = &(d_tau[nO*nO*(b + nV*a)]); ldb = nO;
          C = &(d_tau_x[nO*nO*(a + nV*b)]); ldc = nO;
          cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
        }
      }
      for (size_t i=0 ; i<nV ; ++i) {
        cudaStreamDestroy(stream[i]);
      }
      cublasSetStream(handle, NULL);

      /* Stage the results on the host so the other devices can copy them. */
      lda = nO*nO;
      cublasGetMatrix(nO*nO, nV*nV, sizeof(double), d_tau, lda, tau, lda);
      cublasGetMatrix(nO*nO, nV*nV, sizeof(double), d_tau_x, lda, tau_x, lda);
    }
    #pragma omp barrier
    if (igpu > 0) {
      /* Broadcast tau/tau_x computed on GPU 0 to this device. */
      lda = nO * nO;
      cublasSetMatrix(nO*nO, nV*nV, sizeof(double), tau, lda, d_tau, lda);
      cublasSetMatrix(nO*nO, nV*nV, sizeof(double), tau_x, lda, d_tau_x, lda);
    }
    cublasDestroy(handle);
  }
  free(tau);
  free(tau_x);
}
2023-08-04 14:07:49 +02:00
2023-08-05 00:50:58 +02:00
2023-08-04 15:49:48 +02:00
/* Build the occupied-occupied block of the CC one-body intermediate:
 *
 *   H_oo = cc_space_f_oo + tau_kau^T . cc_space_v_vo_chol
 *
 * where tau_kau(k,a,u) is contracted per occupied index u from tau_x
 * and the Cholesky-decomposed integrals cc_space_v_ov_chol (first
 * dgemm in the u-loop).  The result is computed on one device and
 * broadcast to all others.
 *
 * data : per-GPU buffers (integrals, tau_x, H_oo, dimensions)
 * igpu : preferred device; reduced modulo the number of visible GPUs
 */
void compute_h_oo_chol_gpu(gpu_data* data, int igpu)
{
  cudaError_t cudaStat;
  int ngpus = 1;
  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
  igpu = igpu % ngpus;

  const size_t cholesky_mo_num = data[igpu].cholesky_mo_num;
  const size_t nO = data[igpu].nO;
  const size_t nV = data[igpu].nV;
  cudaSetDevice(igpu);

  size_t m,n,k, lda, ldb, ldc;
  double alpha, beta;
  double* A;
  double* B;
  double* C;
  cudaStream_t stream[nV];
  cublasHandle_t handle;
  cublasCreate(&handle);

  double* d_H_oo = data[igpu].H_oo;
  double* d_tau_x = data[igpu].tau_x;
  double* d_cc_space_f_oo = data[igpu].cc_space_f_oo;
  double* d_cc_space_v_vo_chol = data[igpu].cc_space_v_vo_chol;
  double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;

  /* Device work arrays. */
  double* d_tau_kau;
  cudaStat = gpu_malloc((void **)&d_tau_kau, cholesky_mo_num*nV*nO * sizeof(double));
  assert(cudaStat == cudaSuccess);
  double* d_tmp_ovv;
  cudaStat = gpu_malloc((void **)&d_tmp_ovv, nO*nV*nV * sizeof(double));
  assert(cudaStat == cudaSuccess);
  double* d_tmp_vov;
  cudaStat = gpu_malloc((void **)&d_tmp_vov, nV*nO*nV * sizeof(double));
  assert(cudaStat == cudaSuccess);

  for (size_t i=0 ; i<nV ; ++i) {
    cudaStreamCreate(&(stream[i]));
  }
  alpha = 1.0;
  beta = 0.0;
  for (size_t u=0 ; u<nO ; ++u) {
    /* Gather tau_x(u,:,:,:) into a contiguous (o,v,v) buffer. */
    cublasDcopy(handle, nO*nV*nV, &(d_tau_x[u]), nO, d_tmp_ovv, 1);
    /* Transpose each (nO x nV) slice to (nV x nO), one slice per stream. */
    for (size_t b=0 ; b<nV ; ++b) {
      cublasSetStream(handle, stream[b]);
      A = &(d_tmp_ovv[nO*nV*b]); lda = nO;
      B = &(d_tmp_ovv[nO*nV*b]); ldb = nO;
      C = &(d_tmp_vov[nV*nO*b]); ldc = nV;
      cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
    }
    cudaDeviceSynchronize();
    cublasSetStream(handle, NULL);
    /* tau_kau(:,:,u) = v_ov_chol . tmp_vov^T */
    alpha = 1.0;
    beta = 0.0;
    m=cholesky_mo_num; n=nV; k=nO*nV;
    A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
    B=d_tmp_vov; ldb=nV;
    C=&(d_tau_kau[cholesky_mo_num*nV*u]); ldc=cholesky_mo_num;
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
  }
  for (size_t i=0 ; i<nV ; ++i) {
    cudaStreamDestroy(stream[i]);
  }
  cudaFree(d_tmp_vov);
  cudaFree(d_tmp_ovv);

  /* H_oo = f_oo + tau_kau^T . v_vo_chol */
  cublasDcopy(handle, nO*nO, d_cc_space_f_oo, 1, d_H_oo, 1);
  alpha = 1.0;
  beta = 1.0;
  m=nO; n=nO; k=cholesky_mo_num*nV;
  A=d_tau_kau; lda=cholesky_mo_num*nV;
  B=d_cc_space_v_vo_chol; ldb=cholesky_mo_num*nV;
  C=d_H_oo; ldc=nO;
  cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
  cudaFree(d_tau_kau);

  /* Broadcast H_oo to the other devices through a host buffer. */
  double* H_oo = malloc(nO*nO*sizeof(double));
  assert (H_oo != NULL);
  cublasGetMatrix(nO, nO, sizeof(double), d_H_oo, nO, H_oo, nO);
  for (size_t i=0 ; i<ngpus ; ++i) {
    if (i != igpu) {
      double* d_H_oo = data[i].H_oo;
      cudaSetDevice(i);
      cublasSetMatrix(nO, nO, sizeof(double), H_oo, nO, d_H_oo, nO);
    }
  }
  /* The broadcast loop may leave another device current; restore the
   * device that owns `handle` before destroying it. */
  cudaSetDevice(igpu);
  free(H_oo);
  cublasDestroy(handle);
}
2023-08-04 15:49:48 +02:00
/* Build the virtual-occupied block of the CC one-body intermediate:
 *
 *   H_vo = cc_space_f_vo
 *        + 2 * v_vo_chol^T . (v_ov_chol . t1)       (tmp_k term)
 *        -     v_ov_chol^T . tmp2                   (exchange-like term)
 *
 * (the exact tensor contractions are spelled out by the dgemm calls
 * below).  The result is computed on one device and broadcast to all
 * others.
 *
 * data : per-GPU buffers (integrals, t1, H_vo, dimensions)
 * igpu : preferred device; reduced modulo the number of visible GPUs
 */
void compute_h_vo_chol_gpu(gpu_data* data, int igpu)
{
  cudaError_t cudaStat;
  int ngpus = 1;
  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
  igpu = igpu % ngpus;

  const size_t cholesky_mo_num = data[igpu].cholesky_mo_num;
  const size_t nO = data[igpu].nO;
  const size_t nV = data[igpu].nV;
  cudaSetDevice(igpu);

  size_t m,n,k, lda, ldb, ldc;
  double alpha, beta;
  double* A;
  double* B;
  double* C;
  cudaStream_t stream[nV];
  cublasHandle_t handle;
  cublasCreate(&handle);

  double* d_t1 = data[igpu].t1;
  double* d_H_vo = data[igpu].H_vo;
  double* d_cc_space_f_vo = data[igpu].cc_space_f_vo;
  double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;
  double* d_cc_space_v_vo_chol = data[igpu].cc_space_v_vo_chol;

  /* H_vo starts from the Fock block. */
  cublasDcopy(handle, nV*nO, d_cc_space_f_vo, 1, d_H_vo, 1);

  /* tmp_k = 2 * v_ov_chol . vec(t1) */
  double* d_tmp_k;
  cudaStat = gpu_malloc((void **)&d_tmp_k, cholesky_mo_num * sizeof(double));
  assert(cudaStat == cudaSuccess);
  alpha = 2.0;
  beta = 0.0;
  m=cholesky_mo_num; n=1; k=nO*nV;
  A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
  B=d_t1; ldb=nO*nV;
  C=d_tmp_k; ldc=cholesky_mo_num;
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);

  /* H_vo += v_vo_chol^T . tmp_k */
  alpha = 1.0;
  beta = 1.0;
  m=nV*nO; n=1; k=cholesky_mo_num;
  A=d_cc_space_v_vo_chol; lda=cholesky_mo_num;
  B=d_tmp_k; ldb=cholesky_mo_num;
  C=d_H_vo; ldc=nV*nO;
  cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
  cudaFree(d_tmp_k);

  /* tmp(k,i,j) = sum_a v_ov_chol(k,i,a) * t1(j,a) */
  double* d_tmp;
  cudaStat = gpu_malloc((void **)&d_tmp, cholesky_mo_num*nO*nO * sizeof(double));
  assert(cudaStat == cudaSuccess);
  alpha = 1.0;
  beta = 0.0;
  m=cholesky_mo_num*nO; n=nO; k=nV;
  A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
  B=d_t1; ldb=nO;
  C=d_tmp; ldc=cholesky_mo_num*nO;
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);

  /* tmp2(k,j,i) = tmp(k,i,j): swap the two occupied indices. */
  double* d_tmp2;
  cudaStat = gpu_malloc((void **)&d_tmp2, cholesky_mo_num*nO*nO * sizeof(double));
  assert(cudaStat == cudaSuccess);

  for (size_t i=0 ; i<nV ; ++i) {
    cudaStreamCreate(&(stream[i]));
  }
  for (size_t i=0 ; i<nO ; ++i) {
    for (size_t j=0 ; j<nO ; ++j) {
      /* Only nV streams exist; index modulo nV so that nO > nV cannot
       * read past the end of the stream array. */
      cublasSetStream(handle, stream[j % nV]);
      cublasDcopy(handle, cholesky_mo_num, &(d_tmp [cholesky_mo_num*(i+nO*j)]), 1,
                                           &(d_tmp2[cholesky_mo_num*(j+nO*i)]), 1);
    }
  }
  for (size_t i=0 ; i<nV ; ++i) {
    cudaStreamDestroy(stream[i]);
  }
  cublasSetStream(handle, NULL);
  cudaFree(d_tmp);

  /* H_vo -= v_ov_chol^T . tmp2 */
  alpha = -1.0;
  beta = 1.0;
  m=nV; n=nO; k=cholesky_mo_num*nO;
  A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
  B=d_tmp2; ldb=cholesky_mo_num*nO;
  C=d_H_vo; ldc=nV;
  cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
  cudaFree(d_tmp2);

  /* Broadcast H_vo to the other devices through a host buffer. */
  double* H_vo = malloc(nV*nO*sizeof(double));
  assert (H_vo != NULL);
  cublasGetMatrix(nV, nO, sizeof(double), d_H_vo, nV, H_vo, nV);
  for (size_t i=0 ; i<ngpus ; ++i) {
    if (i != igpu) {
      double* d_H_vo = data[i].H_vo;
      cudaSetDevice(i);
      cublasSetMatrix(nV, nO, sizeof(double), H_vo, nV, d_H_vo, nV);
    }
  }
  /* The broadcast loop may leave another device current; restore the
   * device that owns `handle` before destroying it. */
  cudaSetDevice(igpu);
  free(H_vo);
  cublasDestroy(handle);
}
2023-08-04 15:49:48 +02:00
/* Build the virtual-virtual block of the CC one-body intermediate:
 *
 *   H_vv = cc_space_f_vv - tau_kia^T . cc_space_v_ov_chol
 *
 * where tau_kia(k,i,a) is contracted per virtual index a from tau_x
 * and the Cholesky-decomposed integrals cc_space_v_ov_chol (dgemm in
 * the a-loop).  The result is computed on one device and broadcast to
 * all others.
 *
 * data : per-GPU buffers (integrals, tau_x, H_vv, dimensions)
 * igpu : preferred device; reduced modulo the number of visible GPUs
 */
void compute_h_vv_chol_gpu(gpu_data* data, int igpu)
{
  cudaError_t cudaStat;
  int ngpus = 1;
  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
  igpu = igpu % ngpus;

  const size_t cholesky_mo_num = data[igpu].cholesky_mo_num;
  const size_t nO = data[igpu].nO;
  const size_t nV = data[igpu].nV;
  cudaSetDevice(igpu);

  size_t m,n,k, lda, ldb, ldc;
  double alpha, beta;
  double* A;
  double* B;
  double* C;
  cudaStream_t stream[nV];
  cublasHandle_t handle;
  cublasCreate(&handle);

  double* d_H_vv = data[igpu].H_vv;
  double* d_tau_x = data[igpu].tau_x;
  double* d_cc_space_f_vv = data[igpu].cc_space_f_vv;
  double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;

  /* Device work arrays. */
  double* d_tau_kia;
  cudaStat = gpu_malloc((void **)&d_tau_kia, cholesky_mo_num*nO*nV * sizeof(double));
  assert(cudaStat == cudaSuccess);
  double* d_tmp_oov;
  cudaStat = gpu_malloc((void **)&d_tmp_oov, nO*nO*nV * sizeof(double));
  assert(cudaStat == cudaSuccess);

  alpha = 1.0;
  beta = 0.0;
  for (size_t i=0 ; i<nV ; ++i) {
    cudaStreamCreate(&(stream[i]));
  }
  for (size_t a=0 ; a<nV ; ++a) {
    /* Gather tau_x(:,:,a,:) into a contiguous (o,o,v) buffer, one
     * (o,o) slice per stream. */
    for (size_t b=0 ; b<nV ; ++b) {
      cublasSetStream(handle, stream[b]);
      cublasDcopy(handle, nO*nO, &(d_tau_x[nO*nO*(a+nV*b)]), 1, &(d_tmp_oov[nO*nO*b]), 1);
    }
    cudaDeviceSynchronize();
    cublasSetStream(handle, NULL);
    /* tau_kia(:,:,a) = v_ov_chol . tmp_oov^T */
    alpha = 1.0;
    beta = 0.0;
    m=cholesky_mo_num; n=nO; k=nO*nV;
    A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
    B=d_tmp_oov; ldb=nO;
    C=&(d_tau_kia[cholesky_mo_num*nO*a]); ldc=cholesky_mo_num;
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
  }
  for (size_t i=0 ; i<nV ; ++i) {
    cudaStreamDestroy(stream[i]);
  }
  cudaFree(d_tmp_oov);

  /* H_vv = f_vv - tau_kia^T . v_ov_chol */
  cublasDcopy(handle, nV*nV, d_cc_space_f_vv, 1, d_H_vv, 1);
  alpha = -1.0;
  beta = 1.0;
  m=nV; n=nV; k=cholesky_mo_num*nO;
  A=d_tau_kia; lda=cholesky_mo_num*nO;
  B=d_cc_space_v_ov_chol; ldb=cholesky_mo_num*nO;
  C=d_H_vv; ldc=nV;
  cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
  cudaFree(d_tau_kia);

  /* Broadcast H_vv to the other devices through a host buffer. */
  double* H_vv = malloc(nV*nV*sizeof(double));
  assert (H_vv != NULL);
  cublasGetMatrix(nV, nV, sizeof(double), d_H_vv, nV, H_vv, nV);
  for (size_t i=0 ; i<ngpus ; ++i) {
    if (i != igpu) {
      double* d_H_vv = data[i].H_vv;
      cudaSetDevice(i);
      cublasSetMatrix(nV, nV, sizeof(double), H_vv, nV, d_H_vv, nV);
    }
  }
  /* The broadcast loop may leave another device current; restore the
   * device that owns `handle` before destroying it. */
  cudaSetDevice(igpu);
  free(H_vv);
  cublasDestroy(handle);
}
2023-08-03 17:29:57 +02:00
void compute_r2_space_chol_gpu(gpu_data* data, int nO, int nV, double* t1, double* r2, double* max_r2)
{
2023-08-21 12:34:55 +02:00
const size_t cholesky_mo_num = data->cholesky_mo_num;
2023-08-03 17:29:57 +02:00
int ngpus = 1;
if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);
2023-08-02 16:17:43 +02:00
2023-08-03 02:12:15 +02:00
double* J1 = malloc(nO*nV*nV*nO*sizeof(double));
2023-08-03 03:21:32 +02:00
double* K1 = malloc(nO*nV*nV*nO*sizeof(double));
2023-08-03 02:12:15 +02:00
2023-07-16 18:05:37 +02:00
#pragma omp parallel num_threads(ngpus)
2023-07-16 09:54:58 +02:00
{
2023-08-21 12:34:55 +02:00
cudaError_t cudaStat;
size_t m,n,k, lda, ldb, ldc;
2023-07-16 18:05:37 +02:00
double alpha, beta;
double* A;
double* B;
double* C;
2023-08-03 17:29:57 +02:00
cudaStream_t stream[nV];
2023-07-16 15:39:37 +02:00
2023-08-21 12:34:55 +02:00
size_t igpu = omp_get_thread_num();
2023-07-16 18:05:37 +02:00
cudaSetDevice(igpu);
2023-07-16 17:27:14 +02:00
2023-08-03 19:46:41 +02:00
cublasHandle_t handle;
2023-07-16 18:05:37 +02:00
cublasCreate(&handle);
2023-07-16 17:27:14 +02:00
2023-08-03 17:29:57 +02:00
double* d_r2;
lda = nO * nO;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_r2, lda * nV * nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 17:29:57 +02:00
cudaMemset(d_r2, 0, nO*nO*nV*nV*sizeof(double));
memset(r2, 0, nO*nO*nV*nV*sizeof(double));
2023-08-02 18:31:44 +02:00
2023-08-02 16:17:43 +02:00
double* d_cc_space_v_oo_chol = data[igpu].cc_space_v_oo_chol;
double* d_cc_space_v_ov_chol = data[igpu].cc_space_v_ov_chol;
double* d_cc_space_v_vo_chol = data[igpu].cc_space_v_vo_chol;
double* d_cc_space_v_vv_chol = data[igpu].cc_space_v_vv_chol;
double* d_cc_space_v_oooo = data[igpu].cc_space_v_oooo;
double* d_cc_space_v_vooo = data[igpu].cc_space_v_vooo;
2023-08-03 19:46:41 +02:00
double* d_cc_space_v_oovv = data[igpu].cc_space_v_oovv;
2023-08-02 16:17:43 +02:00
double* d_cc_space_v_vvoo = data[igpu].cc_space_v_vvoo;
double* d_cc_space_v_oovo = data[igpu].cc_space_v_oovo;
double* d_cc_space_v_ovvo = data[igpu].cc_space_v_ovvo;
2023-08-03 01:21:16 +02:00
double* d_cc_space_v_ovov = data[igpu].cc_space_v_ovov;
2023-08-02 16:17:43 +02:00
double* d_cc_space_v_ovoo = data[igpu].cc_space_v_ovoo;
double* d_cc_space_f_vo = data[igpu].cc_space_f_vo;
2023-08-03 17:29:57 +02:00
double* d_tau = data[igpu].tau;
double* d_t1 = data[igpu].t1;
double* d_t2 = data[igpu].t2;
double* d_H_oo = data[igpu].H_oo;
double* d_H_vv = data[igpu].H_vv;
2023-07-16 17:27:14 +02:00
2023-08-03 02:53:50 +02:00
double* d_K1;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_K1, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
2023-07-16 21:18:57 +02:00
#pragma omp sections
2023-07-16 20:34:35 +02:00
{
#pragma omp section
{
2023-08-03 02:53:50 +02:00
double* d_J1;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_J1, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-16 20:34:35 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 0.0;
A = d_cc_space_v_ovvo; lda = nO*nV;
B = d_cc_space_v_ovvo; ldb = nO*nV;
C = d_J1; ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO*nV, nV*nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-07-16 20:34:35 +02:00
2023-08-03 02:53:50 +02:00
double* d_X_ovoo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_ovoo, nO*nV*nO*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
alpha = 0.0;
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 19:46:41 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_X_ovoo[nO*nV*(i+nO*j)]); lda = nO;
B = &(d_cc_space_v_ovoo[nO*nV*(j+nO*i)]); ldb = nO;
C = &(d_X_ovoo[nO*nV*(i+nO*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-07-16 20:34:35 +02:00
2023-08-03 02:53:50 +02:00
double* d_Y_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-16 20:34:35 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 0.0;
m=nO*nV*nO; n=nV; k=nO;
A=d_X_ovoo; lda=nO*nV*nO;
B=d_t1; ldb=nO;
C=d_Y_ovov; ldc=nO*nV*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X_ovoo);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = -1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_J1[nO*nV*(i+nV*j)]); lda = nO;
B = &(d_Y_ovov[nO*nV*(j+nO*i)]); ldb = nO;
C = &(d_J1[nO*nV*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
double* d_tmp_cc;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_tmp_cc, cholesky_mo_num*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 0.0;
m=cholesky_mo_num*nV; n=nO; k=nV;
A=d_cc_space_v_vv_chol; lda=cholesky_mo_num*nV;
B=d_t1; ldb=nO;
C=d_tmp_cc; ldc=cholesky_mo_num*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
double* d_J1_tmp;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_J1_tmp, nV*nO*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 0.0;
m=nV*nO; n=nV*nO; k=cholesky_mo_num;
A=d_tmp_cc; lda=cholesky_mo_num;
B=d_cc_space_v_vo_chol; ldb=cholesky_mo_num;
C=d_J1_tmp; ldc=nV*nO;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
cudaFree(d_tmp_cc);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_J1[nO*nV*nV*i]); lda = nO*nV;
B = &(d_J1_tmp[nV*nO*nV*i]); ldb = nV;
C = &(d_J1[nO*nV*nV*i]); ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO*nV, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_J1_tmp);
2023-07-17 02:55:14 +02:00
2023-08-03 02:53:50 +02:00
double* d_X_voov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_voov, nV*nO*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-17 02:55:14 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
alpha = 0.5;
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
beta = t1[j+b*nO];
A = &(d_t2[nO*(j + nO*nV*b)]); lda = nO*nO;
B = d_t1; ldb = nO;
C = &(d_Y_ovov[nO*(b+nV*j)]); ldc = nO*nV*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
alpha = 1.0;
beta = 0.0;
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_cc_space_v_vvoo[nV*(b+nV*nO*j)]); lda = nV*nV;
B = &(d_cc_space_v_vvoo[nV*(b+nV*nO*j)]); ldb = nV*nV;
C = &(d_X_voov[nV*nO*(j+nO*b)]); ldc = nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-07-20 18:17:46 +02:00
2023-08-03 02:53:50 +02:00
double* d_Z_ovvo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Z_ovvo, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-17 14:00:54 +02:00
2023-08-03 02:53:50 +02:00
alpha = -1.0;
2023-07-17 14:00:54 +02:00
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=nO*nV; n=nV*nO; k=nO*nV;
A=d_Y_ovov; lda=nO*nV;
B=d_X_voov; ldb=nV*nO;
C=d_Z_ovvo; ldc=nO*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X_voov);
cudaFree(d_Y_ovov);
2023-07-17 14:51:50 +02:00
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_J1[nO*nV*(b+nV*i)]); lda = nO;
B = &(d_Z_ovvo[nO*(b+nV*nV*i)]); ldb=nO*nV;
C = &(d_J1[nO*nV*(b+nV*i)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-07-17 14:51:50 +02:00
2023-08-03 02:53:50 +02:00
double* d_Y_vovo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_vovo, nV*nO*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-17 14:51:50 +02:00
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = -0.5;
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_cc_space_v_vvoo[nV*nV*(i+nO*j)]); lda = nV;
B = &(d_cc_space_v_vvoo[nV*nV*(i+nO*j)]); ldb = nV;
C = &(d_Y_vovo[nV*(i+nO*nV*j)]); ldc = nV*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nV, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-07-17 14:51:50 +02:00
double* d_X_ovvo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_ovvo, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-17 14:51:50 +02:00
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 0.0;
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_t2[nO*(j+nO*nV*b)]); lda = nO*nO;
B = &(d_t2[nO*(j+nO*nV*b)]); ldb = nO*nO;
C = &(d_X_ovvo[nO*nV*(b+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-07-17 14:51:50 +02:00
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=nO*nV; n=nV*nO; k=nV*nO;
A=d_X_ovvo; lda=nO*nV;
B=d_Y_vovo; ldb=nV*nO;
C=d_Z_ovvo; ldc=nO*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X_ovvo);
cudaFree(d_Y_vovo);
2023-07-17 14:51:50 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamCreate(&(stream[i]));
}
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_J1[nO*nV*(b+nV*i)]); lda = nO;
B = &(d_Z_ovvo[nO*(b+nV*nV*i)]); ldb = nO*nV;
C = &(d_J1[nO*nV*(b+nV*i)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-07-17 14:51:50 +02:00
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-08-03 02:53:50 +02:00
cudaFree(d_Z_ovvo);
lda = nO*nV;
cublasGetMatrix(nO*nV, nV*nO, sizeof(double), d_J1, lda, J1, lda);
cudaFree(d_J1);
2023-07-17 14:21:22 +02:00
}
2023-07-20 14:53:40 +02:00
#pragma omp section
{
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 0.0;
A = d_cc_space_v_ovov; lda = nO*nV;
B = d_cc_space_v_ovov; ldb = nO*nV;
C = d_K1; ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO*nV, nO*nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
alpha = -1.0;
beta = 1.0;
m=nO*nV*nO; n=nV; k=nO;
A=d_cc_space_v_ovoo; lda=nO*nV*nO;
2023-07-20 14:53:40 +02:00
B=d_t1; ldb=nO;
2023-08-03 02:53:50 +02:00
C=d_K1; ldc=nO*nV*nO;
2023-07-20 14:53:40 +02:00
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-08-03 02:53:50 +02:00
double* d_X;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X, nV*nO*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
double* d_Y;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-03 02:53:50 +02:00
alpha =-1.0;
beta = 0.0;
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_cc_space_v_vvoo[nV*nV*(i+nO*j)]); lda = nV;
B = &(d_cc_space_v_vvoo[nV*nV*(i+nO*j)]); ldb = nV;
C = &(d_X[nV*(j+nO*nV*i)]); ldc = nV*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nV, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-07-20 14:53:40 +02:00
}
}
2023-08-03 02:53:50 +02:00
alpha = 0.5;
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nO ; ++j) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
beta = t1[j+b*nO];
A = &(d_t2[nO*(j+nO*nV*b)]); lda = nO*nO;
B = d_t1; ldb = nO;
C = &(d_Y[nO*(b+nV*nV*j)]); ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-07-20 14:53:40 +02:00
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-07-20 14:53:40 +02:00
2023-08-03 02:53:50 +02:00
double* d_Z;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Z, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-20 14:53:40 +02:00
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=nV*nO; n=nO*nV; k=nV*nO;
A=d_Y; lda=nO*nV;
B=d_X; ldb=nV*nO;
C=d_Z; ldc=nO*nV;
2023-07-20 14:53:40 +02:00
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-08-03 02:53:50 +02:00
cudaFree(d_X);
cudaFree(d_Y);
double* d_t1v;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_t1v, cholesky_mo_num*nO*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-20 14:53:40 +02:00
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=cholesky_mo_num*nO; n=nO; k=nV;
A=d_cc_space_v_ov_chol; lda=cholesky_mo_num*nO;
2023-07-20 14:53:40 +02:00
B=d_t1; ldb=nO;
2023-08-03 02:53:50 +02:00
C=d_t1v; ldc=cholesky_mo_num*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 14:53:40 +02:00
2023-08-03 02:53:50 +02:00
double* d_K1tmp;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_K1tmp, nO*nO*nV*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-20 14:53:40 +02:00
2023-08-02 18:31:44 +02:00
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 0.0;
m=nO*nO; n=nV*nV; k=cholesky_mo_num;
A=d_t1v; lda=cholesky_mo_num;
B=d_cc_space_v_vv_chol; ldb=cholesky_mo_num;
C=d_K1tmp; ldc=nO*nO;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_t1v);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-03 02:53:50 +02:00
alpha = 1.0;
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_K1[nO*nV*(i+nO*b)]); lda = nO;
B = &(d_K1tmp[nO*(i+nO*nV*b)]); ldb = nO*nO;
C = &(d_K1[nO*nV*(i+nO*b)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-02 18:31:44 +02:00
}
2023-08-03 02:53:50 +02:00
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_K1[nO*nV*(i+nO*b)]); lda = nO;
B = &(d_Z[nO*(b+nV*nV*i)]); ldb = nO*nV;
C = &(d_K1[nO*nV*(i+nO*b)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-07-20 14:53:40 +02:00
}
}
2023-08-03 02:53:50 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-08-03 02:53:50 +02:00
cudaFree(d_K1tmp);
cudaFree(d_Z);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
lda = nO*nV;
cublasGetMatrix(nO*nV, nO*nV, sizeof(double), d_K1, lda, K1, lda);
2023-08-02 16:17:43 +02:00
}
2023-07-20 17:54:10 +02:00
#pragma omp section
{
// OOOO-ladder-like piece of the doubles residual (TODO confirm label):
//   Y_oooo = t1 . v_vooo                (gemm: (nO x nV) . (nV x nO^3))
//   A1     = v_oooo + Y_oooo            (element-wise geam, nO^2 x nO^2)
//   A1(:,:,i,j) += Y_oooo(:,:,j,i)^T    (per-(i,j) slice, one stream per i)
//   A1    += tau . v_vvoo               (gemm, contraction over k = nV^2)
//   r2     = A1 . tau                   (gemm: (nO^2 x nO^2) . (nO^2 x nV^2))
// NOTE(review): cudaStreamCreate/cublas return codes are not checked here.
2023-08-03 02:53:50 +02:00
double* d_Y_oooo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_Y_oooo, nO*nO*nO*nO*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// Y_oooo = t1 . v_vooo
alpha = 1.0;
beta = 0.0;
m=nO ; n=nO*nO*nO; k=nV;
A = d_t1 ; lda = nO;
B = d_cc_space_v_vooo ; ldb = nV;
C = d_Y_oooo; ldc = nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
double* d_A1;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_A1, nO*nO*nO*nO*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// A1 = v_oooo + Y_oooo (whole nO^2 x nO^2 arrays at once)
alpha = 1.0;
beta = 1.0;
A = d_cc_space_v_oooo; lda = nO*nO;
B = d_Y_oooo; ldb = nO*nO;
C = d_A1; ldc = nO*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO*nO, nO*nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-02 18:31:44 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
// A1(:,:,i,j) += Y_oooo(:,:,j,i)^T, one nO x nO geam per (i,j) pair;
// the inner index i selects the stream so slices run concurrently.
for (size_t j=0 ; j<nO ; ++j) {
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
alpha = 1.0;
beta = 1.0;
A = &(d_A1[nO*nO*(i+nO*j)]); lda = nO;
B = &(d_Y_oooo[nO*nO*(j+nO*i)]); ldb = nO;
C = &(d_A1[nO*nO*(i+nO*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_Y_oooo);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// A1 += tau . v_vvoo (contraction over the nV^2 virtual pair index)
alpha = 1.0;
beta = 1.0;
m=nO*nO ; n=nO*nO; k=nV*nV;
A = d_tau ; lda = nO*nO;
B = d_cc_space_v_vvoo ; ldb = nV*nV;
C = d_A1; ldc = nO*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// r2 = A1 . tau (beta = 0: this section initializes this thread's d_r2)
alpha = 1.0;
beta = 0.0;
m=nO*nO ; n=nV*nV; k=nO*nO;
A = d_A1 ; lda = nO*nO;
B = d_tau ; ldb = nO*nO;
C = d_r2; ldc = nO*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_A1);
}
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// g_vir
#pragma omp section
{
// Builds the nV x nV intermediate g_vir and folds t2 . g_vir into r2:
//   g_vir  = H_vv                       (Dcopy)
//   g_vir -= f_vo . t1                  (gemm, alpha = -1)
//   tmp_k  = v_ov_chol . vec(t1)        (gemm with n = 1, i.e. a mat-vec)
//   g_vir += 2 * v_vv_chol^T . tmp_k
//   tmp_vo = v_vv_chol . t1^T; tmp_vo2 = -tmp_vo with (v,o) axes swapped
//   g_vir += v_ov_chol^T . tmp_vo2      (exchange-like correction — TODO confirm)
//   Y_oovv = t2 . g_vir;  r2 += Y_oovv and its (j,i)-transposed slices
double* d_g_vir;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_g_vir, nV*nV*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 17:29:57 +02:00
cublasDcopy(handle, nV*nV, d_H_vv, 1, d_g_vir, 1);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// g_vir -= f_vo . t1
alpha = -1.0;
beta = 1.0;
m=nV ; n=nV; k=nO;
A = d_cc_space_f_vo ; lda = nV;
B = d_t1 ; ldb = nO;
C = d_g_vir; ldc = nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
double* d_tmp_k;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_tmp_k, cholesky_mo_num*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
// tmp_k = v_ov_chol . vec(t1): n = 1 turns the gemm into a mat-vec.
alpha = 1.0;
beta = 0.0;
m=cholesky_mo_num ; n=1; k=nO*nV;
A = d_cc_space_v_ov_chol; lda = cholesky_mo_num;
B = d_t1 ; ldb = nO*nV;
C = d_tmp_k; ldc = cholesky_mo_num;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
// g_vir += 2 * v_vv_chol^T . tmp_k (result viewed as a flat nV*nV vector)
alpha = 2.0;
beta = 1.0;
m=nV*nV; n=1; k=cholesky_mo_num;
A = d_cc_space_v_vv_chol; lda = cholesky_mo_num;
B = d_tmp_k ; ldb = cholesky_mo_num;
C = d_g_vir; ldc = nV*nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_tmp_k);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
double* d_tmp_vo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_tmp_vo, cholesky_mo_num*nV*nO*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
// tmp_vo = v_vv_chol . t1^T  ((chol*nV) x nO)
alpha = 1.0;
beta = 0.0;
m=cholesky_mo_num*nV ; n=nO; k=nV;
A = d_cc_space_v_vv_chol; lda = cholesky_mo_num*nV;
B = d_t1 ; ldb = nO;
C = d_tmp_vo; ldc = cholesky_mo_num*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
double* d_tmp_vo2;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_tmp_vo2, cholesky_mo_num*nV*nO*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
// tmp_vo2(:, i, :) = -tmp_vo(:, :, i): negate while swapping the v/o axes,
// one geam per occupied index i on its own stream.
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
alpha = -1.0;
beta = 0.0;
A = &(d_tmp_vo[cholesky_mo_num*nV*i]); lda = cholesky_mo_num;
B = &(d_tmp_vo[cholesky_mo_num*nV*i]); ldb = cholesky_mo_num;
C = &(d_tmp_vo2[cholesky_mo_num*i]); ldc = cholesky_mo_num*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, cholesky_mo_num, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_tmp_vo);
// g_vir += v_ov_chol^T . tmp_vo2 (contraction over chol x nO)
alpha = 1.0;
beta = 1.0;
m=nV ; n=nV; k=nO*cholesky_mo_num;
A = d_cc_space_v_ov_chol; lda = cholesky_mo_num*nO;
B = d_tmp_vo2 ; ldb = cholesky_mo_num*nO;
C = d_g_vir; ldc = nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_tmp_vo2);
double* d_Y_oovv;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_Y_oovv, nO*nO*nV*nV*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
// Y_oovv = t2 . g_vir (contraction over the last virtual index of t2)
alpha = 1.0;
beta = 0.0;
m=nO*nO*nV ; n=nV; k=nV;
A = d_t2; lda = nO*nO*nV;
B = d_g_vir; ldb = nV;
C = d_Y_oovv; ldc = nO*nO*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_g_vir);
// r2 += Y_oovv (direct part, whole array at once)
alpha = 1.0;
beta = 1.0;
A = d_r2; lda = nO*nO;
B = d_Y_oovv; ldb = nO*nO;
C = d_r2; ldc = nO*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO*nO, nV*nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
// r2(:,:,i,j) += Y_oovv(:,:,j,i)^T (permuted part, one slice per stream)
for (size_t j=0 ; j<nV ; ++j) {
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
alpha = 1.0;
beta = 1.0;
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_Y_oovv[nO*nO*(j+nV*i)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_Y_oovv);
}
// g_occ
#pragma omp section
{
// Builds the nO x nO intermediate g_occ and subtracts g_occ . t2 from r2:
//   g_occ  = H_oo                         (Dcopy)
//   X      = 2 * v_ov_chol . vec(t1)      (gemv; the ldb/ldc scratch vars
//                                          hold the incx/incy vector strides)
//   g_occ += v_oo_chol^T . X              (gemv, transposed)
//   g_occ -= v_ovoo^T . vec(t1)           (gemv, alpha = -1)
//   g_occ += t1 . f_vo                    (gemm nO x nO over k = nV)
//   X_oovv = g_occ . t2;  r2 -= X_oovv and its (j,i)-transposed slices
double* d_g_occ;
lda = nO;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_g_occ, nO*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 17:29:57 +02:00
cublasDcopy(handle, nO*nO, d_H_oo, 1, d_g_occ, 1);
2023-08-03 12:00:09 +02:00
double* d_X;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X, cholesky_mo_num*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 12:00:09 +02:00
// X = 2 * v_ov_chol . vec(t1)
alpha = 2.0;
beta = 0.0;
m=cholesky_mo_num; n=nO*nV;
A=d_cc_space_v_ov_chol; lda=cholesky_mo_num;
B=d_t1; ldb=1;
C=d_X; ldc=1;
cublasDgemv(handle, CUBLAS_OP_N, m, n, &alpha, A, lda, B, ldb, &beta, C, ldc);
// g_occ += v_oo_chol^T . X (accumulated into the flat nO*nO vector)
alpha = 1.0;
beta = 1.0;
m=cholesky_mo_num; n=nO*nO;
A=d_cc_space_v_oo_chol; lda=cholesky_mo_num;
B=d_X; ldb=1;
C=d_g_occ; ldc=1;
cublasDgemv(handle, CUBLAS_OP_T, m, n, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X);
// g_occ -= v_ovoo^T . vec(t1)
alpha = -1.0;
beta = 1.0;
m=nO*nV; n=nO*nO;
A=d_cc_space_v_ovoo; lda=nO*nV;
B=d_t1; ldb=1;
C=d_g_occ; ldc=1;
cublasDgemv(handle, CUBLAS_OP_T, m, n, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-02 18:31:44 +02:00
// g_occ += t1 . f_vo
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 1.0;
m=nO; n=nO; k=nV;
A=d_t1; lda=nO;
B=d_cc_space_f_vo; ldb=nV;
C=d_g_occ; ldc=nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-07-20 17:54:10 +02:00
2023-08-03 02:53:50 +02:00
double* d_X_oovv;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_oovv, nO*nO*nV*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-02 19:51:12 +02:00
// X_oovv = g_occ . t2 (contraction over the first occupied index of t2)
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=nO; n=nO*nV*nV; k=nO;
A=d_g_occ; lda=nO;
B=d_t2; ldb=nO;
C=d_X_oovv; ldc=nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_g_occ);
2023-08-02 19:51:12 +02:00
2023-08-03 02:53:50 +02:00
// r2 -= X_oovv (direct part; beta = -1 subtracts)
alpha = 1.0;
beta = -1.0;
A = d_r2; lda = nO*nO;
B = d_X_oovv; ldb = nO*nO;
C = d_r2; ldc = nO*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO*nO, nV*nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-02 19:51:12 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
// r2(:,:,i,j) -= X_oovv(:,:,j,i)^T (permuted part, one slice per stream)
for (size_t j=0 ; j<nV ; ++j) {
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
alpha = 1.0;
beta = -1.0;
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_X_oovv[nO*nO*(j+nV*i)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_X_oovv);
}
2023-08-02 19:51:12 +02:00
2023-08-03 03:21:32 +02:00
} // end sections
lda = nO*nV;
cublasSetMatrix(lda, nO*nV, sizeof(double), K1, lda, d_K1, lda);
2023-08-03 17:29:57 +02:00
#define BLOCK_SIZE 16
2023-08-03 03:21:32 +02:00
#pragma omp sections
{
2023-08-03 02:53:50 +02:00
#pragma omp section
{
// t1 . (v_vv_chol^T . v_ov_chol) contribution, blocked over gam in chunks of
// BLOCK_SIZE so the X_vovv scratch stays at nV*nO*nV*BLOCK_SIZE doubles:
//   X_vovv(:,:,:,gam-iblock) = v_vv_chol(:,:,gam)^T . v_ov_chol  (stream per gam)
//   Y_oovv(:,:,:,iblock..)   = t1 . X_vovv
//   r2(:,:,i,j) += Y_oovv(:,:,i,j)^T + Y_oovv(:,:,j,i)
// NOTE: mbs is first the absolute gam upper bound min(nV, iblock+BLOCK_SIZE),
// then reassigned below to the block width min(BLOCK_SIZE, nV-iblock).
double* d_X_vovv;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_vovv, nV*nO*nV*BLOCK_SIZE * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-02 23:50:28 +02:00
2023-08-03 02:53:50 +02:00
double* d_Y_oovv;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_oovv, nO*nO*nV*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-02 23:50:28 +02:00
2023-08-21 12:34:55 +02:00
for (size_t iblock=0 ; iblock<nV ; iblock += BLOCK_SIZE) {
size_t mbs = nV < iblock+BLOCK_SIZE ? nV : iblock+BLOCK_SIZE;
for (size_t gam=iblock ; gam<mbs ; ++gam) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[gam]));
}
2023-08-21 12:34:55 +02:00
// One gemm per gam in the block, each on its own stream.
for (size_t gam=iblock ; gam<mbs ; ++gam) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[gam]);
alpha = 1.0;
beta = 0.0;
m=nV; n=nO*nV; k=cholesky_mo_num;
A=&(d_cc_space_v_vv_chol[cholesky_mo_num*nV*gam]); lda=cholesky_mo_num;
B=d_cc_space_v_ov_chol; ldb=cholesky_mo_num;
C=&(d_X_vovv[nV*nO*nV*(gam-iblock)]); ldc=nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t gam=iblock ; gam<mbs ; ++gam) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[gam]);
}
cublasSetStream(handle, NULL);
// Reuse mbs as the actual block width for the t1 contraction below.
mbs = BLOCK_SIZE < nV-iblock ? BLOCK_SIZE : nV-iblock;
alpha = 1.0;
beta = 0.0;
m=nO; n=nO*nV*mbs; k=nV;
A=d_t1; lda=nO;
B=d_X_vovv; ldb=nV;
C=&(d_Y_oovv[nO*nO*nV*iblock]); ldc=nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
}
cudaFree(d_X_vovv);
alpha = 1.0;
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
// r2(:,:,i,j) += Y_oovv(:,:,i,j)^T, then += Y_oovv(:,:,j,i), slice by slice.
for (size_t j=0 ; j<nV ; ++j) {
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_Y_oovv[nO*nO*(i+nV*j)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_Y_oovv[nO*nO*(j+nV*i)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_Y_oovv);
}
2023-08-02 23:50:28 +02:00
2023-08-03 02:53:50 +02:00
#pragma omp section
{
// t1-dressed Cholesky contraction subtracted from r2 (TODO confirm label):
//   tcc2   = v_vv_chol . t1^T   ((chol*nV) x nO)
//   tcc    = v_oo_chol . t1     ((chol*nO) x nV)
//   X_ovvo = tcc^T . tcc2       ((nO*nV) x (nV*nO), contraction over chol)
//   r2(:,:,bet,gam) -= X_ovvo slices (direct and transposed permutation)
double* d_tcc2;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_tcc2, cholesky_mo_num*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-02 23:50:28 +02:00
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 0.0;
m=cholesky_mo_num*nV; n=nO; k=nV;
A=d_cc_space_v_vv_chol; lda=cholesky_mo_num*nV;
B=d_t1; ldb=nO;
C=d_tcc2; ldc=cholesky_mo_num*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
double* d_tcc;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_tcc, cholesky_mo_num*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-02 23:50:28 +02:00
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=cholesky_mo_num*nO; n=nV; k=nO;
A=d_cc_space_v_oo_chol; lda=cholesky_mo_num*nO;
B=d_t1; ldb=nO;
C=d_tcc; ldc=cholesky_mo_num*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
double* d_X_ovvo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_ovvo, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-02 23:50:28 +02:00
// X_ovvo = tcc^T . tcc2, contracting away the Cholesky index
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 0.0;
m=nO*nV; n=nV*nO; k=cholesky_mo_num;
A=d_tcc; lda=cholesky_mo_num;
B=d_tcc2; ldb=cholesky_mo_num;
C=d_X_ovvo; ldc=nO*nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_tcc);
cudaFree(d_tcc2);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
// Subtract both permutations of X_ovvo from r2, slice by slice.
// The strided ldb = nO*nV*nV walks the occupied index of X_ovvo.
alpha = 1.0;
beta = -1.0;
2023-08-21 12:34:55 +02:00
for(size_t gam = 0; gam < nV; gam++){
for(size_t bet = 0; bet < nV; bet++){
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[bet]);
A = &(d_r2[nO*nO*(bet+nV*gam)]); lda = nO;
B = &(d_X_ovvo[nO*(bet+nV*gam)]); ldb = nO*nV*nV;
C = &(d_r2[nO*nO*(bet+nV*gam)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for(size_t bet = 0; bet < nV; bet++){
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[bet]);
A = &(d_r2[nO*nO*(bet+nV*gam)]); lda = nO;
B = &(d_X_ovvo[nO*(gam+nV*bet)]); ldb = nO*nV*nV;
C = &(d_r2[nO*nO*(bet+nV*gam)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-02 23:50:28 +02:00
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-02 23:50:28 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-08-03 02:53:50 +02:00
cudaFree(d_X_ovvo);
2023-07-20 17:54:10 +02:00
}
2023-08-02 18:31:44 +02:00
2023-08-03 01:21:16 +02:00
#pragma omp section
{
// t1 contractions with v_oovo and v_ovvo, both subtracted from r2:
//   X_oovv = v_oovo . t1;  r2 -= X_oovv and its (j,i)-transposed slices
//   X_vovo(:,:,gam,i) = v_ovvo(:,:,gam,i)^T       (slice-wise geam transpose)
//   Y_oovo = t1 . X_vovo;  X_oovv = Y_oovo . t1;  r2 -= both permutations
2023-08-03 02:53:50 +02:00
double* d_X_oovv;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_oovv, nO*nO*nV*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 01:21:16 +02:00
// X_oovv = v_oovo . t1
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = 0.0;
m=nO*nO*nV; n=nV; k=nO;
A=d_cc_space_v_oovo; lda=nO*nO*nV;
2023-08-03 01:21:16 +02:00
B=d_t1; ldb=nO;
2023-08-03 02:53:50 +02:00
C=d_X_oovv; ldc=nO*nO*nV;
2023-08-03 01:21:16 +02:00
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-08-03 02:53:50 +02:00
// r2 -= X_oovv (direct part)
alpha = 1.0;
beta = -1.0;
A = d_r2; lda = nO*nO;
B = d_X_oovv; ldb = nO*nO;
C = d_r2; ldc = nO*nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO*nO, nV*nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 01:21:16 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
// r2(:,:,i,j) -= X_oovv(:,:,j,i)^T (permuted part)
for (size_t j=0 ; j<nV ; ++j) {
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_X_oovv[nO*nO*(j+nV*i)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-03 01:21:16 +02:00
}
}
2023-08-03 02:53:50 +02:00
double* d_X_vovo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_vovo, nV*nO*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
// X_vovo(:,:,gam,i) = 0*A + 1*v_ovvo(:,:,gam,i)^T.
// NOTE(review): A here is the still-uninitialized d_X_vovo read with
// alpha == 0 — confirm against the cuBLAS geam documentation that A is
// not read (or that 0*uninit cannot propagate NaNs) in this case.
alpha = 0.0;
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
for (size_t gam=0 ; gam<nV ; ++gam) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[gam]);
A = &(d_X_vovo[nV*nO*(gam+nV*i)]); lda = nV;
B = &(d_cc_space_v_ovvo[nO*nV*(gam+nV*i)]); ldb = nO;
C = &(d_X_vovo[nV*nO*(gam+nV*i)]); ldc = nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-03 01:21:16 +02:00
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 01:21:16 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-08-03 02:53:50 +02:00
double* d_Y_oovo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_oovo, nO*nO*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 01:21:16 +02:00
// Y_oovo = t1 . X_vovo
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=nO; n=nO*nV*nO; k=nV;
A=d_t1; lda=nO;
B=d_X_vovo; ldb=nV;
C=d_Y_oovo; ldc=nO;
2023-08-03 01:21:16 +02:00
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-08-03 02:53:50 +02:00
cudaFree(d_X_vovo);
2023-08-03 01:21:16 +02:00
// X_oovv = Y_oovo . t1 (reusing the X_oovv buffer from above)
alpha = 1.0;
beta = 0.0;
2023-08-03 02:53:50 +02:00
m=nO*nO*nV; n=nV; k=nO;
A=d_Y_oovo; lda=nO*nO*nV;
2023-08-03 01:21:16 +02:00
B=d_t1; ldb=nO;
2023-08-03 02:53:50 +02:00
C=d_X_oovv; ldc=nO*nO*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
2023-08-03 01:21:16 +02:00
2023-08-03 02:53:50 +02:00
cudaFree(d_Y_oovo);
2023-08-03 01:21:16 +02:00
// r2 -= X_oovv^T slices and X_oovv (j,i) slices, one stream per i.
alpha = 1.0;
2023-08-03 02:53:50 +02:00
beta = -1.0;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 01:21:16 +02:00
cudaStreamCreate(&(stream[i]));
}
2023-08-21 12:34:55 +02:00
for (size_t j=0 ; j<nV ; ++j) {
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_X_oovv[nO*nO*(i+nV*j)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-03 01:21:16 +02:00
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[i]);
A = &(d_r2[nO*nO*(i+nV*j)]); lda = nO;
B = &(d_X_oovv[nO*nO*(j+nV*i)]); ldb = nO;
C = &(d_r2[nO*nO*(i+nV*j)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-03 01:21:16 +02:00
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 01:21:16 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
2023-08-03 02:53:50 +02:00
cudaFree(d_X_oovv);
2023-08-03 01:21:16 +02:00
}
2023-08-02 18:31:44 +02:00
2023-08-03 02:53:50 +02:00
#pragma omp section
{
  /* r2 <- r2 + v_oovv: accumulate the two-electron integral block
     d_cc_space_v_oovv into the residual as one (nO^2 x nV^2)
     element-wise geam, both scale factors equal to 1. */
  alpha = 1.0;
  beta  = 1.0;
  lda = nO*nO;  A = d_r2;
  ldb = nO*nO;  B = d_cc_space_v_oovv;
  ldc = nO*nO;  C = d_r2;
  cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              nO*nO, nV*nV,
              &alpha, A, lda,
              &beta,  B, ldb,
              C, ldc);
}
2023-08-03 02:12:15 +02:00
#pragma omp section
{
// (J1 - K1/2) contracted with a 2*t2 - t2~ combination, added to r2
// (ring-like term — TODO confirm label):
//   X_ovvo = J1 - 0.5 * K1 (permuted slices, one stream per b)
//   Y_voov = 2 * t2-slice^T - t2-slice^T  (alpha = 2, beta = -1 geam)
//   Z_ovov = X_ovvo . Y_voov
//   r2 += Z_ovov slices (direct and transposed permutation)
// J1 is uploaded from the host inside this section; d_K1 was filled by the
// cublasSetMatrix after the previous sections block.
double* d_J1;
lda = nO*nV;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_J1, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:12:15 +02:00
cublasSetMatrix(lda, nV*nO, sizeof(double), J1, lda, d_J1, lda);
double* d_X_ovvo;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_ovvo, nO*nV*nV*nO * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:12:15 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:12:15 +02:00
cudaStreamCreate(&(stream[i]));
}
// X_ovvo = J1 - 0.5 * K1, with the (i,b) axes permuted between the two.
alpha = 1.0;
beta = -0.5;
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nO ; ++i) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:12:15 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_J1[nO*nV*(b+nV*i)]); lda = nO;
B = &(d_K1[nO*nV*(i+nO*b)]); ldb = nO;
C = &(d_X_ovvo[nO*(b+nV*nV*i)]); ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:12:15 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_J1);
double* d_Y_voov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_voov, nV*nO*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:12:15 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:12:15 +02:00
cudaStreamCreate(&(stream[i]));
}
// Y_voov = 2 * (one strided t2 slice)^T - (another strided t2 slice)^T,
// built per (v,g) pair; the two B/A strides pick different t2 orderings.
alpha = 2.0;
beta = -1.0;
2023-08-21 12:34:55 +02:00
for (size_t v=0 ; v<nO ; ++v) {
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 02:12:15 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_t2[nO*(v+nO*nV*g)]); lda = nO*nO;
B = &(d_t2[nO*(v+nO*g)]); ldb = nO*nO*nV;
C = &(d_Y_voov[nV*nO*(v+nO*g)]); ldc = nV;
cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:12:15 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
double* d_Z_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Z_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:12:15 +02:00
// Z_ovov = X_ovvo . Y_voov (contraction over the shared nV*nO index)
alpha = 1.0;
beta = 0.0;
m=nO*nV; n=nO*nV; k=nV*nO;
A=d_X_ovvo; lda=nO*nV;
B=d_Y_voov; ldb=nV*nO;
C=d_Z_ovov; ldc=nO*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X_ovvo);
cudaFree(d_Y_voov);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:12:15 +02:00
cudaStreamCreate(&(stream[i]));
}
// r2(:,:,b,g) += Z_ovov slices (direct, then transposed with swapped b/g).
alpha = 1.0;
beta = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t b=0 ; b<nV ; ++b) {
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 02:12:15 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_r2[nO*nO*(b+nV*g)]); lda = nO;
B = &(d_Z_ovov[nO*(b+nV*nO*g)]); ldb = nO*nV;
C = &(d_r2[nO*nO*(b+nV*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 02:12:15 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_r2[nO*nO*(b+nV*g)]); lda = nO;
B = &(d_Z_ovov[nO*(g+nV*nO*b)]); ldb = nO*nV;
C = &(d_r2[nO*nO*(b+nV*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:12:15 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_Z_ovov);
}
2023-08-03 02:53:50 +02:00
#pragma omp section
{
// K1-exchange contribution, subtracted from r2:
//   X_ovov = 0.5 * K1 with each (a,b) slice transposed   (geam, beta = 0)
//   Y_ovov = strided t2 slice copy                       (geam, pure copy)
//   Z_ovov = X_ovov^T . Y_ovov
//   r2 -= Z_ovov slices (direct and transposed permutation)
double* d_X_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
double* d_Y_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
// X_ovov = 0.5 * K1^T per (a,b) slice; beta = 0 so B is only a placeholder.
alpha = 0.5;
beta = 0.0;
2023-08-21 12:34:55 +02:00
for (size_t a=0 ; a<nV ; ++a) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_K1[nO*(a+nV*nO*b)]); lda = nO*nV;
B = &(d_K1[nO*(a+nV*nO*b)]); ldb = nO*nV;
C = &(d_X_ovov[nO*(a+nV*nO*b)]); ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
// Y_ovov: repack t2 slices (beta still 0.0 from above, so a strided copy).
alpha = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t v=0 ; v<nO ; ++v) {
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_t2[nO*(v+nO*g)]); lda = nO*nO*nV;
B = &(d_t2[nO*(v+nO*g)]); ldb = nO*nO*nV;
C = &(d_Y_ovov[nO*nV*(v+nO*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
double* d_Z_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Z_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 02:53:50 +02:00
// Z_ovov = X_ovov^T . Y_ovov
alpha = 1.0;
beta = 0.0;
m=nO*nV; n=nO*nV; k=nO*nV;
A=d_X_ovov; lda=nO*nV;
B=d_Y_ovov; ldb=nO*nV;
C=d_Z_ovov; ldc=nO*nV;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X_ovov);
cudaFree(d_Y_ovov);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamCreate(&(stream[i]));
}
// r2(:,:,b,g) -= Z_ovov slices (direct, then transposed with swapped b/g).
alpha = 1.0;
beta = -1.0;
2023-08-21 12:34:55 +02:00
for (size_t b=0 ; b<nV ; ++b) {
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_r2[nO*nO*(b+nV*g)]); lda = nO;
B = &(d_Z_ovov[nO*(b+nV*nO*g)]); ldb = nO*nV;
C = &(d_r2[nO*nO*(b+nV*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 02:53:50 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_r2[nO*nO*(b+nV*g)]); lda = nO;
B = &(d_Z_ovov[nO*(g+nV*nO*b)]); ldb = nO*nV;
C = &(d_r2[nO*nO*(b+nV*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 02:53:50 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_Z_ovov);
}
2023-08-03 02:12:15 +02:00
2023-08-03 02:53:50 +02:00
#pragma omp section
{
// Second K1-exchange permutation, also subtracted from r2. Same shape as
// the previous section but: X stores the K1 slice under swapped (a,g)
// output indices, the gemm uses no transpose, and the direct/transposed
// r2 accumulation is applied with the b/g roles exchanged.
2023-08-03 03:21:32 +02:00
double* d_X_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_X_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 03:21:32 +02:00
double* d_Y_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Y_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 03:21:32 +02:00
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 03:21:32 +02:00
cudaStreamCreate(&(stream[i]));
}
// X_ovov(:, g, :, a) = K1(:, a, :, g): copy with the virtual axes swapped.
alpha = 1.0;
beta = 0.0;
2023-08-21 12:34:55 +02:00
for (size_t a=0 ; a<nV ; ++a) {
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 03:21:32 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_K1[nO*(a+nV*nO*g)]); lda = nO*nV;
B = &(d_K1[nO*(a+nV*nO*g)]); ldb = nO*nV;
C = &(d_X_ovov[nO*(g+nV*nO*a)]); ldc = nO*nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
// Y_ovov: repack t2 slices (beta still 0.0, so a strided copy).
alpha = 1.0;
2023-08-21 12:34:55 +02:00
for (size_t v=0 ; v<nO ; ++v) {
for (size_t b=0 ; b<nV ; ++b) {
2023-08-03 03:21:32 +02:00
cublasSetStream(handle, stream[b]);
A = &(d_t2[nO*(v+nO*b)]); lda = nO*nO*nV;
B = &(d_t2[nO*(v+nO*b)]); ldb = nO*nO*nV;
C = &(d_Y_ovov[nO*nV*(v+nO*b)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 03:21:32 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
double* d_Z_ovov;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_Z_ovov, nO*nV*nO*nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-08-03 03:21:32 +02:00
// Z_ovov = X_ovov . Y_ovov (note: OP_N here, unlike the previous section)
alpha = 1.0;
beta = 0.0;
m=nO*nV; n=nO*nV; k=nO*nV;
A=d_X_ovov; lda=nO*nV;
B=d_Y_ovov; ldb=nO*nV;
C=d_Z_ovov; ldc=nO*nV;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
cudaFree(d_X_ovov);
cudaFree(d_Y_ovov);
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 03:21:32 +02:00
cudaStreamCreate(&(stream[i]));
}
// r2(:,:,b,g) -= Z_ovov slices (swapped-index direct part, then transposed).
alpha = 1.0;
beta = -1.0;
2023-08-21 12:34:55 +02:00
for (size_t b=0 ; b<nV ; ++b) {
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 03:21:32 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_r2[nO*nO*(b+nV*g)]); lda = nO;
B = &(d_Z_ovov[nO*(g+nV*nO*b)]); ldb = nO*nV;
C = &(d_r2[nO*nO*(b+nV*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
2023-08-21 12:34:55 +02:00
for (size_t g=0 ; g<nV ; ++g) {
2023-08-03 03:21:32 +02:00
cublasSetStream(handle, stream[g]);
A = &(d_r2[nO*nO*(b+nV*g)]); lda = nO;
B = &(d_Z_ovov[nO*(b+nV*nO*g)]); ldb = nO*nV;
C = &(d_r2[nO*nO*(b+nV*g)]); ldc = nO;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
}
2023-08-21 12:34:55 +02:00
for (size_t i=0 ; i<nV ; ++i) {
2023-08-03 03:21:32 +02:00
cudaStreamDestroy(stream[i]);
}
cublasSetStream(handle, NULL);
cudaFree(d_Z_ovov);
2023-08-03 02:53:50 +02:00
}
2023-08-03 02:12:15 +02:00
} // end sections
2023-08-03 03:21:32 +02:00
cudaFree(d_K1);
2023-08-03 01:21:16 +02:00
2023-08-03 17:29:57 +02:00
double* d_tmp_cc;
2023-07-16 21:18:57 +02:00
lda = cholesky_mo_num * nV;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_tmp_cc, lda * nV * sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-16 21:18:57 +02:00
alpha=1.0; beta=0.0;
m=cholesky_mo_num*nV; n=nV; k=nO;
A = d_cc_space_v_vo_chol; B = d_t1; C = d_tmp_cc;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, m, B, k, &beta, C, m);
double* d_tmp_cc2;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void **)&d_tmp_cc2, cholesky_mo_num*nV*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-16 21:18:57 +02:00
double* d_B1;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_B1, nV*nV*BLOCK_SIZE*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-16 21:18:57 +02:00
double* d_tmpB1;
2023-12-06 10:42:52 +01:00
cudaStat = gpu_malloc((void**)&d_tmpB1, nV*BLOCK_SIZE*nV*sizeof(double));
2023-08-21 12:34:55 +02:00
assert (cudaStat == cudaSuccess);
2023-07-16 21:18:57 +02:00
2023-07-16 17:27:14 +02:00
#pragma omp for
2023-08-21 12:34:55 +02:00
for (size_t gam=0 ; gam<nV ; ++gam)
2023-07-16 17:27:14 +02:00
{
double* d_tmp_cc_ = &(d_tmp_cc[gam*nV*cholesky_mo_num]);
double* d_cc_space_v_vv_chol_ = &(d_cc_space_v_vv_chol[gam*nV*cholesky_mo_num]);
alpha = 1.0;
beta = -1.0;
A = d_cc_space_v_vv_chol_; lda = cholesky_mo_num;
B = d_tmp_cc_; ldb = cholesky_mo_num;
C = d_tmp_cc2 ; ldc = cholesky_mo_num;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, cholesky_mo_num, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
2023-08-21 12:34:55 +02:00
for (size_t iblock=0 ; iblock<nV ; iblock += BLOCK_SIZE)
2023-07-16 17:27:14 +02:00
{
2023-08-21 12:34:55 +02:00
const size_t mbs = BLOCK_SIZE < nV-iblock ? BLOCK_SIZE : nV-iblock;
2023-07-16 17:27:14 +02:00
alpha=-1.0; beta=0.0;
m=nV*mbs; n=nV; k=cholesky_mo_num;
A=&(d_tmp_cc[iblock*cholesky_mo_num*nV]); lda=cholesky_mo_num;
B=d_cc_space_v_vv_chol_; ldb=cholesky_mo_num;
C=d_tmpB1 ; ldc=nV*BLOCK_SIZE;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, lda, &beta, C, ldc);
alpha=1.0; beta=1.0;
m=nV*mbs; n=nV; k=cholesky_mo_num;
A=&(d_cc_space_v_vv_chol[iblock*cholesky_mo_num*nV]); lda=cholesky_mo_num;
B=d_tmp_cc2; ldb=cholesky_mo_num;
C=d_tmpB1 ; ldc=nV*BLOCK_SIZE;
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, lda, &beta, C, ldc);
for (size_t bet=iblock ; bet<(nV < iblock+BLOCK_SIZE ? nV : iblock+BLOCK_SIZE) ; ++bet)
{
alpha = 1.0;
beta = 0.0;
A = &(d_tmpB1[nV*(bet-iblock)]); lda = nV*BLOCK_SIZE;
B = d_tmpB1; ldb = nV;
C = &(d_B1[nV*nV*(bet-iblock)]) ; ldc = nV;
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, nV, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
}
alpha=1.0; beta=1.0;
m=nO*nO; n=mbs; k=nV*nV;
A=d_tau; lda=nO*nO;
B=d_B1 ; ldb=nV*nV;
C=&(d_r2[nO*nO*(iblock + nV*gam)]); ldc=nO*nO;
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
}
2023-08-04 14:07:49 +02:00
}
cudaFree(d_tmpB1);
cudaFree(d_B1);
cudaFree(d_tmp_cc2);
cudaFree(d_tmp_cc);
2023-08-04 12:09:07 +02:00
2023-08-04 14:07:49 +02:00
double * r2_tmp = malloc(nO*nO*nV*nV*sizeof(double));
lda=nO*nO;
cublasGetMatrix(nO*nO, nV*nV, sizeof(double), d_r2, lda, r2_tmp, lda);
#pragma omp critical
{
for (size_t i=0 ; i<(size_t) nO*nO*nV*nV ; ++i) {
r2[i] -= r2_tmp[i];
}
}
free(r2_tmp);
2023-08-04 12:09:07 +02:00
2023-08-04 14:07:49 +02:00
cudaFree(d_r2);
2023-08-04 12:09:07 +02:00
2023-08-04 14:07:49 +02:00
cublasDestroy(handle);
}
free(K1);
free(J1);
2023-08-04 12:09:07 +02:00
2023-08-04 14:07:49 +02:00
*max_r2 = 0.;
for (size_t i=0 ; i<(size_t) nO*nO*nV*nV ; ++i) {
const double x = r2[i] > 0. ? r2[i] : -r2[i];
*max_r2 = *max_r2 > x ? *max_r2 : x;
}
2023-08-04 12:09:07 +02:00
2023-08-04 14:07:49 +02:00
}
2023-08-04 12:09:07 +02:00
2023-08-04 14:07:49 +02:00
/* Computes the CCSD singles residual r1 (nO x nV, column-major) on one or
 * more GPUs, using Cholesky-decomposed two-electron integrals.
 *
 * data   : per-GPU context; data[igpu] holds device pointers for GPU igpu.
 * nO, nV : number of occupied / virtual orbitals.
 * t1     : host copy of the singles amplitudes (read for scalar coefficients).
 * r1     : host output buffer (nO*nV doubles); zeroed here, then each GPU
 *          SUBTRACTS its device-computed contribution (r1 -= d_r1), so the
 *          stored residual is the negated sum of all device contributions.
 * max_r1 : on return, the largest |r1[i]| over all elements.
 *
 * Each OpenMP thread drives one GPU; the independent terms of the residual
 * are distributed over the threads with `omp sections`.
 */
void compute_r1_space_chol_gpu(gpu_data* data, int nO, int nV, double* t1, double* r1, double* max_r1)
{
  const size_t cholesky_mo_num = data->cholesky_mo_num;

  /* One OpenMP thread per GPU (single GPU unless MULTIGPU is enabled). */
  int ngpus = 1;
  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);

#pragma omp parallel num_threads(ngpus)
  {
    cudaError_t cudaStat;
    size_t m,n,k, lda, ldb, ldc;
    double alpha, beta;
    double* A;
    double* B;
    double* C;
    /* VLA of per-virtual-orbital streams, used to overlap the many small
     * Dgeam calls issued in the loops below. */
    cudaStream_t stream[nV];

    size_t igpu = omp_get_thread_num();
    cudaSetDevice(igpu);

    cublasHandle_t handle;
    cublasCreate(&handle);

    /* Device accumulator for this GPU's share of the residual. */
    double* d_r1;
    lda = nO ;
    cudaStat = gpu_malloc((void **)&d_r1, lda * nV * sizeof(double));
    assert (cudaStat == cudaSuccess);
    cudaMemset(d_r1, 0, nO*nV*sizeof(double));
    /* Host r1 is zeroed by every thread before any thread reaches the
     * accumulation below; the implicit barrier at the end of the sections
     * construct orders all memsets before all accumulations. */
    memset(r1, 0, nO*nV*sizeof(double));

    /* Aliases into this GPU's preloaded device arrays. */
    double* d_cc_space_v_vo_chol = data[igpu].cc_space_v_vo_chol;
    double* d_cc_space_v_vv_chol = data[igpu].cc_space_v_vv_chol;
    double* d_cc_space_v_oovo = data[igpu].cc_space_v_oovo;
    double* d_cc_space_v_ovov = data[igpu].cc_space_v_ovov;
    double* d_cc_space_v_voov = data[igpu].cc_space_v_voov;
    double* d_cc_space_f_ov = data[igpu].cc_space_f_ov;
    double* d_cc_space_f_vo = data[igpu].cc_space_f_vo;
    double* d_tau = data[igpu].tau;
    double* d_t1 = data[igpu].t1;
    double* d_t2 = data[igpu].t2;
    double* d_H_oo = data[igpu].H_oo;
    double* d_H_vo = data[igpu].H_vo;
    double* d_H_vv = data[igpu].H_vv;

    /* Independent residual terms, one section per term, distributed over
     * the GPU-driving threads. */
#pragma omp sections
    {

      /* Term: r1 = f_ov + (t1^T . (-2 f_vo . t1-like))  via X_oo scratch. */
#pragma omp section
      {
        /* Initialize d_r1 with the Fock ov block. */
        cublasDcopy(handle, nO*nV, d_cc_space_f_ov, 1, d_r1, 1);

        double* d_X_oo;
        cudaStat = gpu_malloc((void **)&d_X_oo, nO*nO * sizeof(double));
        assert (cudaStat == cudaSuccess);

        /* X_oo = -2 * t1 . f_vo */
        alpha = -2.0;
        beta = 0.0;
        m=nO; n=nO; k=nV;
        A=d_t1; lda=nO;
        B=d_cc_space_f_vo; ldb=nV;
        C=d_X_oo; ldc=nO;
        cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);

        /* r1 += X_oo^T . t1 */
        alpha = 1.0;
        beta = 1.0;
        m=nO; n=nV; k=nO;
        A=d_X_oo; lda=nO;
        B=d_t1; ldb=nO;
        C=d_r1; ldc=nO;
        cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);

        cudaFree(d_X_oo);
      }

      /* Term: r1 += t1 . H_vv */
#pragma omp section
      {
        alpha = 1.0;
        beta = 1.0;
        m=nO; n=nV; k=nV;
        A=d_t1; lda=nO;
        B=d_H_vv; ldb=nV;
        C=d_r1; ldc=nO;
        cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
      }

      /* Term: r1 -= H_oo . t1 */
#pragma omp section
      {
        alpha = -1.0;
        beta = 1.0;
        m=nO; n=nV; k=nO;
        A=d_H_oo; lda=nO;
        B=d_t1; ldb=nO;
        C=d_r1; ldc=nO;
        cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
      }

      /* Term built from X_voov = 2*t2 - t2^T + t1*t1 combinations,
       * contracted with H_vo.  NOTE(review): the exact tensor identity is
       * inferred from the index arithmetic below — confirm against the
       * reference (Fortran) implementation. */
#pragma omp section
      {
        double* d_X_voov;
        cudaStat = gpu_malloc((void **)&d_X_voov, nV* nO* nO* nV * sizeof(double));
        assert (cudaStat == cudaSuccess);

        for (size_t i=0 ; i<nV ; ++i) {
          cudaStreamCreate(&(stream[i]));
        }
        alpha = -1.0;
        /* Per (i,bet) slice: X_voov(:,i,:,bet) = -t2 slice^T + t1(i,bet)*t1^T.
         * The host value t1[i+bet*nO] is used as the Dgeam beta scalar. */
        for (size_t i=0 ; i<nO ; ++i) {
          for (size_t bet=0 ; bet<nV ; ++bet) {
            cublasSetStream(handle, stream[bet]);
            beta = t1[i+bet*nO];
            A = &(d_t2[nO*(i+nO*nV*bet)]); lda = nO*nO;
            B = &(d_t1[0]); ldb = nO;
            C = &(d_X_voov[nV*(i+nO*nO*bet)]); ldc = nV*nO;
            cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, nV, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
          }
        }
        /* All streams must finish writing X_voov before it is updated
         * in place below. */
        cudaDeviceSynchronize();
        alpha = 1.0;
        beta = 2.0;
        /* X_voov(:,:,:,bet) += 2 * t2(:,:,:,bet)^T  (in-place Dgeam, C==A). */
        for (size_t bet=0 ; bet<nV ; ++bet) {
          cublasSetStream(handle, stream[bet]);
          A = &(d_X_voov[nV*nO*nO*bet]); lda = nV;
          B = &(d_t2[nO*nO*nV*bet]); ldb = nO*nO;
          C = A ; ldc = lda;
          cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nV, nO*nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
        }
        for (size_t i=0 ; i<nV ; ++i) {
          cudaStreamDestroy(stream[i]);
        }
        cublasSetStream(handle, NULL);

        /* r1 += X_voov^T . H_vo  (matrix-vector over the fused indices). */
        alpha = 1.0;
        beta = 1.0;
        m=nV*nO; n=nO*nV;
        A=d_X_voov; lda=nV * nO;
        B=d_H_vo; ldb=1;
        C=d_r1; ldc=1;
        cublasDgemv(handle, CUBLAS_OP_T, m, n, &alpha, A, lda, B, ldb, &beta, C, ldc);

        cudaFree(d_X_voov);
      }

      /* Term: r1 += (2 v_voov - v_ovov)-type combination contracted with t1. */
#pragma omp section
      {
        double* d_X_ovov;
        cudaStat = gpu_malloc((void **)&d_X_ovov, nO* nV* nO* nV * sizeof(double));
        assert (cudaStat == cudaSuccess);
        /* Start from v_ovov, then combine with transposed v_voov slices. */
        cublasDcopy(handle, nO*nV*nO*nV, d_cc_space_v_ovov, 1, d_X_ovov, 1);
        for (size_t i=0 ; i<nV ; ++i) {
          cudaStreamCreate(&(stream[i]));
        }
        alpha = -1.0;
        beta = 2.0;
        /* X_ovov(:,:,u,bet) = -v_ovov slice + 2 * v_voov slice^T  (in place). */
        for (size_t u=0 ; u<nO ; ++u) {
          for (size_t bet=0 ; bet<nV ; ++bet) {
            cublasSetStream(handle, stream[bet]);
            A = &(d_X_ovov[nO*nV*(u+nO*bet)]); lda = nO;
            B = &(d_cc_space_v_voov[(nV*(u+nO*nO*bet))]); ldb = nV*nO;
            C = A ; ldc = lda;
            cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
          }
        }
        for (size_t i=0 ; i<nV ; ++i) {
          cudaStreamDestroy(stream[i]);
        }
        cublasSetStream(handle, NULL);
        /* r1 += X_ovov^T . t1 (vector view over fused indices). */
        alpha = 1.0;
        beta = 1.0;
        m=nO*nV; n=nO*nV;
        A=d_X_ovov; lda=nO * nV;
        B=d_t1; ldb=1;
        C=d_r1; ldc=1;
        cublasDgemv(handle, CUBLAS_OP_T, m, n, &alpha, A, lda, B, ldb, &beta, C, ldc);
        cudaFree(d_X_ovov);
      }

      /* Term: contraction of tau with W_vvov built on the fly from the
       * Cholesky vectors, blocked over the virtual index to bound memory. */
#pragma omp section
      {
        double* d_T_vvoo;
        cudaStat = gpu_malloc((void **)&d_T_vvoo, nV*nV*nO*nO * sizeof(double));
        assert (cudaStat == cudaSuccess);
        /* T_vvoo = tau^T (pure transpose: alpha=0, beta=1). */
        alpha = 0.0;
        beta = 1.0;
        A = d_T_vvoo; lda = nV*nV;
        B = d_tau; ldb = nO*nO;
        C = A ; ldc = lda;
        cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nV*nV, nO*nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
        double* d_W_vvov;
        cudaStat = gpu_malloc((void **)&d_W_vvov, nV*nV*nO*BLOCK_SIZE * sizeof(double));
        assert (cudaStat == cudaSuccess);
        double* d_W_vvov_tmp;
        cudaStat = gpu_malloc((void **)&d_W_vvov_tmp, nV*nO*nV*BLOCK_SIZE * sizeof(double));
        assert (cudaStat == cudaSuccess);

        for (size_t iblock=0 ; iblock<nV ; iblock += BLOCK_SIZE) {
          /* mbs = actual block width (last block may be short). */
          const size_t mbs = BLOCK_SIZE < nV-iblock ? BLOCK_SIZE : nV-iblock;
          /* W_vvov_tmp = v_vo_chol^T . v_vv_chol(block)  over the Cholesky index. */
          alpha = 1.0;
          beta = 0.0;
          m=nV*nO; n=nV*mbs; k=cholesky_mo_num;
          A=d_cc_space_v_vo_chol; lda=cholesky_mo_num;
          B=&(d_cc_space_v_vv_chol[cholesky_mo_num*nV*iblock]); ldb=cholesky_mo_num;
          C=d_W_vvov_tmp; ldc=nV*nO;
          cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
          /* W_vvov(:,:,i,bet) = 2*tmp - tmp^T, Dgeams spread round-robin
           * over the nV streams via kk.
           * NOTE(review): streams are created/destroyed inside the iblock
           * loop; hoisting them outside would avoid repeated setup. */
          alpha = 2.0;
          beta = -1.0;
          size_t kk=0;
          for (size_t i=0 ; i<nV ; ++i) {
            cudaStreamCreate(&(stream[i]));
          }
          for (size_t i=0 ; i<nO ; ++i) {
            for (size_t bet=0 ; bet<mbs ; ++bet) {
              cublasSetStream(handle, stream[kk]);
              ++kk;
              if (kk >= nV) kk = 0;
              A = &(d_W_vvov_tmp[nV*(i+nO*nV*bet)]); lda = nV*nO;
              B = &(d_W_vvov_tmp[nV*(i+nO*nV*bet)]); ldb = nV*nO;
              C = &(d_W_vvov[nV*nV*(i+nO*bet)]); ldc = nV;
              cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nV, nV, &alpha, A, lda, &beta, B, ldb, C, ldc);
            }
          }
          for (size_t i=0 ; i<nV ; ++i) {
            cudaStreamDestroy(stream[i]);
          }
          cublasSetStream(handle, NULL);
          /* r1(:,block) += T_vvoo^T . W_vvov  (fused k = nO*nV*nV). */
          alpha = 1.0;
          beta = 1.0;
          m=nO; n=mbs; k=nO*nV*nV;
          A=d_T_vvoo; lda=nV*nV*nO;
          B=d_W_vvov; ldb=nO*nV*nV;
          C=&(d_r1[nO*iblock]); ldc=nO;
          cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
        }
        cudaFree(d_W_vvov);
        cudaFree(d_W_vvov_tmp);
        cudaFree(d_T_vvoo);
      }

      /* Term: r1 -= W_oovo^T . tau with W_oovo = 2 v_oovo - v_oovo^T (per slice). */
#pragma omp section
      {
        double* d_W_oovo;
        cudaStat = gpu_malloc((void **)&d_W_oovo, nO*nO*nV*nO * sizeof(double));
        assert (cudaStat == cudaSuccess);
        alpha = 2.0;
        beta = -1.0;
        for (size_t i=0 ; i<nV ; ++i) {
          cudaStreamCreate(&(stream[i]));
        }
        /* W_oovo(:,:,a,u) = 2 * v_oovo slice - v_oovo slice^T */
        for (size_t u=0 ; u<nO ; ++u) {
          for (size_t a=0 ; a<nV ; ++a) {
            cublasSetStream(handle, stream[a]);
            A = &(d_cc_space_v_oovo[nO*nO*(a+nV*u)]); lda = nO;
            B = &(d_cc_space_v_oovo[nO*nO*(a+nV*u)]); ldb = nO;
            C = &(d_W_oovo[nO*nO*(a+nV*u)]); ldc = nO;
            cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_T, nO, nO, &alpha, A, lda, &beta, B, ldb, C, ldc);
          }
        }
        for (size_t i=0 ; i<nV ; ++i) {
          cudaStreamDestroy(stream[i]);
        }
        cublasSetStream(handle, NULL);
        alpha = -1.0;
        beta = 1.0;
        m=nO; n=nV; k=nO*nO*nV;
        A=d_W_oovo; lda=nO * nO * nV;
        B=d_tau; ldb=nO * nO * nV;
        C=d_r1; ldc=nO;
        cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);
        cudaFree(d_W_oovo);
      }

    }  /* end omp sections (implicit barrier) */

    /* Pull this GPU's contribution back to the host and subtract it into
     * the shared r1 under a critical section.
     * NOTE(review): this malloc is not checked, unlike the device
     * allocations above — confirm intentional. */
    double * r1_tmp = malloc(nO*nV*sizeof(double));
    lda=nO;
    cublasGetMatrix(nO, nV, sizeof(double), d_r1, lda, r1_tmp, lda);
#pragma omp critical
    {
      for (size_t i=0 ; i<(size_t) nO*nV ; ++i) {
        r1[i] -= r1_tmp[i];
      }
    }
    free(r1_tmp);

    cudaFree(d_r1);

    cublasDestroy(handle);
  }

  /* Convergence metric: max_r1 = max_i |r1[i]|. */
  *max_r1 = 0.;
  for (size_t i=0 ; i<(size_t) nO*nV ; ++i) {
    const double x = r1[i] > 0. ? r1[i] : -r1[i];
    *max_r1 = *max_r1 > x ? *max_r1 : x;
  }
}
2023-08-05 00:50:58 +02:00
/* Computes the CCSD energy expression on the GPU as the sum of two dot
 * products over device-resident arrays:
 *
 *   E = 2 * <f_ov, t1> + <tau_x, v_oovv>
 *
 * data : per-GPU context array; data[igpu] holds device pointers for GPU
 *        igpu (t1, tau_x, integrals), and data->nO / data->nV give the
 *        occupied / virtual dimensions.
 *
 * Returns the accumulated energy.  One OpenMP thread drives each GPU; the
 * two dot products are distributed over the threads with `omp sections`
 * and the partial results are summed under a critical section.
 *
 * Fix vs. previous version: removed the unused `cudaError_t cudaStat`
 * declaration (it was never assigned or read here and triggered a
 * set-but-unused warning).
 */
double ccsd_energy_space_gpu(gpu_data* data)
{
  double result = 0.0;

  const size_t nO = data->nO;
  const size_t nV = data->nV;

  /* One OpenMP thread per GPU (single GPU unless MULTIGPU is enabled). */
  int ngpus = 1;
  if (MULTIGPU == 1) cudaGetDeviceCount(&ngpus);

#pragma omp parallel num_threads(ngpus)
  {
    size_t igpu = omp_get_thread_num();
    cudaSetDevice(igpu);

    cublasHandle_t handle;
    cublasCreate(&handle);

    double result_local = 0.0;

#pragma omp sections
    {
      /* 2 * <f_ov, t1> over the nO*nV singles block. */
#pragma omp section
      {
        double* d_cc_space_f_ov = data[igpu].cc_space_f_ov;
        double* d_t1 = data[igpu].t1;
        double x;
        cublasDdot(handle, nO*nV, d_cc_space_f_ov, 1, d_t1, 1, &x);
        result_local += 2.0*x;
      }
      /* <tau_x, v_oovv> over the full nO*nO*nV*nV doubles block. */
#pragma omp section
      {
        double* d_tau_x = data[igpu].tau_x;
        double* d_cc_space_v_oovv = data[igpu].cc_space_v_oovv;
        double x;
        cublasDdot(handle, nO*nO*nV*nV, d_tau_x, 1, d_cc_space_v_oovv, 1, &x);
        result_local += x;
      }
    }
    cublasDestroy(handle);

    /* Sum the per-thread partial energies. */
#pragma omp critical
    {
      result += result_local;
    }
  }
  return result;
}