Sherman-Morrison-Woodbury
Table of Contents
- 1. Headers
- 2. Naïve Sherman-Morrison
- 3. Sherman-Morrison with Slagel Splitting (core)
- 4. Sherman-Morrison with Slagel Splitting
- 5. End of files
1 Headers
#include "qmckl.h" #include "assert.h" #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <math.h> int main() { qmckl_context context; context = qmckl_context_create(); qmckl_exit_code rc;
This range determines how many high-performance kernel instances will be generated from the C-function templates defined in the sections below. If the C-function template is called qmckl_kernel_{Dim}, then range(K, L+1) results in kernel instances from qmckl_kernel_K to qmckl_kernel_L.
2 Naïve Sherman-Morrison
2.1 qmckl_sm_naive
2.1.1 Introduction
This is the simplest of the available Sherman-Morrison-Woodbury kernels. It applies the rank-1 updates one by one, in the given order. The only check it performs is whether the denominator in the Sherman-Morrison formula is too close to zero when an update is applied; if it is, the kernel exits with an error code.
#+TODO Change the math notation so that the update vectors appear as rows in the math, so that it is consistent with the representation in C (memory).
The formula for any update \(u_j\) (the index \(j\) is suppressed for clarity) is \[ (S + uv^T)^{-1} = S^{-1} - \frac{S^{-1} uv^T S^{-1}}{1 + v^T S^{-1} u} \]
where \(S\) is the Slater-matrix, \(u\) and \(v^T\) are the column and row vectors containing the update, and \(S^{-1}\) is the inverse of the Slater-matrix.
Even though the Slater-matrix \(S\) with all updates applied at once is invertible, during the course of applying the updates to the inverse Slater-matrix \(S^{-1}\) one by one, it can happen that one of the intermediate inverse matrices \(S^{-1}\) becomes singular. Therefore a global threshold value \(\epsilon\) is defined, against which each individual update \(u_j\) is evaluated when it is applied.
This value sets the lower bound below which the denominator \(1+v_j^TS^{-1}u_j\) is considered too small: such a denominator will most probably result in a singular matrix \(S\), or at least in an inverse of \(S\) of very poor numerical quality. Therefore, when \(|1+v_j^TS^{-1}u_j| \geq \epsilon\) the update is applied as usual and the kernel exits with return code \texttt{QMCKL_SUCCESS}, while for \(|1+v_j^TS^{-1}u_j| < \epsilon\) the update is rejected and the kernel exits with return code \texttt{QMCKL_FAILURE}.
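To see how an intermediate inverse can become singular even when the fully updated matrix is regular, consider this small hand-constructed illustration (not taken from the library):
\[
S = I_2,\qquad u_1 = \begin{pmatrix}-1\\0\end{pmatrix},\; v_1 = e_1,\qquad u_2 = \begin{pmatrix}2\\0\end{pmatrix},\; v_2 = e_1 .
\]
\[
1 + v_1^T S^{-1} u_1 = 0 \quad\Rightarrow\quad S + u_1 v_1^T = \mathrm{diag}(0,1)\ \text{is singular,}
\qquad\text{yet}\qquad S + u_1 v_1^T + u_2 v_2^T = \mathrm{diag}(2,1)\ \text{is invertible.}
\]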
If the determinant of the Slater-matrix is passed, it will be updated to the determinant resulting from applying the updates to the original matrix.
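The determinant update follows from the matrix determinant lemma; each accepted update simply multiplies the determinant by the corresponding Sherman-Morrison denominator:
\[
\det\!\left(S + u_j v_j^T\right) = \left(1 + v_j^T S^{-1} u_j\right)\,\det(S),
\]
which is why the kernels below multiply the running determinant by the denominator once per applied update.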
2.1.2 API
Variable | Type | In/Out | Description |
---|---|---|---|
context | qmckl_context | in | Global state |
LDS | uint64_t | in | Leading dimension of Slater_inv |
Dim | uint64_t | in | Dimension of Slater_inv |
N_updates | uint64_t | in | Number of rank-1 updates to be applied to Slater_inv |
Updates | double[N_updates*LDS] | in | Array containing the updates |
Updates_index | uint64_t[N_updates] | in | Array containing the positions of the rank-1 updates |
breakdown | double | in | Break-down parameter on which to fail or not |
Slater_inv | double[Dim*LDS] | inout | Array containing the inverse of the Slater-matrix |
determinant | double | inout | Determinant of the Slater-matrix |
2.1.3 Requirements
- context is not QMCKL_NULL_CONTEXT
- LDS >= 2
- Dim >= 2
- N_updates >= 1
- Updates is allocated with \(N_updates \times Dim\) elements
- Updates_index is allocated with \(N_updates\) elements
- breakdown is a small number such that \(0 < breakdown \ll 1\)
- Slater_inv is allocated with \(Dim \times Dim\) elements
- determinant > 0
2.1.4 Pedagogical kernel source (in Fortran)
The following source code, written in Fortran, is intended to illustrate how the kernel works. Although it produces numerically correct results, it does not do so in the most efficient way possible and should therefore not be used in real workloads.
integer function qmckl_sm_naive_doc_f(context, &
     lds, dim, &
     nupdates, &
     upds, &
     updates_index, &
     breakdown, &
     s_inv, &
     determinant) result(info)

  use qmckl
  implicit none
  integer*8 , intent(in)    :: context
  integer*8 , intent(in)    :: lds, dim
  integer*8 , intent(in)    :: nupdates
  integer*8 , intent(in)    :: updates_index(nupdates)
  real*8    , intent(in)    :: upds(nupdates * lds)
  real*8    , intent(in)    :: breakdown
  real*8    , intent(inout) :: s_inv(dim * lds)
  real*8    , intent(inout) :: determinant

  real*8 , dimension(lds, nupdates) :: Updates
  real*8 , dimension(dim, lds)      :: Inverse
  real*8 , dimension(dim)           :: C
  real*8 , dimension(lds)           :: D
  real*8    :: denominator, idenominator, update
  integer*8 :: i, j, l, row

  info = QMCKL_FAILURE

  if (context == QMCKL_NULL_CONTEXT) then
     info = QMCKL_INVALID_CONTEXT
     return
  endif

  ! Convert 'upds' and 's_inv' into the more easily readable Fortran
  ! matrices 'Updates' and 'Inverse'.
  call convert(upds, s_inv, Updates, Inverse, nupdates, lds, dim)

  l = 1
  ! For each update do...
  do while (l < nupdates + 1)

     ! Compute C = S^{-1}U(l)
     do i = 1, dim
        C(i) = 0
        do j = 1, dim
           C(i) = C(i) + Inverse(i, j) * Updates(j, l)
        end do
     end do

     ! Compute denominator = 1 + V(l)^TC
     row = updates_index(l)
     denominator = 1 + C(row)

     ! Return early if denominator is too small
     if (abs(denominator) < breakdown) return
     idenominator = 1 / denominator

     ! Update det(S)
     determinant = determinant * denominator

     ! selecting column: v_l^T * S_inv
     D = Inverse(row, :)

     ! A^{-1} = A^{-1} - C x D / denominator
     do i = 1, dim
        do j = 1, dim
           update = C(i) * D(j) * idenominator
           Inverse(i, j) = Inverse(i, j) - update
        end do
     end do

     l = l + 1
  end do

  ! Copy updated inverse back to s_inv
  call copy_back_inv(Inverse, s_inv, lds, dim)

  info = QMCKL_SUCCESS

end function qmckl_sm_naive_doc_f
2.1.4.1 C interface to the pedagogical kernel (not directly exposed)
The following Fortran function qmckl_sm_naive_doc makes sure that the pedagogical kernel qmckl_sm_naive_doc_f, written in Fortran, can be called from C using ISO_C_BINDING. The Fortran function qmckl_sm_naive_doc will be exposed in the header file 'qmckl.h' for C users and in the module file 'qmckl_f.F90' for Fortran users.
2.1.5 C headers (exposed in qmckl.h)
qmckl_exit_code qmckl_sm_naive ( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* determinant );
qmckl_exit_code qmckl_sm_naive_hpc ( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* determinant );
qmckl_exit_code qmckl_sm_naive_doc ( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* determinant );
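As a quick orientation, the following minimal sketch shows how qmckl_sm_naive might be called from C. The wrapper name apply_updates_example and the argument names (Upd, Upd_idx, Sinv, det) are placeholders for illustration, not part of the library; only the qmckl_sm_naive call itself follows the prototype above.

#include <stdint.h>
#include "qmckl.h"

/* Illustrative call sequence (hypothetical helper, placeholder data). */
qmckl_exit_code apply_updates_example(qmckl_context ctx,
                                      uint64_t LDS, uint64_t Dim,
                                      uint64_t n_upd,
                                      const double* Upd,       /* n_upd x LDS, one update per row */
                                      const uint64_t* Upd_idx, /* 1-based column indices of the updates */
                                      double* Sinv,            /* Dim x LDS inverse, updated in place */
                                      double* det)             /* running determinant of S */
{
  const double breakdown = 1e-3;   /* same threshold used in the tests below */
  qmckl_exit_code rc = qmckl_sm_naive(ctx, LDS, Dim, n_upd,
                                      Upd, Upd_idx, breakdown, Sinv, det);
  /* QMCKL_FAILURE means one of the denominators fell below 'breakdown';
     the inverse may then have been only partially updated. */
  return rc;
}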
2.1.6 C sources
Common includes and macros used by all the Sherman-Morrison-Woodbury kernels.
#include <stdbool.h> #include <math.h> #include "qmckl.h" #include "config.h" #include "assert.h" #include "stdio.h" // Order important because // __GNUC__ also set in ICC, ICX and CLANG // __clang__ also set in ICX #if defined(__INTEL_COMPILER) #define IVDEP _Pragma("ivdep") #define ALIGNED _Pragma("vector aligned") #elif defined(__INTEL_LLVM_COMPILER) #define IVDEP _Pragma("ivdep") #define ALIGNED _Pragma("vector aligned") #elif defined(__clang__) #define IVDEP _Pragma("clang loop vectorize(enable)") #define ALIGNED #elif defined(__GNUC__) #define IVDEP _Pragma("GCC ivdep") #define ALIGNED #endif
qmckl_sm_naive_hpc is a high-performance variant of qmckl_sm_naive written in C. It is used when Dim is smaller than the leading dimension LDS, irrespective of whether LDS includes zero padding to benefit from SIMD instructions or not. Cases like this include situations where one wants to apply updates to a square submatrix of the full matrix.
It takes advantage of memory-aligned data and assumes no data dependencies inside the loops. The loops are fully vectorised whenever Dim is an integer multiple of SIMD_LENGTH.
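For reference, the padded leading dimension used throughout these kernels is obtained by rounding Dim up to the next multiple of SIMD_LENGTH; a small sketch of that computation (the helper name padded_lds is ours, not part of the library, and uses the same expression that appears in qmckl_sm_naive and the tests below):

#include <stdint.h>

/* Round Dim up to the next multiple of the SIMD register width, e.g. with
   8 doubles per vector (AVX-512) and Dim = 21 this gives LDS = 24. */
static inline uint64_t padded_lds(uint64_t Dim, uint64_t simd_length) {
  return (1 + (Dim - 1) / simd_length) * simd_length;
}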
qmckl_exit_code qmckl_sm_naive_hpc(
    const qmckl_context context,
    const uint64_t LDS,
    const uint64_t Dim,
    const uint64_t N_updates,
    const double* __restrict Updates,
    const uint64_t* __restrict Updates_index,
    const double breakdown,
    double* __restrict Slater_inv,
    double* __restrict determinant)
{
  if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) {
    return qmckl_failwith(
        context,
        QMCKL_NULL_CONTEXT,
        "qmckl_sm_naive_hpc",
        NULL);
  }

  double __attribute__((aligned(8))) C[Dim];
  double __attribute__((aligned(8))) D[LDS];

  uint64_t l = 0;
  // For each update
  while (l < N_updates) {

    // C = S^{-1} x u_l
    for (uint64_t i = 0; i < Dim; i++) {
      C[i] = 0.0f;
      IVDEP
      ALIGNED
      for (uint64_t j = 0; j < Dim; j++) {
        C[i] += Slater_inv[i * LDS + j] * Updates[l * LDS + j];
      }
    }

    // Denominator: v_l^T * C
    const int cui = Updates_index[l] - 1;
    double den = 1.0f + C[cui];
    if (fabs(den) < breakdown) return QMCKL_FAILURE;
    double iden = 1.0f / den;

    // Update det(A)
    if (determinant) *determinant *= den;

    // selecting column: v_l^T * S_inv
    IVDEP
    ALIGNED
    for (uint64_t j = 0; j < Dim; j++) {
      D[j] = Slater_inv[cui * LDS + j];
    }

    // A^{-1} = A^{-1} - C x D / den
    for (uint64_t i = 0; i < Dim; i++) {
      IVDEP
      ALIGNED
      for (uint64_t j = 0; j < Dim; j++) {
        const double update = C[i] * D[j] * iden;
        Slater_inv[i * LDS + j] -= update;
      }
    }
    l += 1;
  }
  return QMCKL_SUCCESS;
}
qmckl_sm_naive_{Dim} is a C function template that is used to generate instances of C functions based on the range given above. The advantage of this method is that for each of these instances all the dimensions and loop bounds are known at compile time, allowing the compiler to optimize more aggressively.
static inline qmckl_exit_code qmckl_sm_naive_{Dim}( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_{Dim}", NULL); } #define D{Dim}_P ((1+({Dim}-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[{Dim}]; double __attribute__((aligned(8))) D[D{Dim}_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < {Dim}; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { C[i] += Slater_inv[i * D{Dim}_P + j] * Updates[l * D{Dim}_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { D[j] = Slater_inv[cui * D{Dim}_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < {Dim}; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D{Dim}_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; }
This is the kernel generator written in Python. It uses the kernel generator range and templates defined above to generate the C kernel instances.
text=""" static inline qmckl_exit_code qmckl_sm_naive_{Dim}( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_{Dim}", NULL); } #define D{Dim}_P ((1+({Dim}-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[{Dim}]; double __attribute__((aligned(8))) D[D{Dim}_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < {Dim}; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { C[i] += Slater_inv[i * D{Dim}_P + j] * Updates[l * D{Dim}_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { D[j] = Slater_inv[cui * D{Dim}_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < {Dim}; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D{Dim}_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } """ result = [] for Dim in range(2, 22): Dim=str(Dim) result.append(text.replace("{Dim}",Dim)) return ''.join(result)
Python script that generates the C switch cases calling the individual kernel instances.
text=""" case {Dim}: return qmckl_sm_naive_{Dim}(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant);""" result = [] for Dim in range(2, 22): Dim=str(Dim) result.append(text.replace("{Dim}",Dim)) return ''.join(result)
static inline qmckl_exit_code qmckl_sm_naive_2( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_2", NULL); } #define D2_P ((1+(2-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[2]; double __attribute__((aligned(8))) D[D2_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 2; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D2_P; j++) { C[i] += Slater_inv[i * D2_P + j] * Updates[l * D2_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D2_P; j++) { D[j] = Slater_inv[cui * D2_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 2; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D2_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D2_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_3( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_3", NULL); } #define D3_P ((1+(3-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[3]; double __attribute__((aligned(8))) D[D3_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 3; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D3_P; j++) { C[i] += Slater_inv[i * D3_P + j] * Updates[l * D3_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D3_P; j++) { D[j] = Slater_inv[cui * D3_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 3; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D3_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D3_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_4( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_4", NULL); } #define D4_P ((1+(4-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[4]; double __attribute__((aligned(8))) D[D4_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 4; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D4_P; j++) { C[i] += Slater_inv[i * D4_P + j] * Updates[l * D4_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + 
C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D4_P; j++) { D[j] = Slater_inv[cui * D4_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 4; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D4_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D4_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_5( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_5", NULL); } #define D5_P ((1+(5-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[5]; double __attribute__((aligned(8))) D[D5_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 5; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D5_P; j++) { C[i] += Slater_inv[i * D5_P + j] * Updates[l * D5_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D5_P; j++) { D[j] = Slater_inv[cui * D5_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 5; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D5_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D5_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_6( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_6", NULL); } #define D6_P ((1+(6-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[6]; double __attribute__((aligned(8))) D[D6_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 6; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D6_P; j++) { C[i] += Slater_inv[i * D6_P + j] * Updates[l * D6_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D6_P; j++) { D[j] = Slater_inv[cui * D6_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 6; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D6_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D6_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_7( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return 
qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_7", NULL); } #define D7_P ((1+(7-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[7]; double __attribute__((aligned(8))) D[D7_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 7; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D7_P; j++) { C[i] += Slater_inv[i * D7_P + j] * Updates[l * D7_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D7_P; j++) { D[j] = Slater_inv[cui * D7_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 7; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D7_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D7_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_8( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_8", NULL); } #define D8_P ((1+(8-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[8]; double __attribute__((aligned(8))) D[D8_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 8; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D8_P; j++) { C[i] += Slater_inv[i * D8_P + j] * Updates[l * D8_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D8_P; j++) { D[j] = Slater_inv[cui * D8_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 8; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D8_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D8_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_9( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_9", NULL); } #define D9_P ((1+(9-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[9]; double __attribute__((aligned(8))) D[D9_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 9; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D9_P; j++) { C[i] += Slater_inv[i * D9_P + j] * Updates[l * D9_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D9_P; j++) { D[j] = Slater_inv[cui * D9_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 9; 
i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D9_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D9_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_10( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_10", NULL); } #define D10_P ((1+(10-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[10]; double __attribute__((aligned(8))) D[D10_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 10; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D10_P; j++) { C[i] += Slater_inv[i * D10_P + j] * Updates[l * D10_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D10_P; j++) { D[j] = Slater_inv[cui * D10_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 10; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D10_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D10_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_11( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_11", NULL); } #define D11_P ((1+(11-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[11]; double __attribute__((aligned(8))) D[D11_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 11; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D11_P; j++) { C[i] += Slater_inv[i * D11_P + j] * Updates[l * D11_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D11_P; j++) { D[j] = Slater_inv[cui * D11_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 11; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D11_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D11_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_12( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_12", NULL); } #define D12_P ((1+(12-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[12]; double __attribute__((aligned(8))) D[D12_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 12; 
i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D12_P; j++) { C[i] += Slater_inv[i * D12_P + j] * Updates[l * D12_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D12_P; j++) { D[j] = Slater_inv[cui * D12_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 12; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D12_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D12_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_13( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_13", NULL); } #define D13_P ((1+(13-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[13]; double __attribute__((aligned(8))) D[D13_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 13; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D13_P; j++) { C[i] += Slater_inv[i * D13_P + j] * Updates[l * D13_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D13_P; j++) { D[j] = Slater_inv[cui * D13_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 13; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D13_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D13_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_14( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_14", NULL); } #define D14_P ((1+(14-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[14]; double __attribute__((aligned(8))) D[D14_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 14; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D14_P; j++) { C[i] += Slater_inv[i * D14_P + j] * Updates[l * D14_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D14_P; j++) { D[j] = Slater_inv[cui * D14_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 14; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D14_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D14_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_15( const qmckl_context context, const uint64_t N_updates, const 
double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_15", NULL); } #define D15_P ((1+(15-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[15]; double __attribute__((aligned(8))) D[D15_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 15; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D15_P; j++) { C[i] += Slater_inv[i * D15_P + j] * Updates[l * D15_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D15_P; j++) { D[j] = Slater_inv[cui * D15_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 15; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D15_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D15_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_16( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_16", NULL); } #define D16_P ((1+(16-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[16]; double __attribute__((aligned(8))) D[D16_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 16; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D16_P; j++) { C[i] += Slater_inv[i * D16_P + j] * Updates[l * D16_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D16_P; j++) { D[j] = Slater_inv[cui * D16_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 16; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D16_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D16_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_17( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_17", NULL); } #define D17_P ((1+(17-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[17]; double __attribute__((aligned(8))) D[D17_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 17; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D17_P; j++) { C[i] += Slater_inv[i * D17_P + j] * Updates[l * D17_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double 
iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D17_P; j++) { D[j] = Slater_inv[cui * D17_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 17; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D17_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D17_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_18( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_18", NULL); } #define D18_P ((1+(18-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[18]; double __attribute__((aligned(8))) D[D18_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 18; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D18_P; j++) { C[i] += Slater_inv[i * D18_P + j] * Updates[l * D18_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D18_P; j++) { D[j] = Slater_inv[cui * D18_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 18; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D18_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D18_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_19( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_19", NULL); } #define D19_P ((1+(19-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[19]; double __attribute__((aligned(8))) D[D19_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 19; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D19_P; j++) { C[i] += Slater_inv[i * D19_P + j] * Updates[l * D19_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D19_P; j++) { D[j] = Slater_inv[cui * D19_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 19; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D19_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D19_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_20( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, 
"qmckl_sm_naive_20", NULL); } #define D20_P ((1+(20-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[20]; double __attribute__((aligned(8))) D[D20_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 20; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D20_P; j++) { C[i] += Slater_inv[i * D20_P + j] * Updates[l * D20_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D20_P; j++) { D[j] = Slater_inv[cui * D20_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 20; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D20_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D20_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_naive_21( const qmckl_context context, const uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith(context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive_21", NULL); } #define D21_P ((1+(21-1)/SIMD_LENGTH)*SIMD_LENGTH) double __attribute__((aligned(8))) C[21]; double __attribute__((aligned(8))) D[D21_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = A^{-1} x U_l for (uint64_t i = 0; i < 21; i++) { C[i] = 0; IVDEP ALIGNED for (uint64_t j = 0; j < D21_P; j++) { C[i] += Slater_inv[i * D21_P + j] * Updates[l * D21_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { return QMCKL_FAILURE; } double iden = 1.0f / den; // Update det(A) if (determinant) *determinant *= den; // selecting column: D = v_l^T * S_inv IVDEP ALIGNED for (uint64_t j = 0; j < D21_P; j++) { D[j] = Slater_inv[cui * D21_P + j]; } // A^{-1} = A^{-1} - C x D / den for (uint64_t i = 0; i < 21; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D21_P; j++) { double update = C[i] * D[j] * iden; Slater_inv[i * D21_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; }
qmckl_sm_naive is a generic function containing the decision-making logic that calls the proper kernel, based on the library configuration used (--enable-doc and --enable-hpc) and the passed array dimensions LDS and Dim.
qmckl_exit_code qmckl_sm_naive(const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_naive", NULL); } #ifdef HAVE_HPC if (LDS == (1+(Dim-1)/SIMD_LENGTH)*SIMD_LENGTH) { // Most cases switch (Dim) { case 2: return qmckl_sm_naive_2(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 3: return qmckl_sm_naive_3(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 4: return qmckl_sm_naive_4(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 5: return qmckl_sm_naive_5(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 6: return qmckl_sm_naive_6(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 7: return qmckl_sm_naive_7(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 8: return qmckl_sm_naive_8(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 9: return qmckl_sm_naive_9(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 10: return qmckl_sm_naive_10(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 11: return qmckl_sm_naive_11(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 12: return qmckl_sm_naive_12(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 13: return qmckl_sm_naive_13(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 14: return qmckl_sm_naive_14(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 15: return qmckl_sm_naive_15(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 16: return qmckl_sm_naive_16(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 17: return qmckl_sm_naive_17(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 18: return qmckl_sm_naive_18(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 19: return qmckl_sm_naive_19(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 20: return qmckl_sm_naive_20(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); case 21: return qmckl_sm_naive_21(context, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); } } else { // Updating smaller sub-matrix return qmckl_sm_naive_hpc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); } #else return qmckl_sm_naive_doc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); #endif return QMCKL_FAILURE; }
2.1.7 Fortran interfaces (exposed in qmckl_f.F90)
2.1.8 Performance
This function performs best when there is only one rank-1 update in the update cycle. Sherman-Morrison with update splitting is not useful for such cycles, since splitting can never resolve a situation where applying the one and only update makes the matrix singular.
2.1.9 Tests
The tests for the kernels are executed on datasets extracted from a run of QMC=Chem on benzene (21 spin-up / 21 spin-down electrons) using 329 unique alpha determinants. The tests are run such that the kernels reject the computed inverse whenever an intermediate determinant or denominator is smaller than 1e-3, which is the default value in QMC=Chem. A test returns QMCKL_SUCCESS whenever all elements of the residual matrix \(R = S \cdot S^{-1} - \mathbb{1}\) are smaller in absolute value than the tolerance of 1e-3, and QMCKL_FAILURE otherwise.
const uint64_t Dim = 21;
const uint64_t LDS = (1+(Dim-1)/SIMD_LENGTH)*SIMD_LENGTH;
const double breakdown = 1e-3;
const double tolerance = 1e-3;
double res[441];

#include "sm_test.h"

assert(Updates1 != NULL);
assert(Updates_index1 != NULL);
assert(Slater_inv1 != NULL);

// original determinant of Slater1 (before applying updates)
double det = 3.407025646103221e-10;

rc = qmckl_sm_naive(context, LDS, Dim, N_updates1, Updates1,
                    Updates_index1, breakdown, Slater_inv1, &det);

// Check that the determinant is updated properly
assert(fabs(det + 4.120398385068217e-10) < 1e-15);

for (unsigned int i = 0; i < Dim; i++) {
  for (unsigned int j = 0; j < Dim; j++) {
    res[i * Dim + j] = 0;
    for (unsigned int k = 0; k < Dim; k++) {
      res[i * Dim + j] += Slater1[i * Dim + k] * Slater_inv1[k * LDS + j];
    }
  }
}

rc = QMCKL_SUCCESS;
for (unsigned int i = 0; i < Dim; i++) {
  for (unsigned int j = 0; j < Dim; j++) {
    if (i == j && fabs(res[i * Dim + j] - 1) > tolerance) { rc = QMCKL_FAILURE; }
    if (i != j && fabs(res[i * Dim + j]) > tolerance) { rc = QMCKL_FAILURE; }
  }
}
assert(rc == QMCKL_SUCCESS);
3 Sherman-Morrison with Slagel Splitting (core)
3.1 qmckl_sm_splitting_core
3.1.1 Introduction
qmckl_sm_splitting_core is the inner core of 'Sherman-Morrison with update splitting' described in the next section. It is not normally used by itself, although nothing prevents doing so. It has three extra parameters in its API:
- later_updates : an initially empty array that will contain the second halves of the updates that were split during kernel execution
- later_index : an initially empty array that will contain the row/column numbers of the updates that were split during execution
- later : an initially zero integer that records the number of updates that were split during execution
It is up to the user to decide what to do with these deferred updates once the kernel returns. Normally qmckl_sm_splitting_core is used as the core part of a recursive function, as is done in qmckl_sm_splitting, or as part of a more complex kernel like qmckl_sherman_morrison_smw32s.
If the determinant is passed, it will only be partially updated whenever there were any update splits.
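The reason the halving used in Slagel splitting moves the denominator away from zero can be seen directly from the Sherman-Morrison formula: if the full update is about to be rejected, the half update is accepted comfortably,
\[
1 + v_j^T S^{-1} \left(\tfrac{1}{2}u_j\right)
  = \tfrac{1}{2} + \tfrac{1}{2}\left(1 + v_j^T S^{-1} u_j\right)
  \approx \tfrac{1}{2}
  \quad\text{when}\quad 1 + v_j^T S^{-1} u_j \approx 0 .
\]
The remaining half \(\tfrac{1}{2}u_j\) is stored in later_updates and applied later, once the intermediate inverse has moved away from the singularity.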
3.1.2 API
Variable | Type | In/Out | Description |
---|---|---|---|
context | qmckl_context | in | Global state |
LDS | uint64_t | in | Leading dimension of Slater_inv |
Dim | uint64_t | in | Dimension of Slater_inv |
N_updates | uint64_t | in | Number of rank-1 updates to be applied to Slater_inv |
Updates | double[LDS*N_updates] | in | Array containing the rank-1 updates |
Updates_index | uint64_t[N_updates] | in | Array containing positions of the rank-1 updates |
breakdown | double | in | Break-down parameter on which to fail or not |
Slater_inv | double[Dim*LDS] | inout | Array containing the inverse Slater-matrix |
later_updates | double[LDS*N_updates] | inout | Array containing the split updates for later |
later_index | uint64_t[N_updates] | inout | Array containing the positions of the split updates for later |
later | uint64_t | inout | Number of split updates for later |
determinant | double | inout | Determinant of the Slater-matrix |
3.1.3 Requirements
- LDS >= 2
- Dim >= 2
- N_updates >= 1
- Updates is allocated with \(N_updates \times Dim\) elements
- Updates_index is allocated with \(N_updates\) elements
- breakdown is a small number such that \(0 < breakdown \ll 1\)
- Slater_inv is allocated with \(Dim \times Dim\) elements
- later_updates is allocated with \(later \times Dim\) elements
- later_index is allocated with \(N_updates\) elements
- later >= 0
3.1.4 Pedagogical kernel source (in Fortran)
The following source code, written in Fortran, is intended to illustrate how the kernel works. Although it produces numerically correct results, it does not do so in the most efficient way possible and should therefore not be used in real workloads.
integer function qmckl_sm_splitting_core_doc_f( & context, & lds, dim, & nupdates, & upds, & updates_index, & breakdown, & s_inv, & later_upds, & Later_index, & Later, & determinant) result(info) use qmckl implicit none integer*8 , intent(in) :: context integer*8 , intent(in) :: lds, dim integer*8 , intent(in) :: nupdates integer*8 , intent(in) :: updates_index(nupdates) real*8 , intent(in) :: upds(lds * nupdates) real*8 , intent(in) :: breakdown real*8 , intent(inout) :: s_inv(dim * lds) real*8 , intent(inout) :: determinant integer*8 , intent(inout) :: Later integer*8 , intent(inout) :: Later_index(nupdates) real*8 , intent(inout) :: later_upds(lds * nupdates) real*8 , dimension(lds, nupdates) :: Updates real*8 , dimension(lds, nupdates) :: Later_updates real*8 , dimension(dim, lds) :: Inverse real*8 , dimension(dim) :: C real*8 , dimension(lds) :: D real*8 :: denominator, idenominator, update integer*8 :: i, j, l, row write(*,*) "Entering 'qmckl_sm_splittinig_core_doc_f'" info = QMCKL_FAILURE if (context == QMCKL_NULL_CONTEXT) then info = QMCKL_INVALID_CONTEXT return endif ! Convert 'upds' and 's_inv' into the more easily readable Fortran ! matrices 'Updates' and 'Inverse'. call convert(upds, s_inv, Updates, Inverse, nupdates, lds, dim) l = 1; ! For each update do... do while (l < nupdates + 1) ! Compute C = S^{-1}U(l) do i = 1, dim C(i) = 0 do j = 1, dim C(i) = C(i) + Inverse(i, j) * Updates(j, l) end do end do ! Compute denominator = 1 + V(l)^TC row = updates_index(l) denominator = 1 + C(row) ! If denominator is too close to zero: ! - Split update in 2 before storing in Later_updates ! - Split previously computed vector C in 2 ! - Recompute the denominator if (abs(denominator) < breakdown) then do i = 1, dim Later_updates(i, l) = Updates(i, l) / 2 C(i) = C(i) / 2 end do Later_index(Later + 1) = updates_index(l) Later = Later + 1 denominator = 1 + C(row) end if idenominator = 1 / denominator ! Update det(S) determinant = determinant * denominator ! selecting column: v_l^T * S_inv D = Inverse(row, :) ! A^{-1} = A^{-1} - C x D / denominator do i = 1, dim do j = 1, dim update = C(i) * D(j) * idenominator Inverse(i, j) = Inverse(i, j) - update end do end do l = l + 1 end do ! Copy updated inverse and later updates ! back to s_inv and later_upds call copy_back_inv(Inverse, s_inv, lds, dim) call copy_back_lu(Later_Updates, later_upds, lds, nupdates) info = QMCKL_SUCCESS write(*,*) "Leaving 'qmckl_sm_splittinig_core_doc_f'" end function qmckl_sm_splitting_core_doc_f
3.1.4.1 C interface to the pedagogical kernel (not directly exposed)
The function qmckl_sm_splitting_core_doc makes sure that qmckl_sm_splitting_core_doc_f can be called from C using ISO_C_BINDING. Function qmckl_sm_splitting_core_doc will be exposed in qmckl.h and qmckl_f.F90, but qmckl_sm_splitting_core_doc_f will not.
3.1.5 C headers (exposed in qmckl.h)
qmckl_exit_code qmckl_sm_splitting_core ( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* later_updates, uint64_t* later_index, uint64_t* later, double* determinant );
qmckl_exit_code qmckl_sm_splitting_core_hpc ( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* later_updates, uint64_t* later_index, uint64_t* later, double* determinant );
qmckl_exit_code qmckl_sm_splitting_core_doc ( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* later_updates, uint64_t* later_index, uint64_t* later, double* determinant );
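To illustrate how the 'later' mechanism is meant to be consumed, here is a hedged sketch of a driver loop in the spirit of qmckl_sm_splitting. It is not the library's implementation; the wrapper name apply_with_splitting_example and its argument names are made up for this illustration. Only the qmckl_sm_splitting_core call follows the prototype above: the deferred half-updates are fed back into the kernel until none remain.

#include <stdint.h>
#include <string.h>
#include "qmckl.h"

/* Illustrative driver; buffers are sized as in the API table above. */
qmckl_exit_code apply_with_splitting_example(qmckl_context ctx,
                                             uint64_t LDS, uint64_t Dim,
                                             uint64_t n_upd,
                                             double* Upd,        /* n_upd x LDS updates */
                                             uint64_t* Upd_idx,  /* n_upd 1-based indices */
                                             double breakdown,
                                             double* Sinv,       /* Dim x LDS inverse */
                                             double* det)
{
  double   later_updates[n_upd * LDS];
  uint64_t later_index[n_upd];
  uint64_t later = 0;

  while (n_upd > 0) {
    later = 0;
    qmckl_exit_code rc = qmckl_sm_splitting_core(ctx, LDS, Dim, n_upd,
                                                 Upd, Upd_idx, breakdown, Sinv,
                                                 later_updates, later_index,
                                                 &later, det);
    if (rc != QMCKL_SUCCESS) return rc;

    /* The deferred second halves become the update list of the next pass. */
    memcpy(Upd, later_updates, later * LDS * sizeof(double));
    memcpy(Upd_idx, later_index, later * sizeof(uint64_t));
    n_upd = later;
  }
  return QMCKL_SUCCESS;
}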
3.1.6 C sources
qmckl_exit_code qmckl_sm_splitting_core_hpc( const qmckl_context context, uint64_t LDS, uint64_t Dim, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_hpc", NULL); } double __attribute__((aligned(8))) C[LDS]; double __attribute__((aligned(8))) D[LDS]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < Dim; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < LDS; j++) { C[i] += Slater_inv[i * LDS + j] * Updates[l * LDS + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < LDS; i++) { later_updates[*later * LDS + i] = Updates[l * LDS + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x LDS IVDEP ALIGNED for (uint64_t j = 0; j < LDS; j++) { D[j] = Slater_inv[cui * LDS + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < Dim; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < LDS; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * LDS + j] -= update; } } l += 1; } return QMCKL_SUCCESS; }
static inline qmckl_exit_code qmckl_sm_splitting_core_{Dim}( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_{Dim}", NULL); } double __attribute__((aligned(8))) C[D{Dim}_P]; double __attribute__((aligned(8))) D[D{Dim}_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < {Dim}; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { C[i] += Slater_inv[i * D{Dim}_P + j] * Updates[l * D{Dim}_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D{Dim}_P; i++) { later_updates[*later * D{Dim}_P + i] = Updates[l * D{Dim}_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D{Dim}_P IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { D[j] = Slater_inv[cui * D{Dim}_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < {Dim}; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D{Dim}_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; }
text=""" static inline qmckl_exit_code qmckl_sm_splitting_core_{Dim}( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_{Dim}", NULL); } double __attribute__((aligned(8))) C[D{Dim}_P]; double __attribute__((aligned(8))) D[D{Dim}_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < {Dim}; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { C[i] += Slater_inv[i * D{Dim}_P + j] * Updates[l * D{Dim}_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D{Dim}_P; i++) { later_updates[*later * D{Dim}_P + i] = Updates[l * D{Dim}_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D{Dim}_P IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { D[j] = Slater_inv[cui * D{Dim}_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < {Dim}; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D{Dim}_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D{Dim}_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } """ result = [] for Dim in range(2, 22): Dim=str(Dim) result.append(text.replace("{Dim}",Dim) ) return ''.join(result)
text=""" case {Dim}: { return qmckl_sm_splitting_core_{Dim}( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; }""" result = [] for Dim in range(2, 22): Dim=str(Dim) result.append(text.replace("{Dim}",Dim) ) return ''.join(result)
static inline qmckl_exit_code qmckl_sm_splitting_core_2( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_2", NULL); } double __attribute__((aligned(8))) C[D2_P]; double __attribute__((aligned(8))) D[D2_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 2; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D2_P; j++) { C[i] += Slater_inv[i * D2_P + j] * Updates[l * D2_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D2_P; i++) { later_updates[*later * D2_P + i] = Updates[l * D2_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D2_P IVDEP ALIGNED for (uint64_t j = 0; j < D2_P; j++) { D[j] = Slater_inv[cui * D2_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 2; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D2_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D2_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_3( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_3", NULL); } double __attribute__((aligned(8))) C[D3_P]; double __attribute__((aligned(8))) D[D3_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 3; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D3_P; j++) { C[i] += Slater_inv[i * D3_P + j] * Updates[l * D3_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D3_P; i++) { later_updates[*later * D3_P + i] = Updates[l * D3_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D3_P IVDEP ALIGNED for (uint64_t j = 0; j < D3_P; j++) { D[j] = Slater_inv[cui * D3_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 3; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D3_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D3_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } 
static inline qmckl_exit_code qmckl_sm_splitting_core_4( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_4", NULL); } double __attribute__((aligned(8))) C[D4_P]; double __attribute__((aligned(8))) D[D4_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 4; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D4_P; j++) { C[i] += Slater_inv[i * D4_P + j] * Updates[l * D4_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D4_P; i++) { later_updates[*later * D4_P + i] = Updates[l * D4_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D4_P IVDEP ALIGNED for (uint64_t j = 0; j < D4_P; j++) { D[j] = Slater_inv[cui * D4_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 4; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D4_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D4_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_5( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_5", NULL); } double __attribute__((aligned(8))) C[D5_P]; double __attribute__((aligned(8))) D[D5_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 5; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D5_P; j++) { C[i] += Slater_inv[i * D5_P + j] * Updates[l * D5_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D5_P; i++) { later_updates[*later * D5_P + i] = Updates[l * D5_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D5_P IVDEP ALIGNED for (uint64_t j = 0; j < D5_P; j++) { D[j] = Slater_inv[cui * D5_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 5; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D5_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D5_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } 
static inline qmckl_exit_code qmckl_sm_splitting_core_6( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_6", NULL); } double __attribute__((aligned(8))) C[D6_P]; double __attribute__((aligned(8))) D[D6_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 6; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D6_P; j++) { C[i] += Slater_inv[i * D6_P + j] * Updates[l * D6_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D6_P; i++) { later_updates[*later * D6_P + i] = Updates[l * D6_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D6_P IVDEP ALIGNED for (uint64_t j = 0; j < D6_P; j++) { D[j] = Slater_inv[cui * D6_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 6; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D6_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D6_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_7( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_7", NULL); } double __attribute__((aligned(8))) C[D7_P]; double __attribute__((aligned(8))) D[D7_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 7; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D7_P; j++) { C[i] += Slater_inv[i * D7_P + j] * Updates[l * D7_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D7_P; i++) { later_updates[*later * D7_P + i] = Updates[l * D7_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D7_P IVDEP ALIGNED for (uint64_t j = 0; j < D7_P; j++) { D[j] = Slater_inv[cui * D7_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 7; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D7_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D7_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } 
static inline qmckl_exit_code qmckl_sm_splitting_core_8( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_8", NULL); } double __attribute__((aligned(8))) C[D8_P]; double __attribute__((aligned(8))) D[D8_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 8; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D8_P; j++) { C[i] += Slater_inv[i * D8_P + j] * Updates[l * D8_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D8_P; i++) { later_updates[*later * D8_P + i] = Updates[l * D8_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D8_P IVDEP ALIGNED for (uint64_t j = 0; j < D8_P; j++) { D[j] = Slater_inv[cui * D8_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 8; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D8_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D8_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_9( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_9", NULL); } double __attribute__((aligned(8))) C[D9_P]; double __attribute__((aligned(8))) D[D9_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 9; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D9_P; j++) { C[i] += Slater_inv[i * D9_P + j] * Updates[l * D9_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D9_P; i++) { later_updates[*later * D9_P + i] = Updates[l * D9_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D9_P IVDEP ALIGNED for (uint64_t j = 0; j < D9_P; j++) { D[j] = Slater_inv[cui * D9_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 9; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D9_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D9_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } 
static inline qmckl_exit_code qmckl_sm_splitting_core_10( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_10", NULL); } double __attribute__((aligned(8))) C[D10_P]; double __attribute__((aligned(8))) D[D10_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 10; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D10_P; j++) { C[i] += Slater_inv[i * D10_P + j] * Updates[l * D10_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D10_P; i++) { later_updates[*later * D10_P + i] = Updates[l * D10_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D10_P IVDEP ALIGNED for (uint64_t j = 0; j < D10_P; j++) { D[j] = Slater_inv[cui * D10_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 10; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D10_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D10_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_11( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_11", NULL); } double __attribute__((aligned(8))) C[D11_P]; double __attribute__((aligned(8))) D[D11_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 11; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D11_P; j++) { C[i] += Slater_inv[i * D11_P + j] * Updates[l * D11_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D11_P; i++) { later_updates[*later * D11_P + i] = Updates[l * D11_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D11_P IVDEP ALIGNED for (uint64_t j = 0; j < D11_P; j++) { D[j] = Slater_inv[cui * D11_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 11; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D11_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D11_P + j] -= update; } } l += 
1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_12( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_12", NULL); } double __attribute__((aligned(8))) C[D12_P]; double __attribute__((aligned(8))) D[D12_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 12; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D12_P; j++) { C[i] += Slater_inv[i * D12_P + j] * Updates[l * D12_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D12_P; i++) { later_updates[*later * D12_P + i] = Updates[l * D12_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D12_P IVDEP ALIGNED for (uint64_t j = 0; j < D12_P; j++) { D[j] = Slater_inv[cui * D12_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 12; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D12_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D12_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_13( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_13", NULL); } double __attribute__((aligned(8))) C[D13_P]; double __attribute__((aligned(8))) D[D13_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 13; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D13_P; j++) { C[i] += Slater_inv[i * D13_P + j] * Updates[l * D13_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D13_P; i++) { later_updates[*later * D13_P + i] = Updates[l * D13_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D13_P IVDEP ALIGNED for (uint64_t j = 0; j < D13_P; j++) { D[j] = Slater_inv[cui * D13_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 13; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D13_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * 
D13_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_14( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_14", NULL); } double __attribute__((aligned(8))) C[D14_P]; double __attribute__((aligned(8))) D[D14_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 14; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D14_P; j++) { C[i] += Slater_inv[i * D14_P + j] * Updates[l * D14_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D14_P; i++) { later_updates[*later * D14_P + i] = Updates[l * D14_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D14_P IVDEP ALIGNED for (uint64_t j = 0; j < D14_P; j++) { D[j] = Slater_inv[cui * D14_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 14; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D14_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D14_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_15( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_15", NULL); } double __attribute__((aligned(8))) C[D15_P]; double __attribute__((aligned(8))) D[D15_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 15; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D15_P; j++) { C[i] += Slater_inv[i * D15_P + j] * Updates[l * D15_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D15_P; i++) { later_updates[*later * D15_P + i] = Updates[l * D15_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D15_P IVDEP ALIGNED for (uint64_t j = 0; j < D15_P; j++) { D[j] = Slater_inv[cui * D15_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 15; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D15_P; j++) { const double update = C[i] 
* D[j] * iden; Slater_inv[i * D15_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_16( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_16", NULL); } double __attribute__((aligned(8))) C[D16_P]; double __attribute__((aligned(8))) D[D16_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 16; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D16_P; j++) { C[i] += Slater_inv[i * D16_P + j] * Updates[l * D16_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D16_P; i++) { later_updates[*later * D16_P + i] = Updates[l * D16_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D16_P IVDEP ALIGNED for (uint64_t j = 0; j < D16_P; j++) { D[j] = Slater_inv[cui * D16_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 16; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D16_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D16_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_17( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_17", NULL); } double __attribute__((aligned(8))) C[D17_P]; double __attribute__((aligned(8))) D[D17_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 17; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D17_P; j++) { C[i] += Slater_inv[i * D17_P + j] * Updates[l * D17_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D17_P; i++) { later_updates[*later * D17_P + i] = Updates[l * D17_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D17_P IVDEP ALIGNED for (uint64_t j = 0; j < D17_P; j++) { D[j] = Slater_inv[cui * D17_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 17; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D17_P; j++) 
{ const double update = C[i] * D[j] * iden; Slater_inv[i * D17_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_18( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_18", NULL); } double __attribute__((aligned(8))) C[D18_P]; double __attribute__((aligned(8))) D[D18_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 18; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D18_P; j++) { C[i] += Slater_inv[i * D18_P + j] * Updates[l * D18_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D18_P; i++) { later_updates[*later * D18_P + i] = Updates[l * D18_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D18_P IVDEP ALIGNED for (uint64_t j = 0; j < D18_P; j++) { D[j] = Slater_inv[cui * D18_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 18; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D18_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D18_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_19( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_19", NULL); } double __attribute__((aligned(8))) C[D19_P]; double __attribute__((aligned(8))) D[D19_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 19; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D19_P; j++) { C[i] += Slater_inv[i * D19_P + j] * Updates[l * D19_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D19_P; i++) { later_updates[*later * D19_P + i] = Updates[l * D19_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D19_P IVDEP ALIGNED for (uint64_t j = 0; j < D19_P; j++) { D[j] = Slater_inv[cui * D19_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 19; i++) { IVDEP ALIGNED for 
(uint64_t j = 0; j < D19_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D19_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_20( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_20", NULL); } double __attribute__((aligned(8))) C[D20_P]; double __attribute__((aligned(8))) D[D20_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 20; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D20_P; j++) { C[i] += Slater_inv[i * D20_P + j] * Updates[l * D20_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D20_P; i++) { later_updates[*later * D20_P + i] = Updates[l * D20_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D20_P IVDEP ALIGNED for (uint64_t j = 0; j < D20_P; j++) { D[j] = Slater_inv[cui * D20_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 20; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D20_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D20_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; } static inline qmckl_exit_code qmckl_sm_splitting_core_21( const qmckl_context context, uint64_t N_updates, const double* __restrict Updates, const uint64_t* __restrict Updates_index, const double breakdown, double* __restrict Slater_inv, double* __restrict later_updates, uint64_t* __restrict later_index, uint64_t* __restrict later, double* __restrict determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_core_21", NULL); } double __attribute__((aligned(8))) C[D21_P]; double __attribute__((aligned(8))) D[D21_P]; uint64_t l = 0; // For each update while (l < N_updates) { // C = S^{-1} x U_l for (uint64_t i = 0; i < 21; i++) { C[i] = 0.0f; IVDEP ALIGNED for (uint64_t j = 0; j < D21_P; j++) { C[i] += Slater_inv[i * D21_P + j] * Updates[l * D21_P + j]; } } // Denominator const int cui = Updates_index[l] - 1; double den = 1.0f + C[cui]; if (fabs(den) < breakdown) { // U_l = U_l / 2: split the update in 2 equal halves and save the // second halve in later_updates IVDEP ALIGNED for (uint64_t i = 0; i < D21_P; i++) { later_updates[*later * D21_P + i] = Updates[l * D21_P + i] * 0.5f; C[i] *= 0.5f; } later_index[*later] = Updates_index[l]; (*later)++; den = 1.0f + C[cui]; } // From here onwards we continue with applying the first halve of the // update to Slater_inv double iden = 1.0f / den; if (determinant) *determinant *= den; // D = v^T x S^{-1} : 1 x D21_P IVDEP ALIGNED for (uint64_t j = 0; j < D21_P; j++) { D[j] = Slater_inv[cui * D21_P + j]; } // S^{-1} = S^{-1} - C x D / den for (uint64_t i = 0; i < 
21; i++) { IVDEP ALIGNED for (uint64_t j = 0; j < D21_P; j++) { const double update = C[i] * D[j] * iden; Slater_inv[i * D21_P + j] -= update; } } l += 1; } return QMCKL_SUCCESS; }
qmckl_exit_code qmckl_sm_splitting_core( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* later_updates, uint64_t* later_index, uint64_t* later, double* determinant) { #ifdef HAVE_HPC if (LDS == (1+(Dim-1)/SIMD_LENGTH)*SIMD_LENGTH) { // Most cases switch (Dim) { case 2: { return qmckl_sm_splitting_core_2( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 3: { return qmckl_sm_splitting_core_3( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 4: { return qmckl_sm_splitting_core_4( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 5: { return qmckl_sm_splitting_core_5( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 6: { return qmckl_sm_splitting_core_6( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 7: { return qmckl_sm_splitting_core_7( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 8: { return qmckl_sm_splitting_core_8( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 9: { return qmckl_sm_splitting_core_9( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 10: { return qmckl_sm_splitting_core_10( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 11: { return qmckl_sm_splitting_core_11( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 12: { return qmckl_sm_splitting_core_12( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 13: { return qmckl_sm_splitting_core_13( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 14: { return qmckl_sm_splitting_core_14( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 15: { return qmckl_sm_splitting_core_15( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 16: { return qmckl_sm_splitting_core_16( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 17: { return qmckl_sm_splitting_core_17( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 18: { return qmckl_sm_splitting_core_18( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 19: { return qmckl_sm_splitting_core_19( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 20: { return qmckl_sm_splitting_core_20( context, N_updates, Updates, 
Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } case 21: { return qmckl_sm_splitting_core_21( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); break; } default: { assert(0 == 1 && "TEMPLATE NOT IMPLEMENTED!"); break; } } } else { // Updating smaller sub-matrix return qmckl_sm_splitting_core_hpc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); } #else return qmckl_sm_splitting_core_doc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, later, determinant); #endif return QMCKL_FAILURE; }
3.1.7 Fortran interfaces (exposed in qmcklf.F90)
3.1.8 Performance
This function is not meant to be used by itself; it is used internally by Sherman-Morrison with update splitting and by Woodbury 3x3 and 2x2 with Sherman-Morrison and update splitting. Please refer to the performance recommendations for those two kernels.
4 Sherman-Morrison with Slagel Splitting
4.1 qmckl_sm_splitting
4.1.1 Introduction
This is a variation on the 'Naive' Sherman-Morrison kernel. Whenever the denominator \(1+v_j^T S^{-1} u_j\) in the Sherman-Morrison formula is deemed to be too close to zero, the update \(u_j\) is split in half: \(u_j \rightarrow \frac{1}{2} u_j\). One half is applied immediately, which moves the denominator away from zero, while the other half is put in a queue that will be applied once all the remaining updates have been treated.
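To see why the split helps, note that halving the update also halves its contribution to the denominator:

\[ 1 + v_j^T S^{-1}\left(\tfrac{1}{2}u_j\right) = 1 + \tfrac{1}{2}\,v_j^T S^{-1} u_j, \]

so if \(1 + v_j^T S^{-1} u_j \approx 0\), i.e. \(v_j^T S^{-1} u_j \approx -1\), the new denominator is approximately \(\tfrac{1}{2}\), safely away from the break-down threshold.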
The kernel is executed recursively until either the queue is empty, meaning that all updates have been applied successfully, or the size of the queue equals the number of initial updates. In the latter case the Slater-matrix that would result from applying the updates is singular, and the kernel exits with a failure code.
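The following stand-alone toy program (not part of QMCkl; the function name apply_updates and all numerical values are hypothetical) mimics this control flow on scalar "updates": an update whose stand-in denominator is too small is halved, the deferred halves are collected in a queue, and the routine recurses on that queue until it is empty or no progress can be made.

/* Toy illustration of Slagel splitting on scalars (hypothetical, not QMCkl code). */
#include <stdio.h>
#include <math.h>

/* Apply scalar "updates" u[0..n-1] to s, splitting an update in two halves
   whenever the stand-in denominator 1 + u/s would be too close to zero. */
static int apply_updates(double *s, const double *u, int n, double breakdown) {
  double later[16];                 /* queue of deferred halves (toy-sized) */
  int n_later = 0;

  for (int l = 0; l < n; l++) {
    double upd = u[l];
    double den = 1.0 + upd / *s;    /* stand-in for 1 + v^T S^{-1} u */
    if (fabs(den) < breakdown) {    /* split: apply one half now, queue the other */
      upd *= 0.5;
      later[n_later++] = upd;
    }
    *s += upd;                      /* stand-in for applying the rank-1 update */
  }

  if (n_later == n) return 1;       /* every update had to be split: give up */
  if (n_later > 0)  return apply_updates(s, later, n_later, breakdown);
  return 0;                         /* queue empty: all updates applied */
}

int main(void) {
  double s = 1.0;
  double u[] = {-0.999, 0.5};       /* the first update alone would make s nearly singular */
  int rc = apply_updates(&s, u, 2, 1e-2);
  printf("rc = %d, s = %g\n", rc, s);  /* all updates end up applied: s = 0.501 */
  return 0;
}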
If the determinant of the Slater-matrix is passed, it will be updated to the determinant resulting from applying the updates to the original matrix.
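The determinant update follows from the matrix determinant lemma: for each applied (half-)update,

\[ \det\!\left(S + u_j v_j^T\right) = \det(S)\,\left(1 + v_j^T S^{-1} u_j\right), \]

so the running determinant is simply multiplied by the denominator of every update that is applied, which is exactly what the C kernels do with *determinant *= den.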
4.1.2 API
Variable | Type | In/Out | Description |
---|---|---|---|
`context` | `qmckl_context` | in | Global state |
`LDS` | `uint64_t` | in | Leading dimension of `Slater_inv` |
`Dim` | `uint64_t` | in | Dimension of `Slater_inv` |
`N_updates` | `uint64_t` | in | Number of rank-1 updates to be applied to `Slater_inv` |
`Updates` | `double[N_updates*LDS]` | in | Array containing the updates |
`Updates_index` | `uint64_t[N_updates]` | in | Array containing the indices of the rank-1 updates |
`breakdown` | `double` | in | Break-down parameter on which to fail or not |
`Slater_inv` | `double[Dim*LDS]` | inout | Array containing the inverse of a Slater-matrix |
`determinant` | `double` | inout | Determinant of the Slater-matrix |
4.1.3 Requirements
- `context` is not `QMCKL_NULL_CONTEXT`
- `LDS >= 2`
- `Dim >= 2`
- `N_updates >= 1`
- `Updates` is allocated with \(N_updates \times Dim\) elements
- `Updates_index` is allocated with \(N_updates\) elements
- `breakdown` is a small number such that \(0 < breakdown << 1\)
- `Slater_inv` is allocated with \(Dim \times Dim\) elements
4.1.4 Pedagogical kernel source (in Fortran)
The following source code written in Fortran is intended to illustrate how the kernel works. Even though the kernel computes numerically correct results, it does not do so in the most efficient way possible. It should therefore not be used in real workloads.
integer recursive function qmckl_sm_splitting_doc_f( &
    context, &
    lds, dim, &
    nupdates, &
    upds, &
    updates_index, &
    breakdown, &
    s_inv, &
    determinant) result(info)

  use qmckl
  implicit none

  integer*8 , intent(in)    :: context
  integer*8 , intent(in)    :: lds, dim
  integer*8 , intent(in)    :: nupdates
  integer*8 , intent(in)    :: updates_index(nupdates)
  real*8    , intent(in)    :: upds(lds * nupdates)
  real*8    , intent(in)    :: breakdown
  real*8    , intent(inout) :: s_inv(dim * lds)
  real*8    , intent(inout) :: determinant

  integer   , external :: qmckl_sm_splitting_core_doc_f

  integer*8 :: Later
  integer*8 , dimension(nupdates)       :: Later_index
  real*8    , dimension(lds * nupdates) :: Later_updates

  write(*,*) "Entering 'qmckl_sm_splitting_doc_f'"

  info = QMCKL_FAILURE

  if (context == QMCKL_NULL_CONTEXT) then
     info = QMCKL_INVALID_CONTEXT
     return
  endif

  Later = 0
  Later_index = 0
  Later_updates = 0

  info = qmckl_sm_splitting_core_doc_f( &
       context, &
       lds, dim, &
       nupdates, &
       upds, &
       updates_index, &
       breakdown, &
       s_inv, &
       Later_updates, &
       Later_index, &
       Later, &
       determinant)

  if (Later > 0) then
     info = qmckl_sm_splitting_doc_f( &
          context, &
          lds, dim, &
          Later, &
          Later_updates, &
          Later_index, &
          breakdown, &
          s_inv, &
          determinant)
  end if

  info = QMCKL_SUCCESS

  write(*,*) "Leaving 'qmckl_sm_splitting_doc_f'"

end function qmckl_sm_splitting_doc_f
4.1.4.1 C interface to the pedagogical kernel (not directly exposed)
The following Fortran function qmckl_sm_splitting_doc makes sure that the pedagogical kernel qmckl_sm_splitting_doc_f, written in Fortran, can be called from C using ISO_C_BINDING. The function qmckl_sm_splitting_doc will be exposed in the header file 'qmckl.h' for C users and in the module file 'qmcklf.F90' for Fortran users.
4.1.5 C headers (exposed in qmckl.h)
qmckl_exit_code qmckl_sm_splitting (
    const qmckl_context context,
    const uint64_t LDS,
    const uint64_t Dim,
    const uint64_t N_updates,
    const double* Updates,
    const uint64_t* Updates_index,
    const double breakdown,
    double* Slater_inv,
    double* determinant );

qmckl_exit_code qmckl_sm_splitting_hpc (
    const qmckl_context context,
    const uint64_t LDS,
    const uint64_t Dim,
    const uint64_t N_updates,
    const double* Updates,
    const uint64_t* Updates_index,
    const double breakdown,
    double* Slater_inv,
    double* determinant );

qmckl_exit_code qmckl_sm_splitting_doc (
    const qmckl_context context,
    const uint64_t LDS,
    const uint64_t Dim,
    const uint64_t N_updates,
    const double* Updates,
    const uint64_t* Updates_index,
    const double breakdown,
    double* Slater_inv,
    double* determinant );
4.1.6 C source
text=""" case {Dim}: { rc = qmckl_sm_splitting_core_{Dim}( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } """ result = [] for Dim in range(2, 22): Dim=str(Dim) result.append(text.replace("{Dim}",Dim) ) return '\n'.join(result)
qmckl_exit_code qmckl_sm_splitting_hpc( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* determinant) { if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting_hpc", NULL); } double __attribute__((aligned(8))) later_updates[LDS * N_updates]; uint64_t later_index[N_updates]; uint64_t later = 0; qmckl_exit_code rc; if (LDS == (1+(Dim-1)/SIMD_LENGTH)*SIMD_LENGTH) { switch (Dim) { case 2: { rc = qmckl_sm_splitting_core_2( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 3: { rc = qmckl_sm_splitting_core_3( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 4: { rc = qmckl_sm_splitting_core_4( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 5: { rc = qmckl_sm_splitting_core_5( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 6: { rc = qmckl_sm_splitting_core_6( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 7: { rc = qmckl_sm_splitting_core_7( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 8: { rc = qmckl_sm_splitting_core_8( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 9: { rc = qmckl_sm_splitting_core_9( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 10: { rc = qmckl_sm_splitting_core_10( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 11: { rc = qmckl_sm_splitting_core_11( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 12: { rc = qmckl_sm_splitting_core_12( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 13: { rc = qmckl_sm_splitting_core_13( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 14: { rc = qmckl_sm_splitting_core_14( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 15: { rc = qmckl_sm_splitting_core_15( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 16: { rc = qmckl_sm_splitting_core_16( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 17: { rc = qmckl_sm_splitting_core_17( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 18: { rc = qmckl_sm_splitting_core_18( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 19: { rc = qmckl_sm_splitting_core_19( context, N_updates, Updates, 
Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 20: { rc = qmckl_sm_splitting_core_20( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } case 21: { rc = qmckl_sm_splitting_core_21( context, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); break; } default: { assert(0 == 1 && "TEMPLATE NOT IMPLEMENTED!"); break; } } } else { rc = qmckl_sm_splitting_core_hpc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, later_updates, later_index, &later, determinant); } if (rc != QMCKL_SUCCESS) return QMCKL_FAILURE; if (later > 0) { qmckl_exit_code rc = qmckl_sm_splitting_hpc( context, LDS, Dim, later, later_updates, later_index, breakdown, Slater_inv, determinant); if (rc != QMCKL_SUCCESS) return QMCKL_FAILURE; } return QMCKL_SUCCESS; }
qmckl_exit_code qmckl_sm_splitting( const qmckl_context context, const uint64_t LDS, const uint64_t Dim, const uint64_t N_updates, const double* Updates, const uint64_t* Updates_index, const double breakdown, double* Slater_inv, double* determinant) { printf("Entering 'qmckl_sm_splitting'\n"); if (qmckl_context_check(context) == QMCKL_NULL_CONTEXT) { return qmckl_failwith( context, QMCKL_NULL_CONTEXT, "qmckl_sm_splitting", NULL); } #ifdef HAVE_HPC return qmckl_sm_splitting_hpc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); #else return qmckl_sm_splitting_doc( context, LDS, Dim, N_updates, Updates, Updates_index, breakdown, Slater_inv, determinant); #endif printf("Leaving 'qmckl_sm_splitting'\n"); return QMCKL_SUCCESS; }
4.1.7 Fortran interfaces (exposed in qmcklf.F90)
4.1.8 Performance
This kernel performs best when there are two or more rank-1 update cycles and the fraction of updates that trigger a break-down (and therefore splitting) is high.
4.1.9 Test
assert(Updates3 != NULL);
assert(Updates_index3 != NULL);
assert(Slater_inv3_2 != NULL);

det = -1.23743195512859e-09;
rc = qmckl_sm_splitting(context, LDS, Dim, N_updates3,
                        Updates3, Updates_index3, breakdown,
                        Slater_inv3_2, &det);
assert(fabs(det - 1.602708950725074e-10) < 1e-15);

// Check that S3 x S3^{-1} is close to the identity after applying the updates
for (unsigned int i = 0; i < Dim; i++) {
  for (unsigned int j = 0; j < Dim; j++) {
    res[i * Dim + j] = 0;
    for (unsigned int k = 0; k < Dim; k++) {
      res[i * Dim + j] += Slater3[i * Dim + k] * Slater_inv3_2[k * LDS + j];
    }
  }
}

rc = QMCKL_SUCCESS;
for (unsigned int i = 0; i < Dim; i++) {
  for (unsigned int j = 0; j < Dim; j++) {
    if (i == j && fabs(res[i * Dim + j] - 1) > tolerance) {
      rc = QMCKL_FAILURE;
    }
    if (i != j && fabs(res[i * Dim + j]) > tolerance) {
      rc = QMCKL_FAILURE;
    }
  }
}
assert(rc == QMCKL_SUCCESS);
5 End of files
assert (qmckl_context_destroy(context) == QMCKL_SUCCESS);
  return 0;
}