diff --git a/src/det.irp.f b/src/det.irp.f
index 0982d07..85494f2 100644
--- a/src/det.irp.f
+++ b/src/det.irp.f
@@ -1114,6 +1114,9 @@ end
   endif
   !DIR$ FORCEINLINE
   call bitstring_to_list ( psi_det_alpha(1,det_i), mo_list_alpha_curr, l, N_int )
+  if (l /= elec_alpha_num) then
+    stop 'error in number of alpha electrons'
+  endif
 
 END_PROVIDER
 
@@ -1132,8 +1135,12 @@ END_PROVIDER
   else
     mo_list_beta_prev = 0
   endif
+  !DIR$ FORCEINLINE
   call bitstring_to_list ( psi_det_beta(1,det_j), mo_list_beta_curr, l, N_int )
+  if (l /= elec_beta_num) then
+    stop 'error in number of beta electrons'
+  endif
 
 END_PROVIDER
 
 BEGIN_PROVIDER [ double precision, det_alpha_value_curr ]
diff --git a/src/mo.irp.f b/src/mo.irp.f
index eb300af..4b9bba7 100644
--- a/src/mo.irp.f
+++ b/src/mo.irp.f
@@ -701,13 +701,13 @@ subroutine sparse_full_mv(A,LDA, &
 
   ! LDC and LDA have to be factors of simd_sp
 
-  IRP_IF NO_PREFETCH
-  IRP_ELSE
-  call MM_PREFETCH (A(j,indices(1)),3)
-  call MM_PREFETCH (A(j,indices(2)),3)
-  call MM_PREFETCH (A(j,indices(3)),3)
-  call MM_PREFETCH (A(j,indices(4)),3)
-  IRP_ENDIF
+! IRP_IF NO_PREFETCH
+! IRP_ELSE
+! call MM_PREFETCH (A(1,indices(1)),3)
+! call MM_PREFETCH (A(1,indices(2)),3)
+! call MM_PREFETCH (A(1,indices(3)),3)
+! call MM_PREFETCH (A(1,indices(4)),3)
+! IRP_ENDIF
 
   !DIR$ SIMD
   do j=1,LDC
@@ -757,13 +757,13 @@ subroutine sparse_full_mv(A,LDA, &
     !DIR$ VECTOR ALIGNED
     !DIR$ SIMD FIRSTPRIVATE(d11,d21,d31,d41)
     do j=1,$IRP_ALIGN/4
-      IRP_IF NO_PREFETCH
-      IRP_ELSE
-      call MM_PREFETCH (A(j+k,indices(kao+4)),3)
-      call MM_PREFETCH (A(j+k,indices(kao+5)),3)
-      call MM_PREFETCH (A(j+k,indices(kao+6)),3)
-      call MM_PREFETCH (A(j+k,indices(kao+7)),3)
-      IRP_ENDIF
+!     IRP_IF NO_PREFETCH
+!     IRP_ELSE
+!     call MM_PREFETCH (A(j+k,indices(kao+4)),3)
+!     call MM_PREFETCH (A(j+k,indices(kao+5)),3)
+!     call MM_PREFETCH (A(j+k,indices(kao+6)),3)
+!     call MM_PREFETCH (A(j+k,indices(kao+7)),3)
+!     IRP_ENDIF
       C1(j+k) = C1(j+k) + A(j+k,k_vec(1))*d11 + A(j+k,k_vec(2))*d21&
                         + A(j+k,k_vec(3))*d31 + A(j+k,k_vec(4))*d41
     enddo