diff --git a/src/csf/obtain_I_foralpha.irp.f b/src/csf/obtain_I_foralpha.irp.f
index 3a216066..5723c6fa 100644
--- a/src/csf/obtain_I_foralpha.irp.f
+++ b/src/csf/obtain_I_foralpha.irp.f
@@ -68,9 +68,9 @@ subroutine obtain_connected_I_foralpha(idxI, Ialpha, connectedI, idxs_connectedI
       !call debug_spindet(Jdomo,1)
       diffSOMO = IEOR(Isomo,Jsomo)
       ndiffSOMO = POPCNT(diffSOMO)
-      if(ndiffSOMO .NE. 2 .AND. ndiffSOMO .NE. 0) then
-        cycle
-      endif
+      !if(ndiffSOMO .NE. 2 .AND. ndiffSOMO .NE. 0) then
+      !  cycle
+      !endif
       diffDOMO = IEOR(Idomo,Jdomo)
       xordiffSOMODOMO = IEOR(diffSOMO,diffDOMO)
       ndiffDOMO = POPCNT(diffDOMO)
diff --git a/src/csf/sigma_vector.irp.f b/src/csf/sigma_vector.irp.f
index 1ba63c1d..078565ae 100644
--- a/src/csf/sigma_vector.irp.f
+++ b/src/csf/sigma_vector.irp.f
@@ -1101,7 +1101,7 @@ subroutine calculate_sigma_vector_cfg_nst_naive_store(psi_out, psi_in, n_st, sze
     ! Initialize the inegral container
     ! dims : (totcolsTKI, nconnectedI)
     allocate(GIJpqrs(totcolsTKI,nconnectedI))  ! gpqrs
-    allocate(TKIGIJ(rowsTKI,n_st,nconnectedI))  ! gpqrs
+    allocate(TKIGIJ(rowsTKI,n_st,nconnectedI))  ! TKI * gpqrs
 
     totcolsTKI = 0
     do j = 1,nconnectedI
@@ -1151,6 +1151,23 @@ subroutine calculate_sigma_vector_cfg_nst_naive_store(psi_out, psi_in, n_st, sze
          TKI, size(TKI,1)*n_st, GIJpqrs, size(GIJpqrs,1), 0.d0, &
          TKIGIJ , size(TKIGIJ,1)*n_st )
 
+    !print *,"DIMs = ",rowsTKI,n_st,totcolsTKI,nconnectedI
+    !print *,"TKI mat"
+    !do kk=1,n_st
+    !  do j=1,totcolsTKI
+    !    print *,TKI(:,kk,j)
+    !  enddo
+    !  print *,"--"
+    !enddo
+
+    !print *,"TKIGIJ mat"
+    !do kk=1,n_st
+    !  do j=1,nconnectedI
+    !    print *,TKIGIJ(:,kk,j)
+    !  enddo
+    !  print *,"--"
+    !enddo
+
     ! Collect the result
 
     totcolsTKI = 0
@@ -1186,12 +1203,15 @@ subroutine calculate_sigma_vector_cfg_nst_naive_store(psi_out, psi_in, n_st, sze
 
 
   ! Add the diagonal contribution
+  do kk=1,n_st
   do i = 1,n_CSF
-    psi_out(i,1) += 1.0d0*diag_energies(i)*psi_in(i,1)
+    psi_out(i,kk) += 1.0d0*diag_energies(i)*psi_in(i,kk)
+  enddo
   enddo
 
-end subroutine calculate_sigma_vector_cfg_nst
+end subroutine calculate_sigma_vector_cfg_nst_naive_store
+
 
 subroutine calculate_sigma_vector_cfg_nst(psi_out, psi_in, n_st, sze, istart, iend, ishift, istep)
   implicit none
   use bitmasks
@@ -1525,8 +1545,10 @@ subroutine calculate_sigma_vector_cfg_nst(psi_out, psi_in, n_st, sze, istart, ie
 
 
   ! Add the diagonal contribution
+  do kk=1,n_st
   do i = 1,n_CSF
-    psi_out(i,1) += 1.0d0*diag_energies(i)*psi_in(i,1)
+    psi_out(i,kk) += 1.0d0*diag_energies(i)*psi_in(i,kk)
+  enddo
   enddo
 
diff --git a/src/davidson/diagonalization_hcsf_dressed.irp.f b/src/davidson/diagonalization_hcsf_dressed.irp.f
index 77f21f2b..3735a227 100644
--- a/src/davidson/diagonalization_hcsf_dressed.irp.f
+++ b/src/davidson/diagonalization_hcsf_dressed.irp.f
@@ -88,7 +88,7 @@ subroutine davidson_diag_csf_hjj(dets_in,u_in,H_jj,energies,dim_in,sze,sze_csf,N
   double precision, intent(out)  :: energies(N_st_diag_in)
 
   integer                        :: iter, N_st_diag
-  integer                        :: i,j,k,l,m,kk
+  integer                        :: i,j,k,l,m,kk,ii
   logical, intent(inout)         :: converged
 
   double precision, external     :: u_dot_v, u_dot_u
@@ -110,6 +110,7 @@ subroutine davidson_diag_csf_hjj(dets_in,u_in,H_jj,energies,dim_in,sze,sze_csf,N
   integer                        :: order(N_st_diag_in)
   double precision               :: cmax
   double precision, allocatable  :: U(:,:), U_csf(:,:), overlap(:,:)
+  double precision, allocatable  :: tmpU(:,:), tmpW(:,:)
   double precision, pointer      :: W(:,:), W_csf(:,:)
   logical                        :: disk_based
   double precision               :: energy_shift(N_st_diag_in*davidson_sze_max)
@@ -313,9 +314,24 @@ subroutine davidson_diag_csf_hjj(dets_in,u_in,H_jj,energies,dim_in,sze,sze_csf,N
        !call convertWFfromDETtoCSF(N_st_diag,W,W_csf(1,shift+1))
        ! call calculate_sigma_vector_cfg_nst(W_csf(1,shift+1),U_csf(1,shift+1),N_st_diag,sze_csf,1,sze_csf,0,1)
        ! ! TODO : psi_det_size ? for psi_det
+       allocate(tmpW(sze_csf,N_st_diag))
+       allocate(tmpU(sze_csf,N_st_diag))
        do kk=1,N_st_diag
-         call calculate_sigma_vector_cfg_nst_naive_store(W_csf(1,shift+kk),U_csf(1,shift+kk),1,sze_csf,1,sze_csf,0,1)
+         do ii=1,sze_csf
+           tmpU(ii,kk) = U_csf(ii,shift+kk)
+         enddo
        enddo
+       call calculate_sigma_vector_cfg_nst(tmpW,tmpU,N_st_diag,sze_csf,1,sze_csf,0,1)
+       do kk=1,N_st_diag
+         do ii=1,sze_csf
+           W_csf(ii,shift+kk)=tmpW(ii,kk)
+         enddo
+       enddo
+       deallocate(tmpW)
+       deallocate(tmpU)
+       !do kk=1,N_st_diag
+       !  call calculate_sigma_vector_cfg_nst_naive_store(W_csf(1,shift+kk),U_csf(1,shift+kk),1,sze_csf,1,sze_csf,0,1)
+       !enddo
      else
        !call convertWFfromCSFtoDET(N_st_diag,U_csf(1,shift+1),U)
        !call convertWFfromCSFtoDET(N_st_diag,W_csf(1,shift+1),W)
@@ -323,9 +339,24 @@ subroutine davidson_diag_csf_hjj(dets_in,u_in,H_jj,energies,dim_in,sze,sze_csf,N
        !call convertWFfromDETtoCSF(N_st_diag,U,U_csf(1,shift+1))
        !call convertWFfromDETtoCSF(N_st_diag,W,W_csf(1,shift+1))
        ! call calculate_sigma_vector_cfg_nst(W_csf(1,shift+1),U_csf(1,shift+1),N_st_diag,sze_csf,1,sze_csf,0,1)
+       allocate(tmpW(sze_csf,N_st_diag))
+       allocate(tmpU(sze_csf,N_st_diag))
        do kk=1,N_st_diag
-         call calculate_sigma_vector_cfg_nst_naive_store(W_csf(1,shift+kk),U_csf(1,shift+kk),1,sze_csf,1,sze_csf,0,1)
+         do ii=1,sze_csf
+           tmpU(ii,kk) = U_csf(ii,shift+kk)
+         enddo
        enddo
+       call calculate_sigma_vector_cfg_nst(tmpW,tmpU,N_st_diag,sze_csf,1,sze_csf,0,1)
+       do kk=1,N_st_diag
+         do ii=1,sze_csf
+           W_csf(ii,shift+kk)=tmpW(ii,kk)
+         enddo
+       enddo
+       deallocate(tmpW)
+       deallocate(tmpU)
+       !do kk=1,N_st_diag
+       !  call calculate_sigma_vector_cfg_nst_naive_store(W_csf(1,shift+kk),U_csf(1,shift+kk),1,sze_csf,1,sze_csf,0,1)
+       !enddo
      endif
    else
      ! Already computed in update below
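
For readers who want to exercise the tmpU/tmpW staging pattern from davidson_diag_csf_hjj in isolation, here is a minimal, self-contained Fortran sketch. It is not part of the patch: sigma_stub is a hypothetical stand-in for calculate_sigma_vector_cfg_nst (whose real signature carries extra istart/iend/ishift/istep arguments), and the dimensions are toy values. It shows the three steps the diff introduces: gather the N_st_diag Krylov vectors starting at column shift+1 into contiguous work arrays, apply the multi-state sigma routine once for all states, then scatter the result back.

! Minimal sketch (not from the patch) of the copy-in / single multi-state
! call / copy-out pattern used in davidson_diag_csf_hjj.
! sigma_stub is a hypothetical stand-in for calculate_sigma_vector_cfg_nst;
! it simply doubles its input so the staging logic can be verified.
program staging_demo
  implicit none
  integer, parameter :: sze_csf = 5, N_st_diag = 2, shift = 3
  double precision   :: U_csf(sze_csf, shift+N_st_diag)
  double precision   :: W_csf(sze_csf, shift+N_st_diag)
  double precision, allocatable :: tmpU(:,:), tmpW(:,:)
  integer :: ii, kk

  call random_number(U_csf)
  W_csf = 0.d0

  ! Gather the N_st_diag vectors starting at column shift+1 contiguously
  allocate(tmpW(sze_csf,N_st_diag))
  allocate(tmpU(sze_csf,N_st_diag))
  do kk=1,N_st_diag
    do ii=1,sze_csf
      tmpU(ii,kk) = U_csf(ii,shift+kk)
    enddo
  enddo

  ! One call handles all states, instead of one naive_store call per state
  call sigma_stub(tmpW, tmpU, N_st_diag, sze_csf)

  ! Scatter the result back into the Krylov array
  do kk=1,N_st_diag
    do ii=1,sze_csf
      W_csf(ii,shift+kk) = tmpW(ii,kk)
    enddo
  enddo
  deallocate(tmpW)
  deallocate(tmpU)

  ! Expect 0.0: the stub doubles, so W must equal 2*U on the staged columns
  print *, maxval(abs(W_csf(:,shift+1:) - 2.d0*U_csf(:,shift+1:)))

contains

  subroutine sigma_stub(psi_out, psi_in, n_st, sze)
    integer, intent(in)           :: n_st, sze
    double precision, intent(in)  :: psi_in(sze,n_st)
    double precision, intent(out) :: psi_out(sze,n_st)
    psi_out(:,:) = 2.d0*psi_in(:,:)   ! placeholder for the real sigma build
  end subroutine sigma_stub

end program staging_demo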