mirror of https://gitlab.com/scemama/qmcchem.git synced 2024-12-22 20:33:32 +01:00

Optimizations and better time step error

commit 59d0186209 by Anthony Scemama, 2016-06-24 09:18:50 +02:00
8 changed files with 735 additions and 332 deletions


@@ -11,9 +11,6 @@ mo_basis
 mo_tot_num     integer
 mo_coef        real      (ao_basis_ao_num,mo_basis_mo_tot_num)
 mo_classif     character (mo_basis_mo_tot_num)
-mo_closed_num  integer   =n_count_ch(mo_basis_mo_classif,size(mo_basis_mo_classif),'c')
-mo_active_num  integer   =n_count_ch(mo_basis_mo_classif,size(mo_basis_mo_classif),'a')
-mo_virtual_num integer   =n_count_ch(mo_basis_mo_classif,size(mo_basis_mo_classif),'v')
 mo_energy      real      (mo_basis_mo_tot_num)
 mo_occ         real      (mo_basis_mo_tot_num)
 mo_symmetry    character*(8) (mo_basis_mo_tot_num)


@@ -48,11 +48,11 @@ subroutine pow_l(r,a,x1,x2,x3)
 end function
-BEGIN_PROVIDER  [ real, ao_axis_block,        ((-2*simd_sp+1):ao_block_num_8) ]
-&BEGIN_PROVIDER [ real, ao_axis_grad_block_x, ((-2*simd_sp+1):ao_block_num_8) ]
-&BEGIN_PROVIDER [ real, ao_axis_grad_block_y, ((-2*simd_sp+1):ao_block_num_8) ]
-&BEGIN_PROVIDER [ real, ao_axis_grad_block_z, ((-2*simd_sp+1):ao_block_num_8) ]
-&BEGIN_PROVIDER [ real, ao_axis_lapl_block,   ((-2*simd_sp+1):ao_block_num_8) ]
+BEGIN_PROVIDER  [ real, ao_axis_block,        (ao_block_num_8) ]
+&BEGIN_PROVIDER [ real, ao_axis_grad_block_x, (ao_block_num_8) ]
+&BEGIN_PROVIDER [ real, ao_axis_grad_block_y, (ao_block_num_8) ]
+&BEGIN_PROVIDER [ real, ao_axis_grad_block_z, (ao_block_num_8) ]
+&BEGIN_PROVIDER [ real, ao_axis_lapl_block,   (ao_block_num_8) ]
 implicit none
 include '../types.F'
@@ -111,13 +111,13 @@ end function
   ao_axis_block(idx) = p023 * p10
   p023 = real_of_int(pow1) * p023
-  ao_axis_grad_block_x(idx) = p023 * p11
-  ao_axis_grad_block_y(idx) = p013 * p21
-  ao_axis_grad_block_z(idx) = p012 * p31
   ao_axis_lapl_block(idx) = real_of_int(pow1-1) * p023 * p12 &
                           + real_of_int(pow2-1) * p013 * p22 &
                           + real_of_int(pow3-1) * p012 * p32
+  ao_axis_grad_block_x(idx) = p023 * p11
+  ao_axis_grad_block_y(idx) = p013 * p21
+  ao_axis_grad_block_z(idx) = p012 * p31
 enddo
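
The old declarations padded these arrays with a negative lower bound such as ((-2*simd_sp+1):ao_block_num_8), shifting each array start by a different multiple of the SIMD width; this commit drops the padding everywhere, which is also why the transpose calls later in the commit lose their +n*simd_sp leading dimensions. For reference, a tiny standalone Fortran sketch (not from the repository; simd_sp is a provider in the real code and just a constant here) showing that a shifted lower bound only changes the index range while enlarging the storage:

    program lower_bound_demo
      implicit none
      integer, parameter :: simd_sp = 8         ! assumed SIMD width, in single-precision words
      real :: padded((-2*simd_sp+1):16)         ! old style: 16 + 2*simd_sp elements, first index is -15
      real :: plain(16)                         ! new style: exactly 16 elements, first index is 1
      padded = 0. ; plain = 0.
      print *, 'padded: lbound =', lbound(padded,1), ' size =', size(padded)
      print *, 'plain : lbound =', lbound(plain,1),  ' size =', size(plain)
    end program lower_bound_demo
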


@@ -44,8 +44,8 @@ END_SHELL
 real, allocatable :: elec_coord_tmp(:,:,:)
 integer :: mod_align
-double precision :: E_loc_save(walk_num_dmc_max)
-double precision :: E_loc_save_tmp(walk_num_dmc_max)
+double precision :: E_loc_save(4,walk_num_dmc_max)
+double precision :: E_loc_save_tmp(4,walk_num_dmc_max)
 double precision :: psi_value_save(walk_num)
 double precision :: psi_value_save_tmp(walk_num)
 double precision :: srmc_weight(walk_num)
@@ -124,7 +124,7 @@ END_SHELL
     psi_grad_psi_inv_z(i) = psi_grad_psi_inv_save(i,3,i_walk)
   enddo
   psi_value = psi_value_save(i_walk)
-  E_loc = E_loc_save(i_walk)
+  E_loc = E_loc_save(1,i_walk)
 enddo
 SOFT_TOUCH elec_coord psi_grad_psi_inv_x psi_grad_psi_inv_y psi_grad_psi_inv_z psi_value E_loc
 else
@@ -135,7 +135,7 @@ END_SHELL
   enddo
   TOUCH elec_coord
   psi_value_save(i_walk) = psi_value
-  E_loc_save(i_walk) = E_loc
+  E_loc_save(:,i_walk) = E_loc
 endif
 double precision :: p,q
@@ -144,19 +144,54 @@ END_SHELL
   call brownian_step(p,q,accepted,delta_x)
   if ( psi_value * psi_value_save(i_walk) >= 0.d0 ) then
-    delta = ((E_loc+E_loc_save(i_walk))*0.5d0 - E_ref) * p
-    if ( delta > thr ) then
-      srmc_weight(i_walk) = dexp(-dtime_step*thr)
-    else if ( delta < -thr ) then
-      srmc_weight(i_walk) = dexp(dtime_step*thr)
-    else
-      srmc_weight(i_walk) = dexp(-dtime_step*delta)
-    endif
+!   delta = (E_loc+E_loc_save(1,i_walk))*0.5d0
+!   delta = (5.d0 * E_loc + 8.d0 * E_loc_save(1,i_walk) - E_loc_save(2,i_walk))/12.d0
+    delta = (9.d0*E_loc+19.d0*E_loc_save(1,i_walk)- &
+             5.d0*E_loc_save(2,i_walk)+E_loc_save(3,i_walk))/24.d0
+!   delta = -((-251.d0*E_loc)-646.d0*E_loc_save(1,i_walk)+264.d0*E_loc_save(2,i_walk)-&
+!             106.d0*E_loc_save(3,i_walk)+19.d0*E_loc_save(4,i_walk))/720.d0
+    delta = (delta - E_ref)*p
+    if (delta >= 0.d0) then
+      srmc_weight(i_walk) = dexp(-dtime_step*delta)
+    else
+      srmc_weight(i_walk) = 2.d0-dexp(dtime_step*delta)
+    endif
+!   if (accepted) then
+!     ! Compute correction to past weights
+!     double precision :: delta_old, delta_new
+!     delta_old = (9.d0*E_loc_save(1,i_walk)+19.d0*E_loc_save(2,i_walk)-&
+!                  5.d0*E_loc_save(3,i_walk)+E_loc_save(4,i_walk))/24.d0 - E_ref
+!
+!     if (delta_old >= 0.d0) then
+!       srmc_weight(i_walk) = srmc_weight(i_walk) * dexp(dtime_step*delta_old)
+!     else
+!       srmc_weight(i_walk) = srmc_weight(i_walk) * (2.d0-dexp(-dtime_step*delta_old))
+!     endif
+!
+!     delta_new = (-(E_loc_save_tmp(3,i_walk)-13.d0*E_loc_save_tmp(2,i_walk)&
+!                  -13.d0*E_loc_save_tmp(1,i_walk)+E_loc))/24.d0 - E_ref
+!
+!     if (delta_new >= 0.d0) then
+!       srmc_weight(i_walk) = srmc_weight(i_walk) * dexp(-dtime_step*delta_new)
+!     else
+!       srmc_weight(i_walk) = srmc_weight(i_walk) * (2.d0-dexp(dtime_step*delta_new) )
+!     endif
+!
+!   endif
     elec_coord(elec_num+1,1) += p*time_step
     elec_coord(elec_num+1,2) = E_loc
     elec_coord(elec_num+1,3) = srmc_weight(i_walk) * pop_weight_mult
     do l=1,3
       do i=1,elec_num+1
         elec_coord_full(i,l,i_walk) = elec_coord(i,l)
       enddo
     enddo
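
The branching weight now uses a multi-step estimate of the local energy along the move: the retained formula combines the current E_loc with the history E_loc_save(1:3,i_walk), kept most-recent-first, using the coefficients (9, 19, -5, 1)/24 of a third-order Adams-Moulton corrector, while the commented lines show the lower- and higher-order variants that were also tried. For negative delta, 2 - dexp(dtime_step*delta) agrees with dexp(-dtime_step*delta) to first order but stays bounded by 2, replacing the old hard cap at dexp(+/-thr*dtime_step). A minimal standalone sketch of this weight, with hypothetical argument names rather than the routine's actual interface:

    double precision function srmc_weight_sketch(e_loc, e_hist, e_ref, dt, p)
      implicit none
      double precision, intent(in) :: e_loc      ! current local energy
      double precision, intent(in) :: e_hist(4)  ! previous accepted local energies, most recent first
      double precision, intent(in) :: e_ref      ! reference energy
      double precision, intent(in) :: dt, p      ! time step and acceptance probability
      double precision :: delta
      ! third-order history average (same coefficients as the retained line above)
      delta = (9.d0*e_loc + 19.d0*e_hist(1) - 5.d0*e_hist(2) + e_hist(3)) / 24.d0
      delta = (delta - e_ref) * p
      if (delta >= 0.d0) then
        srmc_weight_sketch = dexp(-dt*delta)
      else
        ! matches dexp(-dt*delta) to first order, but bounded above by 2
        srmc_weight_sketch = 2.d0 - dexp(dt*delta)
      endif
    end function srmc_weight_sketch
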
@@ -167,25 +202,30 @@ END_SHELL
   enddo
   psi_value_save(i_walk) = psi_value
-  E_loc_save(i_walk) = E_loc
+  if (accepted) then
+    E_loc_save(4,i_walk) = E_loc_save(3,i_walk)
+    E_loc_save(3,i_walk) = E_loc_save(2,i_walk)
+    E_loc_save(2,i_walk) = E_loc_save(1,i_walk)
+    E_loc_save(1,i_walk) = E_loc
+  endif
 BEGIN_SHELL [ /usr/bin/python ]
 from properties import *
 t = """
  if (calc_$X) then
 ! Kahan's summation algorithm to compute these sums reducing the rounding error:
-! $X_srmc_block_walk   += $X   * pop_weight_mult * srmc_weight(i_walk)
-! $X_2_srmc_block_walk += $X_2 * pop_weight_mult * srmc_weight(i_walk)
+! $X_srmc_block_walk   += $X   * srmc_pop_weight_mult * srmc_weight(i_walk)
+! $X_2_srmc_block_walk += $X_2 * srmc_pop_weight_mult * srmc_weight(i_walk)
 ! see http://en.wikipedia.org/wiki/Kahan_summation_algorithm
-  $X_srmc_block_walk_kahan($D2 3) = $X * pop_weight_mult * srmc_weight(i_walk) - $X_srmc_block_walk_kahan($D2 1)
+  $X_srmc_block_walk_kahan($D2 3) = $X * srmc_pop_weight_mult * srmc_weight(i_walk) - $X_srmc_block_walk_kahan($D2 1)
   $X_srmc_block_walk_kahan($D2 2) = $X_srmc_block_walk $D1 + $X_srmc_block_walk_kahan($D2 3)
   $X_srmc_block_walk_kahan($D2 1) = ($X_srmc_block_walk_kahan($D2 2) - $X_srmc_block_walk $D1 ) &
       - $X_srmc_block_walk_kahan($D2 3)
   $X_srmc_block_walk $D1 = $X_srmc_block_walk_kahan($D2 2)
-  $X_2_srmc_block_walk_kahan($D2 3) = $X_2 * pop_weight_mult * srmc_weight(i_walk) - $X_2_srmc_block_walk_kahan($D2 1)
+  $X_2_srmc_block_walk_kahan($D2 3) = $X_2 * srmc_pop_weight_mult * srmc_weight(i_walk) - $X_2_srmc_block_walk_kahan($D2 1)
   $X_2_srmc_block_walk_kahan($D2 2) = $X_2_srmc_block_walk $D1 + $X_2_srmc_block_walk_kahan($D2 3)
   $X_2_srmc_block_walk_kahan($D2 1) = ($X_2_srmc_block_walk_kahan($D2 2) - $X_2_srmc_block_walk $D1 ) &
       - $X_2_srmc_block_walk_kahan($D2 3)
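
Each accumulation in the generated template above follows Kahan's compensated summation: element 3 of the *_kahan array holds the compensated new term, element 2 the provisional sum, and element 1 the rounding error carried into the next step. A self-contained illustration of the same scheme with plain scalar names (not generated code):

    program kahan_demo
      implicit none
      double precision :: s, c, t, y, term
      integer :: i
      s = 1.d0     ! running sum   (plays the role of $X_srmc_block_walk)
      c = 0.d0     ! compensation  (plays the role of ..._kahan(1))
      do i = 1, 1000000
        term = 1.d-10              ! value to accumulate (weighted property in the real code)
        y = term - c               ! ..._kahan(3): new term minus previous rounding error
        t = s + y                  ! ..._kahan(2): provisional sum
        c = (t - s) - y            ! low-order bits lost in the addition
        s = t
      enddo
      print *, 'compensated sum = ', s
    end program kahan_demo
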
@@ -203,7 +243,7 @@ for p in properties:
 END_SHELL
-    block_weight += pop_weight_mult * srmc_weight(i_walk)
+    block_weight += srmc_pop_weight_mult * srmc_weight(i_walk)
 else
   srmc_weight(i_walk) = 0.d0
@@ -221,15 +261,15 @@ END_SHELL
 ! Eventually, recompute the weight of the population
 if (srmc_projection_step == 1) then
-  pop_weight_mult = 1.d0
+  srmc_pop_weight_mult = 1.d0
   do k=1,srmc_projection
-    pop_weight_mult *= pop_weight(k)
+    srmc_pop_weight_mult *= srmc_pop_weight(k)
   enddo
 endif
 ! Remove contribution of the old value of the weight at the new
 ! projection step
-pop_weight_mult *= 1.d0/pop_weight(srmc_projection_step)
+srmc_pop_weight_mult *= 1.d0/srmc_pop_weight(srmc_projection_step)
 ! Compute the new weight of the population
 double precision :: sum_weight
@@ -237,10 +277,10 @@ END_SHELL
 do k=1,walk_num
   sum_weight += srmc_weight(k)
 enddo
-pop_weight(srmc_projection_step) = sum_weight/dble(walk_num)
+srmc_pop_weight(srmc_projection_step) = sum_weight/dble(walk_num)
 ! Update the running population weight
-pop_weight_mult *= pop_weight(srmc_projection_step)
+srmc_pop_weight_mult *= srmc_pop_weight(srmc_projection_step)
 ! Reconfiguration
 integer :: ipos(walk_num)
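
srmc_pop_weight_mult is thus a running product of the last srmc_projection population weights kept in the circular buffer srmc_pop_weight: each step divides out the entry about to be overwritten and multiplies in the new one, with a full recomputation whenever srmc_projection_step wraps back to 1. A small standalone sketch of this sliding-window product, using hypothetical names (in the real code the new weight is sum_weight/dble(walk_num)):

    ! Sliding-window product over the last n population weights.
    subroutine update_window_product(prod, w, n, step, w_new)
      implicit none
      integer, intent(in)             :: n, step   ! window length and current slot (1..n)
      double precision, intent(inout) :: prod, w(n)
      double precision, intent(in)    :: w_new     ! population weight measured this step
      integer :: k
      if (step == 1) then
        prod = 1.d0                  ! periodic recomputation of the full product
        do k = 1, n
          prod = prod * w(k)
        enddo
      endif
      prod = prod / w(step)          ! remove the weight that leaves the window
      w(step) = w_new                ! store the new population weight
      prod = prod * w(step)          ! and include it in the running product
    end subroutine update_window_product
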
@@ -257,7 +297,7 @@ END_SHELL
     enddo
   enddo
   psi_value_save_tmp(k) = psi_value_save(k)
-  E_loc_save_tmp(k) = E_loc_save(k)
+  E_loc_save_tmp(:,k) = E_loc_save(:,k)
 enddo
 integer :: ipm
@@ -272,7 +312,7 @@ END_SHELL
     enddo
   enddo
   psi_value_save(k) = psi_value_save_tmp(ipm)
-  E_loc_save(k) = E_loc_save_tmp(ipm)
+  E_loc_save(:,k) = E_loc_save_tmp(:,ipm)
 enddo
 call system_clock(cpu1, count_rate, count_max)
@@ -287,7 +327,7 @@ END_SHELL
   cpu2 = cpu1
 endif
-SOFT_TOUCH elec_coord_full pop_weight_mult
+SOFT_TOUCH elec_coord_full srmc_pop_weight_mult
 first_loop = .False.
@@ -314,7 +354,7 @@ END_SHELL
 END_PROVIDER
-BEGIN_PROVIDER [ double precision, pop_weight_mult ]
+BEGIN_PROVIDER [ double precision, srmc_pop_weight_mult ]
 implicit none
 BEGIN_DOC
 ! Population weight of SRMC
@@ -335,12 +375,12 @@ END_PROVIDER
 srmc_projection_step = 0
 END_PROVIDER
-BEGIN_PROVIDER [ double precision, pop_weight, (0:srmc_projection+1) ]
+BEGIN_PROVIDER [ double precision, srmc_pop_weight, (0:srmc_projection+1) ]
 implicit none
 BEGIN_DOC
 ! Population weight of SRMC
 END_DOC
-pop_weight = 1.d0
+srmc_pop_weight = 1.d0
 END_PROVIDER

File diff suppressed because it is too large.


@@ -148,8 +148,8 @@ END_PROVIDER
 BEGIN_PROVIDER  [ real, elec_dist, (elec_num_8,elec_num) ]
 &BEGIN_PROVIDER [ real, elec_dist_vec_x, (elec_num_8,elec_num) ]
-&BEGIN_PROVIDER [ real, elec_dist_vec_y, ((-simd_sp+1):elec_num_8,elec_num) ]
-&BEGIN_PROVIDER [ real, elec_dist_vec_z, ((-2*simd_sp+1):elec_num_8,elec_num) ]
+&BEGIN_PROVIDER [ real, elec_dist_vec_y, (elec_num_8,elec_num) ]
+&BEGIN_PROVIDER [ real, elec_dist_vec_z, (elec_num_8,elec_num) ]
 implicit none
 BEGIN_DOC
 ! Electron-electron distances
@@ -171,19 +171,15 @@ END_PROVIDER
 do ie2 = 1,elec_num
   real :: x, y, z
-  real :: x2, y2, z2
   x = elec_coord(ie2,1)
   y = elec_coord(ie2,2)
   z = elec_coord(ie2,3)
   !DIR$ VECTOR ALIGNED
-  !DIR$ LOOP COUNT (200)
   do ie1 = 1,elec_num
     elec_dist_vec_x(ie1,ie2) = elec_coord(ie1,1) - x
     elec_dist_vec_y(ie1,ie2) = elec_coord(ie1,2) - y
     elec_dist_vec_z(ie1,ie2) = elec_coord(ie1,3) - z
-  enddo
-  !DIR$ VECTOR ALIGNED
-  !DIR$ LOOP COUNT (200)
-  do ie1 = 1,elec_num
     elec_dist(ie1,ie2) = sqrt( &
         elec_dist_vec_x(ie1,ie2)*elec_dist_vec_x(ie1,ie2) + &
         elec_dist_vec_y(ie1,ie2)*elec_dist_vec_y(ie1,ie2) + &


@@ -52,8 +52,6 @@ data = [ \
 data_no_set = [\
   ("mo_basis_mo_tot_num"    , "integer"          , ""),
-  ("mo_basis_mo_active_num" , "integer"          , ""),
-  ("mo_basis_mo_closed_num" , "integer"          , ""),
   ("pseudo_ao_pseudo_grid"  , "double precision" , "(ao_num,pseudo_lmax+pseudo_lmax+1,pseudo_lmax-0+1,nucl_num,pseudo_grid_size)"),
   ("pseudo_mo_pseudo_grid"  , "double precision" , "(ao_num,pseudo_lmax+pseudo_lmax+1,pseudo_lmax-0+1,nucl_num,pseudo_grid_size)"),
   ("pseudo_pseudo_dz_k"     , "double precision" , "(nucl_num,pseudo_klocmax)"),


@@ -76,7 +76,6 @@ BEGIN_PROVIDER [ real, mo_coef, (ao_num_8,mo_num_8) ]
 f = 1./mo_scale
 do j=1,mo_num
   !DIR$ VECTOR ALIGNED
-  !DIR$ LOOP COUNT (2000)
   do i=1,ao_num_8
     mo_coef(i,j) *= f
   enddo
@@ -138,11 +137,11 @@ BEGIN_PROVIDER [ real, mo_coef_transp_present, (num_present_mos_8,ao_num_8) ]
 END_PROVIDER
-BEGIN_PROVIDER  [ real, mo_value_transp,  ((-simd_sp+1):mo_num_8,elec_num) ]
-&BEGIN_PROVIDER [ real, mo_grad_transp_x, ((-2*simd_sp+1):mo_num_8,elec_num) ]
-&BEGIN_PROVIDER [ real, mo_grad_transp_y, ((-3*simd_sp+1):mo_num_8,elec_num) ]
-&BEGIN_PROVIDER [ real, mo_grad_transp_z, ((-4*simd_sp+1):mo_num_8,elec_num) ]
-&BEGIN_PROVIDER [ real, mo_lapl_transp,   ((-5*simd_sp+1):mo_num_8,elec_num) ]
+BEGIN_PROVIDER  [ real, mo_value_transp,  (mo_num_8,elec_num) ]
+&BEGIN_PROVIDER [ real, mo_grad_transp_x, (mo_num_8,elec_num) ]
+&BEGIN_PROVIDER [ real, mo_grad_transp_y, (mo_num_8,elec_num) ]
+&BEGIN_PROVIDER [ real, mo_grad_transp_z, (mo_num_8,elec_num) ]
+&BEGIN_PROVIDER [ real, mo_lapl_transp,   (mo_num_8,elec_num) ]
 implicit none
 BEGIN_DOC
@@ -218,7 +217,6 @@ END_PROVIDER
     cycle
   endif
   r_inv = nucl_elec_dist_inv(k,i)
-  !DIR$ LOOP COUNT (500)
   do j=1,mo_num
     mo_value_transp(j,i) = mo_value_transp(j,i) + nucl_fitcusp_param(1,j,k) +&
         r * (nucl_fitcusp_param(2,j,k) + &
@@ -250,7 +248,6 @@ END_PROVIDER
 ! Scale off-diagonal elements
 t = prepare_walkers_t
 do i=1,mo_num
-  !DIR$ LOOP COUNT (100)
   do j=1,elec_alpha_num
     if (i /= j) then
       mo_value_transp(i,j) *= t
@@ -260,7 +257,6 @@ END_PROVIDER
       mo_lapl_transp(i,j) *= t
     endif
   enddo
-  !DIR$ LOOP COUNT (100)
   do j=1,elec_beta_num
     if (i /= j) then
       mo_value_transp(i,j+elec_alpha_num) *= t
@@ -284,7 +280,6 @@ END_PROVIDER
   enddo
 END_PROVIDER
 BEGIN_PROVIDER [ real, mo_value, (elec_num_8,mo_num) ]
 implicit none
 BEGIN_DOC
@@ -300,7 +295,7 @@ BEGIN_PROVIDER [ real, mo_value, (elec_num_8,mo_num) ]
   !DIR$ VECTOR ALIGNED
   mo_value = 0.
 endif
-call transpose(mo_value_transp(1,1),mo_num_8+simd_sp,mo_value,elec_num_8,mo_num,elec_num)
+call transpose(mo_value_transp(1,1),mo_num_8,mo_value,elec_num_8,mo_num,elec_num)
 END_PROVIDER
@@ -328,26 +323,10 @@ END_PROVIDER
   PROVIDE primitives_reduced
 endif
 ! Transpose x last for cache efficiency
-call transpose_to_dp(mo_grad_transp_y(1,1),mo_num_8+3*simd_sp,mo_grad_y(1,1),elec_num_8,mo_num,elec_num)
-call transpose_to_dp(mo_grad_transp_z(1,1),mo_num_8+4*simd_sp,mo_grad_z(1,1),elec_num_8,mo_num,elec_num)
-call transpose_to_dp(mo_grad_transp_x(1,1),mo_num_8+2*simd_sp,mo_grad_x(1,1),elec_num_8,mo_num,elec_num)
-END_PROVIDER
-
-BEGIN_PROVIDER [ double precision, mo_grad_lapl, (4,elec_num,mo_num) ]
- implicit none
- BEGIN_DOC
- ! Gradients and laplacian
- END_DOC
- integer :: i,j
- do j=1,mo_num
-   do i=1,elec_num
-     mo_grad_lapl(1,i,j) = mo_grad_x(i,j)
-     mo_grad_lapl(2,i,j) = mo_grad_y(i,j)
-     mo_grad_lapl(3,i,j) = mo_grad_z(i,j)
-     mo_grad_lapl(4,i,j) = mo_lapl (i,j)
-   enddo
- enddo
+call transpose_to_dp(mo_grad_transp_y(1,1),mo_num_8,mo_grad_y(1,1),elec_num_8,mo_num,elec_num)
+call transpose_to_dp(mo_grad_transp_z(1,1),mo_num_8,mo_grad_z(1,1),elec_num_8,mo_num,elec_num)
+call transpose_to_dp(mo_grad_transp_x(1,1),mo_num_8,mo_grad_x(1,1),elec_num_8,mo_num,elec_num)
 END_PROVIDER
@@ -365,7 +344,51 @@ BEGIN_PROVIDER [ double precision, mo_lapl, (elec_num_8,mo_num) ]
   !DIR$ VECTOR ALIGNED
   mo_lapl = 0.d0
 endif
-call transpose_to_dp(mo_lapl_transp(1,1),mo_num_8+5*simd_sp,mo_lapl,elec_num_8,mo_num,elec_num)
+call transpose_to_dp(mo_lapl_transp(1,1),mo_num_8,mo_lapl,elec_num_8,mo_num,elec_num)
+END_PROVIDER
+
+BEGIN_PROVIDER  [ double precision, mo_grad_lapl_alpha, (4,elec_alpha_num,mo_num) ]
+&BEGIN_PROVIDER [ double precision, mo_grad_lapl_beta , (4,elec_alpha_num+1:elec_num,mo_num) ]
+ implicit none
+ BEGIN_DOC
+ ! Gradients and laplacian
+ END_DOC
+ integer :: i,j
+ do j=1,mo_num
+   do i=1,elec_alpha_num
+     mo_grad_lapl_alpha(1,i,j) = mo_grad_transp_x(j,i)
+     mo_grad_lapl_alpha(2,i,j) = mo_grad_transp_y(j,i)
+     mo_grad_lapl_alpha(3,i,j) = mo_grad_transp_z(j,i)
+     mo_grad_lapl_alpha(4,i,j) = mo_lapl_transp  (j,i)
+   enddo
+ enddo
+ do j=1,mo_num
+   do i=elec_alpha_num+1,elec_num
+     mo_grad_lapl_beta(1,i,j) = mo_grad_transp_x(j,i)
+     mo_grad_lapl_beta(2,i,j) = mo_grad_transp_y(j,i)
+     mo_grad_lapl_beta(3,i,j) = mo_grad_transp_z(j,i)
+     mo_grad_lapl_beta(4,i,j) = mo_lapl_transp  (j,i)
+   enddo
+ enddo
+END_PROVIDER
+
+BEGIN_PROVIDER [ double precision, mo_grad_lapl_transp, (4,mo_num,elec_num) ]
+ implicit none
+ BEGIN_DOC
+ ! Gradients and laplacian
+ END_DOC
+ integer :: i,j
+ do i=1,elec_num
+   do j=1,mo_num
+     mo_grad_lapl_transp(1,j,i) = mo_grad_transp_x(j,i)
+     mo_grad_lapl_transp(2,j,i) = mo_grad_transp_y(j,i)
+     mo_grad_lapl_transp(3,j,i) = mo_grad_transp_z(j,i)
+     mo_grad_lapl_transp(4,j,i) = mo_lapl_transp  (j,i)
+   enddo
+ enddo
 END_PROVIDER
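
The new providers store, for every (electron, orbital) pair, the three gradient components and the Laplacian contiguously in a leading dimension of 4, filled straight from the single-precision transposed arrays; this replaces the mo_grad_lapl provider removed above, which was built from the already-transposed double-precision mo_grad_x/y/z and mo_lapl. A toy sketch of the packed layout (illustrative names only, not repository code):

    program pack_grad_lapl_demo
      implicit none
      integer, parameter :: n = 4              ! number of orbitals, purely illustrative
      real :: gx(n), gy(n), gz(n), lap(n)      ! separate arrays, as in the transposed providers
      double precision :: grad_lapl(4,n)       ! packed layout: x, y, z, laplacian per orbital
      integer :: j
      call random_number(gx); call random_number(gy)
      call random_number(gz); call random_number(lap)
      do j = 1, n
        grad_lapl(1,j) = gx(j)
        grad_lapl(2,j) = gy(j)
        grad_lapl(3,j) = gz(j)
        grad_lapl(4,j) = lap(j)
      enddo
      print *, grad_lapl(:,1)                  ! all four values of one orbital, unit stride
    end program pack_grad_lapl_demo
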
@@ -419,7 +442,6 @@ BEGIN_PROVIDER [ double precision , mo_value_at_nucl, (mo_num_8,nucl_num) ]
 PROVIDE ao_value_p
-!DIR$ LOOP COUNT (2000)
 do i=1,ao_num
   if (ao_nucl(i) /= k) then
     ao_value_at_nucl_no_S(i) = ao_value_p(i)
@@ -461,7 +483,6 @@ END_PROVIDER
 point(3) = nucl_coord(k,3)+ nucl_fitcusp_radius(k)
 TOUCH point
-!DIR$ LOOP COUNT (2000)
 do j=1,ao_num
   ao_value_at_fitcusp_radius(j,k) = ao_value_p(j)
   ao_grad_at_fitcusp_radius(j,k)  = ao_grad_p(j,3)
@@ -615,7 +636,6 @@ BEGIN_PROVIDER [ real, nucl_fitcusp_param, (4,mo_num,nucl_num) ]
     cycle
   endif
   R = nucl_fitcusp_radius(k)
-  !DIR$ LOOP COUNT (500)
   do i=1,mo_num
     double precision :: lap_phi, grad_phi, phi, eta
     lap_phi = mo_lapl_at_fitcusp_radius(i,k)
@@ -681,9 +701,15 @@ subroutine sparse_full_mv(A,LDA, &
 ! LDC and LDA have to be factors of simd_sp
-!DIR$ VECTOR ALIGNED
-!DIR$ LOOP COUNT (256)
-!$OMP SIMD
+IRP_IF NO_PREFETCH
+IRP_ELSE
+  call MM_PREFETCH (A(j,indices(1)),3)
+  call MM_PREFETCH (A(j,indices(2)),3)
+  call MM_PREFETCH (A(j,indices(3)),3)
+  call MM_PREFETCH (A(j,indices(4)),3)
+IRP_ENDIF
+!DIR$ SIMD
 do j=1,LDC
   C1(j) = 0.
   C2(j) = 0.
@@ -691,11 +717,11 @@ subroutine sparse_full_mv(A,LDA, &
   C4(j) = 0.
   C5(j) = 0.
 enddo
-!$OMP END SIMD
 kmax2 = ishft(indices(0),-2)
 kmax2 = ishft(kmax2,2)
 kmax3 = indices(0)
-!DIR$ LOOP COUNT (200)
 do kao=1,kmax2,4
   k_vec(1) = indices(kao  )
   k_vec(2) = indices(kao+1)
@@ -712,19 +738,6 @@ subroutine sparse_full_mv(A,LDA, &
   d32 = B2(kao+2)
   d42 = B2(kao+3)
-  !DIR$ LOOP COUNT (256)
-  do k=0,LDA-1,$IRP_ALIGN/4
-    !DIR$ VECTOR ALIGNED
-    !$OMP SIMD
-    do j=1,$IRP_ALIGN/4
-      C1(j+k) = C1(j+k) + A(j+k,k_vec(1))*d11 + A(j+k,k_vec(2))*d21&
-                        + A(j+k,k_vec(3))*d31 + A(j+k,k_vec(4))*d41
-      C2(j+k) = C2(j+k) + A(j+k,k_vec(1))*d12 + A(j+k,k_vec(2))*d22&
-                        + A(j+k,k_vec(3))*d32 + A(j+k,k_vec(4))*d42
-    enddo
-    !$OMP END SIMD
-  enddo
   d13 = B3(kao  )
   d23 = B3(kao+1)
   d33 = B3(kao+2)
@@ -735,38 +748,47 @@ subroutine sparse_full_mv(A,LDA, &
   d34 = B4(kao+2)
   d44 = B4(kao+3)
-  !DIR$ LOOP COUNT (256)
-  do k=0,LDA-1,$IRP_ALIGN/4
-    !DIR$ VECTOR ALIGNED
-    !$OMP SIMD
-    do j=1,$IRP_ALIGN/4
-      C3(j+k) = C3(j+k) + A(j+k,k_vec(1))*d13 + A(j+k,k_vec(2))*d23&
-                        + A(j+k,k_vec(3))*d33 + A(j+k,k_vec(4))*d43
-      C4(j+k) = C4(j+k) + A(j+k,k_vec(1))*d14 + A(j+k,k_vec(2))*d24&
-                        + A(j+k,k_vec(3))*d34 + A(j+k,k_vec(4))*d44
-    enddo
-    !$OMP END SIMD
-  enddo
   d15 = B5(kao  )
   d25 = B5(kao+1)
   d35 = B5(kao+2)
   d45 = B5(kao+3)
-  !DIR$ LOOP COUNT (256)
   do k=0,LDA-1,$IRP_ALIGN/4
     !DIR$ VECTOR ALIGNED
-    !$OMP SIMD
+    !DIR$ SIMD FIRSTPRIVATE(d11,d21,d31,d41)
     do j=1,$IRP_ALIGN/4
+IRP_IF NO_PREFETCH
+IRP_ELSE
+      call MM_PREFETCH (A(j+k,indices(kao+4)),3)
+      call MM_PREFETCH (A(j+k,indices(kao+5)),3)
+      call MM_PREFETCH (A(j+k,indices(kao+6)),3)
+      call MM_PREFETCH (A(j+k,indices(kao+7)),3)
+IRP_ENDIF
+      C1(j+k) = C1(j+k) + A(j+k,k_vec(1))*d11 + A(j+k,k_vec(2))*d21&
+                        + A(j+k,k_vec(3))*d31 + A(j+k,k_vec(4))*d41
+    enddo
+    !DIR$ VECTOR ALIGNED
+    !DIR$ SIMD FIRSTPRIVATE(d12,d22,d32,d42,d13,d23,d33,d43)
+    do j=1,$IRP_ALIGN/4
+      C2(j+k) = C2(j+k) + A(j+k,k_vec(1))*d12 + A(j+k,k_vec(2))*d22&
+                        + A(j+k,k_vec(3))*d32 + A(j+k,k_vec(4))*d42
+      C3(j+k) = C3(j+k) + A(j+k,k_vec(1))*d13 + A(j+k,k_vec(2))*d23&
+                        + A(j+k,k_vec(3))*d33 + A(j+k,k_vec(4))*d43
+    enddo
+    !DIR$ VECTOR ALIGNED
+    !DIR$ SIMD FIRSTPRIVATE(d14,d24,d34,d44,d15,d25,d35,d45)
+    do j=1,$IRP_ALIGN/4
+      C4(j+k) = C4(j+k) + A(j+k,k_vec(1))*d14 + A(j+k,k_vec(2))*d24&
+                        + A(j+k,k_vec(3))*d34 + A(j+k,k_vec(4))*d44
       C5(j+k) = C5(j+k) + A(j+k,k_vec(1))*d15 + A(j+k,k_vec(2))*d25&
                         + A(j+k,k_vec(3))*d35 + A(j+k,k_vec(4))*d45
     enddo
-    !$OMP END SIMD
   enddo
 enddo
-!DIR$ LOOP COUNT (200)
 do kao = kmax2+1, kmax3
   k_vec(1) = indices(kao)
   d11 = B1(kao)
@@ -775,10 +797,9 @@ subroutine sparse_full_mv(A,LDA, &
   d14 = B4(kao)
   d15 = B5(kao)
   !DIR$ VECTOR ALIGNED
-  !DIR$ LOOP COUNT (256)
-  do k=0,LDA-1,simd_sp
+  do k=0,LDA-1,$IRP_ALIGN/4
     !DIR$ VECTOR ALIGNED
-    !$OMP SIMD
+    !DIR$ SIMD FIRSTPRIVATE(d11,d12,d13,d14,d15)
    do j=1,$IRP_ALIGN/4
      C1(j+k) = C1(j+k) + A(j+k,k_vec(1))*d11
      C2(j+k) = C2(j+k) + A(j+k,k_vec(1))*d12
@@ -786,7 +807,6 @@ subroutine sparse_full_mv(A,LDA, &
      C4(j+k) = C4(j+k) + A(j+k,k_vec(1))*d14
      C5(j+k) = C5(j+k) + A(j+k,k_vec(1))*d15
    enddo
-    !$OMP END SIMD
   enddo
 enddo
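
sparse_full_mv accumulates five output vectors C1..C5 over the columns of A selected by indices(:), four columns per iteration with coefficients taken from B1..B5; the commit swaps the !$OMP SIMD directives for !DIR$ SIMD with FIRSTPRIVATE scalars, regroups the inner update loops, and prefetches the next four columns with MM_PREFETCH unless NO_PREFETCH is set. A stripped-down scalar sketch of the access pattern (one output vector, no directives, hypothetical interface):

    ! Scalar sketch: C(1:lda) += A(:,indices(kao)) * b(kao) over the selected columns.
    subroutine sparse_block_mv_sketch(A, lda, ncol, indices, b, C)
      implicit none
      integer, intent(in)    :: lda, ncol
      integer, intent(in)    :: indices(0:ncol)  ! indices(0) holds the number of selected columns
      real,    intent(in)    :: A(lda,ncol)      ! dense blocks of the sparse matrix
      real,    intent(in)    :: b(ncol)          ! coefficients (B1..B5 in the real routine)
      real,    intent(inout) :: C(lda)           ! one of the output vectors (C1..C5)
      integer :: kao, j, kcol
      real    :: d
      do kao = 1, indices(0)
        kcol = indices(kao)                      ! column of A that actually contributes
        d    = b(kao)
        do j = 1, lda                            ! unit-stride inner loop, the part vectorised above
          C(j) = C(j) + A(j,kcol)*d
        enddo
      enddo
    end subroutine sparse_block_mv_sketch
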