mirror of https://github.com/QuantumPackage/qp2.git synced 2024-10-16 04:31:32 +02:00

fixed conf

Abdallah Ammar 2023-05-06 20:42:39 +02:00
commit cde3ea6fc1
54 changed files with 1017 additions and 620 deletions


@ -6,7 +6,7 @@
# --align=32 : Align all provided arrays on a 32-byte boundary
#
[COMMON]
FC : ifort -fpic
FC : ifort -fpic -diag-disable 5462
LAPACK_LIB : -mkl=parallel
IRPF90 : irpf90
IRPF90_FLAGS : --ninja --align=64 -DINTEL

configure (vendored)

@ -232,7 +232,7 @@ EOF
execute << EOF
cd "\${QP_ROOT}"/external
tar --gunzip --extract --file qp2-dependencies/f77-zmq-4.3.2.tar.gz
tar --gunzip --extract --file qp2-dependencies/f77-zmq-4.3.?.tar.gz
cd f77-zmq-*
./configure --prefix=\$QP_ROOT
export ZMQ_H="\$QP_ROOT"/include/zmq.h


@ -25,7 +25,7 @@ except ImportError:
"quantum_package.rc"))
print("\n".join(["", "Error:", "source %s" % f, ""]))
sys.exit(1)
raise
# Compress path
def comp_path(path):

scripts/qp_exc_energy.py (executable file)

@ -0,0 +1,186 @@
#!/usr/bin/env python
# Computes the error on the excitation energy of a CIPSI run.
def student(p,df):
import scipy
from scipy.stats import t
return t.ppf(p, df)
def chi2cdf(x,k):
import scipy
import scipy.stats
return scipy.stats.chi2.cdf(x,k)
def jarque_bera(data):
n = max(len(data), 2)
norm = 1./ sum( [ w for (_,w) in data ] )
mu = sum( [ w* x for (x,w) in data ] ) * norm
sigma2 = sum( [ w*(x-mu)**2 for (x,w) in data ] ) * norm
if sigma2 > 0.:
S = ( sum( [ w*(x-mu)**3 for (x,w) in data ] ) * norm ) / sigma2**(3./2.)
K = ( sum( [ w*(x-mu)**4 for (x,w) in data ] ) * norm ) / sigma2**2
else:
S = 0.
K = 0.
# Value of the Jarque-Bera test
JB = n/6. * (S**2 + 1./4. * (K-3.)**2)
# Probability that the data comes from a Gaussian distribution
p = 1. - chi2cdf(JB,2)
return JB, mu, sqrt(sigma2/(n-1)), p
to_eV = 27.2107362681
import sys, os
import scipy
import scipy.stats
from math import sqrt, gamma, exp
import json
def read_data(filename,state):
""" Read energies and PT2 from input file """
with open(filename,'r') as f:
lines = json.load(f)['fci']
print(f"State: {state}")
gs = []
es = []
for l in lines:
try:
pt2_0 = l['states'][0]['pt2']
e_0 = l['states'][0]['energy']
pt2_1 = l['states'][state]['pt2']
e_1 = l['states'][state]['energy']
gs.append( (e_0, pt2_0) )
es.append( (e_1, pt2_1) )
except: pass
def f(p_1, p0, p1):
e, pt2 = p0
y0, x0 = p_1
y1, x1 = p1
try:
alpha = (y1-y0)/(x0-x1)
except ZeroDivisionError:
alpha = 1.
return [e, pt2, alpha]
for l in (gs, es):
p_1, p0, p1 = l[0], l[0], l[1]
l[0] = f(p_1, p0, p1)
for i in range(1,len(l)-1):
p_1 = (l[i-1][0], l[i-1][1])
p0 = l[i]
p1 = l[i+1]
l[i] = f(p_1, p0, p1)
i = len(l)-1
p_1 = (l[i-1][0], l[i-1][1])
p0 = l[i]
p1 = l[-1]
l[i] = f(p_1, p0, p1)
return [ x+y for x,y in zip(gs,es) ]
def compute(data):
d = []
for e0, p0, a0, e1, p1, a1 in data:
x = (e1+p1)-(e0+p0)
w = 1./sqrt(p0**2 + p1**2)
bias = (a1-1.)*p1 - (a0-1.)*p0
d.append( (x,w,bias) )
x = []
target = (scipy.stats.norm.cdf(1.)-0.5)*2 # = 0.6827
print("| %2s | %8s | %8s | %8s | %8s | %8s |"%( "N", "DE", "+/-", "bias", "P(G)", "J"))
print("|----+----------+----------+----------+----------+----------|")
xmax = (0.,0.,0.,0.,0.,0,0.)
for i in range(len(data)-1):
jb, mu, sigma, p = jarque_bera( [ (x,w) for (x,w,bias) in d[i:] ] )
bias = sum ( [ w * e for (_,w,e) in d[i:] ] ) / sum ( [ w for (_,w,_) in d[i:] ] )
mu = (mu+0.5*bias) * to_eV
sigma = sigma * to_eV
bias = bias * to_eV
n = len(data[i:])
beta = student(0.5*(1.+target/p) ,n)
err = sigma * beta + 0.5*abs(bias)
print("| %2d | %8.3f | %8.3f | %8.3f | %8.3f | %8.3f |"%( n, mu, err, bias, p, jb))
if n < 3 :
continue
y = (err, p, mu, err, jb,n,bias)
if p > xmax[1]: xmax = y
if p < 0.8:
continue
x.append(y)
x = sorted(x)
print("|----+----------+----------+----------+----------+----------|")
if x != []:
xmax = x[0]
_, p, mu, err, jb, n, bias = xmax
beta = student(0.5*(1.+target/p),n)
print("| %2d | %8.3f | %8.3f | %8.3f | %8.3f | %8.3f |\n"%(n, mu, err, bias, p, jb))
return mu, err, bias, p
filename = sys.argv[1]
print(filename)
if len(sys.argv) > 2:
state = int(sys.argv[2])
else:
state = 1
data = read_data(filename,state)
mu, err, bias, _ = compute(data)
print(" %s: %8.3f +/- %5.3f eV\n"%(filename, mu, err))
import numpy as np
A = np.array( [ [ data[-1][1], 1. ],
[ data[-2][1], 1. ] ] )
B = np.array( [ [ data[-1][0] ],
[ data[-2][0] ] ] )
E0 = np.linalg.solve(A,B)[1]
A = np.array( [ [ data[-1][4], 1. ],
[ data[-2][4], 1. ] ] )
B = np.array( [ [ data[-1][3] ],
[ data[-2][3] ] ] )
E1 = np.linalg.solve(A,B)[1]
average_2 = (E1-E0)*to_eV
A = np.array( [ [ data[-1][1], 1. ],
[ data[-2][1], 1. ],
[ data[-3][1], 1. ] ] )
B = np.array( [ [ data[-1][0] ],
[ data[-2][0] ],
[ data[-3][0] ] ] )
E0 = np.linalg.lstsq(A,B,rcond=None)[0][1]
A = np.array( [ [ data[-1][4], 1. ],
[ data[-2][4], 1. ],
[ data[-3][4], 1. ] ] )
B = np.array( [ [ data[-1][3] ],
[ data[-2][3] ],
[ data[-3][3] ] ] )
E1 = np.linalg.lstsq(A,B,rcond=None)[0][1]
average_3 = (E1-E0)*to_eV
exc = ((data[-1][3] + data[-1][4]) - (data[-1][0] + data[-1][1])) * to_eV
error_2 = abs(average_2 - average_3)
error_3 = abs(average_3 - exc)
print(" 2-3 points: %.3f +/- %.3f "% (average_3, error_2))
print(" largest wf: %.3f +/- %.3f "%(average_3, error_3))


@ -18,3 +18,8 @@ interface: ezfio,provider,ocaml
default: False
ezfio_name: direct
[do_ao_cholesky]
type: logical
doc: Perform Cholesky decomposition of AO integrals
interface: ezfio,provider,ocaml
default: True


@ -0,0 +1,100 @@
BEGIN_PROVIDER [ integer, cholesky_ao_num_guess ]
implicit none
BEGIN_DOC
! Number of Cholesky vectors in AO basis
END_DOC
integer :: i,j,k,l
double precision :: xnorm0, x, integral
double precision, external :: ao_two_e_integral
cholesky_ao_num_guess = 0
xnorm0 = 0.d0
x = 0.d0
do j=1,ao_num
do i=1,ao_num
integral = ao_two_e_integral(i,i,j,j)
if (integral > ao_integrals_threshold) then
cholesky_ao_num_guess += 1
else
x += integral
endif
enddo
enddo
print *, 'Cholesky decomposition of AO integrals'
print *, '--------------------------------------'
print *, ''
print *, 'Estimated Error: ', x
print *, 'Guess size: ', cholesky_ao_num_guess, '(', 100.d0*dble(cholesky_ao_num_guess)/dble(ao_num*ao_num), ' %)'
END_PROVIDER
BEGIN_PROVIDER [ integer, cholesky_ao_num ]
&BEGIN_PROVIDER [ double precision, cholesky_ao, (ao_num, ao_num, cholesky_ao_num_guess) ]
use mmap_module
implicit none
BEGIN_DOC
! Cholesky vectors in AO basis: (ik|a):
! <ij|kl> = (ik|jl) = sum_a (ik|a).(a|jl)
END_DOC
type(c_ptr) :: ptr
integer :: fd, i,j,k,l, rank
double precision, pointer :: ao_integrals(:,:,:,:)
double precision, external :: ao_two_e_integral
! Store AO integrals in a memory mapped file
call mmap(trim(ezfio_work_dir)//'ao_integrals', &
(/ int(ao_num,8), int(ao_num,8), int(ao_num,8), int(ao_num,8) /), &
8, fd, .False., ptr)
call c_f_pointer(ptr, ao_integrals, (/ao_num, ao_num, ao_num, ao_num/))
double precision :: integral
logical, external :: ao_two_e_integral_zero
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i,j,k,l, integral) SCHEDULE(dynamic)
do l=1,ao_num
do j=1,l
do k=1,ao_num
do i=1,k
if (ao_two_e_integral_zero(i,j,k,l)) cycle
integral = ao_two_e_integral(i,k,j,l)
ao_integrals(i,k,j,l) = integral
ao_integrals(k,i,j,l) = integral
ao_integrals(i,k,l,j) = integral
ao_integrals(k,i,l,j) = integral
enddo
enddo
enddo
enddo
!$OMP END PARALLEL DO
! Call Lapack
cholesky_ao_num = cholesky_ao_num_guess
call pivoted_cholesky(ao_integrals, cholesky_ao_num, ao_integrals_threshold, ao_num*ao_num, cholesky_ao)
print *, 'Rank: ', cholesky_ao_num, '(', 100.d0*dble(cholesky_ao_num)/dble(ao_num*ao_num), ' %)'
! Remove mmap
double precision, external :: getUnitAndOpen
call munmap( &
(/ int(ao_num,8), int(ao_num,8), int(ao_num,8), int(ao_num,8) /), &
8, fd, ptr)
open(unit=99,file=trim(ezfio_work_dir)//'ao_integrals')
close(99, status='delete')
END_PROVIDER
BEGIN_PROVIDER [ double precision, cholesky_ao_transp, (cholesky_ao_num, ao_num, ao_num) ]
implicit none
BEGIN_DOC
! Transposed of the Cholesky vectors in AO basis set
END_DOC
integer :: i,j,k
do j=1,ao_num
do i=1,ao_num
do k=1,ao_num
cholesky_ao_transp(k,i,j) = cholesky_ao(i,j,k)
enddo
enddo
enddo
END_PROVIDER
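
Illustrative sketch (hypothetical, not from the repository): the provider above delegates the factorization to pivoted_cholesky; the underlying idea is a threshold-driven, diagonal-pivoted Cholesky of a symmetric positive semidefinite matrix. A minimal NumPy version on a random matrix:

import numpy as np

def pivoted_cholesky(M, tol=1.0e-10):
    # Return L (n x rank) with M ~= L @ L.T, stopping when the largest
    # remaining diagonal of the residual drops below tol.
    n = M.shape[0]
    L = np.zeros((n, 0))
    d = np.diag(M).astype(float).copy()
    while d.max() > tol:
        p = int(np.argmax(d))                      # pivot: largest residual diagonal
        col = (M[:, p] - L @ L[p, :]) / np.sqrt(d[p])
        L = np.column_stack([L, col])
        d -= col**2                                # update residual diagonal
    return L

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 8))
M = A @ A.T                                        # rank-8 PSD test matrix
L = pivoted_cholesky(M)
print(L.shape[1], np.abs(M - L @ L.T).max())       # rank ~ 8, error ~ 1e-13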


@ -486,7 +486,7 @@ subroutine get_ao_two_e_integrals(j,k,l,sze,out_val)
PROVIDE ao_two_e_integrals_in_map ao_integrals_map
if (ao_one_e_integral_zero(j,l)) then
out_val = 0.d0
out_val(1:sze) = 0.d0
return
endif


@ -1,3 +1,4 @@
json
perturbation
zmq
mpi


@ -16,7 +16,6 @@ subroutine run_cipsi
double precision, external :: memory_of_double
PROVIDE H_apply_buffer_allocated
N_iter = 1
threshold_generators = 1.d0
SOFT_TOUCH threshold_generators
@ -76,7 +75,6 @@ subroutine run_cipsi
)
write(*,'(A)') '--------------------------------------------------------------------------------'
to_select = int(sqrt(dble(N_states))*dble(N_det)*selection_factor)
to_select = max(N_states_diag, to_select)
if (do_pt2) then
@ -106,10 +104,10 @@ subroutine run_cipsi
call save_energy(psi_energy_with_nucl_rep, pt2_data % pt2)
call save_iterations(psi_energy_with_nucl_rep(1:N_states),pt2_data % rpt2,N_det)
call increment_n_iter(psi_energy_with_nucl_rep, pt2_data)
call print_extrapolated_energy()
call print_mol_properties()
N_iter += 1
call write_cipsi_json(pt2_data,pt2_data_err)
if (qp_stop()) exit
@ -130,13 +128,13 @@ subroutine run_cipsi
if (qp_stop()) exit
enddo
if (.not.qp_stop()) then
if (N_det < N_det_max) then
call diagonalize_CI
call save_wavefunction
call save_energy(psi_energy_with_nucl_rep, zeros)
endif
! If stopped because N_det > N_det_max, do an extra iteration to compute the PT2
if ((.not.qp_stop()).and. &
(N_det > N_det_max) .and. &
(maxval(abs(pt2_data % pt2(1:N_states))) > pt2_max) .and. &
(maxval(abs(pt2_data % variance(1:N_states))) > variance_max) .and.&
(correlation_energy_ratio <= correlation_energy_ratio_max) &
) then
if (do_pt2) then
call pt2_dealloc(pt2_data)
call pt2_dealloc(pt2_data_err)
@ -155,11 +153,13 @@ subroutine run_cipsi
call save_energy(psi_energy_with_nucl_rep, pt2_data % pt2)
call print_summary(psi_energy_with_nucl_rep(1:N_states), &
pt2_data, pt2_data_err, N_det,N_configuration,N_states,psi_s2)
call save_iterations(psi_energy_with_nucl_rep(1:N_states),pt2_data % rpt2,N_det)
call increment_n_iter(psi_energy_with_nucl_rep, pt2_data)
call print_extrapolated_energy()
call print_mol_properties()
call write_cipsi_json(pt2_data,pt2_data_err)
endif
call pt2_dealloc(pt2_data)
call pt2_dealloc(pt2_data_err)
end


@ -7,7 +7,9 @@ BEGIN_PROVIDER [ integer, nthreads_pt2 ]
character*(32) :: env
call getenv('QP_NTHREADS_PT2',env)
if (trim(env) /= '') then
call lock_io()
read(env,*) nthreads_pt2
call unlock_io()
call write_int(6,nthreads_pt2,'Target number of threads for PT2')
endif
END_PROVIDER


@ -15,7 +15,6 @@ subroutine run_stochastic_cipsi
double precision, external :: memory_of_double
PROVIDE H_apply_buffer_allocated distributed_davidson mo_two_e_integrals_in_map
N_iter = 1
threshold_generators = 1.d0
SOFT_TOUCH threshold_generators
@ -96,10 +95,10 @@ subroutine run_stochastic_cipsi
call save_energy(psi_energy_with_nucl_rep, pt2_data % pt2)
call save_iterations(psi_energy_with_nucl_rep(1:N_states),pt2_data % rpt2,N_det)
call increment_n_iter(psi_energy_with_nucl_rep, pt2_data)
call print_extrapolated_energy()
call print_mol_properties()
N_iter += 1
call write_cipsi_json(pt2_data,pt2_data_err)
if (qp_stop()) exit
@ -119,13 +118,13 @@ subroutine run_stochastic_cipsi
if (qp_stop()) exit
enddo
if (.not.qp_stop()) then
if (N_det < N_det_max) then
call diagonalize_CI
call save_wavefunction
call save_energy(psi_energy_with_nucl_rep, zeros)
endif
! If stopped because N_det > N_det_max, do an extra iteration to compute the PT2
if ((.not.qp_stop()).and. &
(N_det > N_det_max) .and. &
(maxval(abs(pt2_data % pt2(1:N_states))) > pt2_max) .and. &
(maxval(abs(pt2_data % variance(1:N_states))) > variance_max) .and.&
(correlation_energy_ratio <= correlation_energy_ratio_max) &
) then
call pt2_dealloc(pt2_data)
call pt2_dealloc(pt2_data_err)
call pt2_alloc(pt2_data, N_states)
@ -135,9 +134,10 @@ subroutine run_stochastic_cipsi
call save_energy(psi_energy_with_nucl_rep, pt2_data % pt2)
call print_summary(psi_energy_with_nucl_rep, &
pt2_data , pt2_data_err, N_det, N_configuration, N_states, psi_s2)
call save_iterations(psi_energy_with_nucl_rep(1:N_states),pt2_data % rpt2,N_det)
call increment_n_iter(psi_energy_with_nucl_rep, pt2_data)
call print_extrapolated_energy()
call print_mol_properties()
call write_cipsi_json(pt2_data,pt2_data_err)
endif
call pt2_dealloc(pt2_data)
call pt2_dealloc(pt2_data_err)


@ -0,0 +1,53 @@
subroutine write_cipsi_json(pt2_data, pt2_data_err)
use selection_types
implicit none
BEGIN_DOC
! Writes JSON data for CIPSI runs
END_DOC
type(pt2_type), intent(in) :: pt2_data, pt2_data_err
integer :: i,j,k
call lock_io
character*(64), allocatable :: fmtk(:)
integer :: N_states_p, N_iter_p
N_states_p = min(N_states,N_det)
N_iter_p = min(N_iter,8)
allocate(fmtk(0:N_iter_p))
fmtk(:) = '('' '',E22.15,'','')'
fmtk(N_iter_p) = '('' '',E22.15)'
write(json_unit, json_dict_uopen_fmt)
write(json_unit, json_int_fmt) 'n_det', N_det
if (s2_eig) then
write(json_unit, json_int_fmt) 'n_cfg', N_configuration
if (only_expected_s2) then
write(json_unit, json_int_fmt) 'n_csf', N_csf
endif
endif
write(json_unit, json_array_open_fmt) 'states'
do k=1,N_states_p
write(json_unit, json_dict_uopen_fmt)
write(json_unit, json_real_fmt) 'energy', psi_energy_with_nucl_rep(k)
write(json_unit, json_real_fmt) 's2', psi_s2(k)
write(json_unit, json_real_fmt) 'pt2', pt2_data % pt2(k)
write(json_unit, json_real_fmt) 'pt2_err', pt2_data_err % pt2(k)
write(json_unit, json_real_fmt) 'rpt2', pt2_data % rpt2(k)
write(json_unit, json_real_fmt) 'rpt2_err', pt2_data_err % rpt2(k)
write(json_unit, json_real_fmt) 'variance', pt2_data % variance(k)
write(json_unit, json_real_fmt) 'variance_err', pt2_data_err % variance(k)
write(json_unit, json_array_open_fmt) 'ex_energy'
do i=2,N_iter_p
write(json_unit, fmtk(i)) extrapolated_energy(i,k)
enddo
write(json_unit, json_array_close_fmtx)
if (k < N_states_p) then
write(json_unit, json_dict_close_fmt)
else
write(json_unit, json_dict_close_fmtx)
endif
enddo
write(json_unit, json_array_close_fmtx)
write(json_unit, json_dict_close_fmt)
deallocate(fmtk)
call unlock_io
end
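
Illustrative sketch (file name hypothetical): the JSON written here is consumed, for example, by scripts/qp_exc_energy.py above, which reads the 'fci' array opened in the fci program and the per-state keys written in this subroutine:

import json

with open("qp_run.json") as f:                     # hypothetical output file
    iterations = json.load(f)["fci"]               # one entry per CIPSI iteration

for it in iterations:
    n_det = it["n_det"]
    for k, state in enumerate(it["states"]):
        e, pt2 = state["energy"], state["pt2"]
        print("N_det=%10d  state %d:  E=%.8f  E+PT2=%.8f" % (n_det, k, e, e + pt2))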


@ -1,6 +1,7 @@
json
mpi
perturbation
zmq
iterations_tc
iterations
csf
tc_bi_ortho


@ -94,7 +94,15 @@ subroutine run_stochastic_cipsi
call ZMQ_pt2(E_denom, pt2_data, pt2_data_err, relative_error,to_select) ! Stochastic PT2 and selection
! stop
N_iter += 1
call print_summary(psi_energy_with_nucl_rep, &
pt2_data, pt2_data_err, N_det,N_configuration,N_states,psi_s2)
call save_energy(psi_energy_with_nucl_rep, pt2_data % pt2)
call increment_n_iter(psi_energy_with_nucl_rep, pt2_data)
call print_extrapolated_energy()
! call print_mol_properties()
call write_cipsi_json(pt2_data,pt2_data_err)
if (qp_stop()) exit


@ -0,0 +1,53 @@
subroutine write_cipsi_json(pt2_data, pt2_data_err)
use selection_types
implicit none
BEGIN_DOC
! Writes JSON data for CIPSI runs
END_DOC
type(pt2_type), intent(in) :: pt2_data, pt2_data_err
integer :: i,j,k
call lock_io
character*(64), allocatable :: fmtk(:)
integer :: N_states_p, N_iter_p
N_states_p = min(N_states,N_det)
N_iter_p = min(N_iter,8)
allocate(fmtk(0:N_iter_p))
fmtk(:) = '('' '',E22.15,'','')'
fmtk(N_iter_p) = '('' '',E22.15)'
write(json_unit, json_dict_uopen_fmt)
write(json_unit, json_int_fmt) 'n_det', N_det
if (s2_eig) then
write(json_unit, json_int_fmt) 'n_cfg', N_configuration
if (only_expected_s2) then
write(json_unit, json_int_fmt) 'n_csf', N_csf
endif
endif
write(json_unit, json_array_open_fmt) 'states'
do k=1,N_states_p
write(json_unit, json_dict_uopen_fmt)
write(json_unit, json_real_fmt) 'energy', psi_energy_with_nucl_rep(k)
write(json_unit, json_real_fmt) 's2', psi_s2(k)
write(json_unit, json_real_fmt) 'pt2', pt2_data % pt2(k)
write(json_unit, json_real_fmt) 'pt2_err', pt2_data_err % pt2(k)
write(json_unit, json_real_fmt) 'rpt2', pt2_data % rpt2(k)
write(json_unit, json_real_fmt) 'rpt2_err', pt2_data_err % rpt2(k)
write(json_unit, json_real_fmt) 'variance', pt2_data % variance(k)
write(json_unit, json_real_fmt) 'variance_err', pt2_data_err % variance(k)
write(json_unit, json_array_open_fmt) 'ex_energy'
do i=2,N_iter_p
write(json_unit, fmtk(i)) extrapolated_energy(i,k)
enddo
write(json_unit, json_array_close_fmtx)
if (k < N_states_p) then
write(json_unit, json_dict_close_fmt)
else
write(json_unit, json_dict_close_fmtx)
endif
enddo
write(json_unit, json_array_close_fmtx)
write(json_unit, json_dict_close_fmt)
deallocate(fmtk)
call unlock_io
end


@ -150,7 +150,9 @@ subroutine davidson_slave_work(zmq_to_qp_run_socket, zmq_socket_push, N_st, sze,
exit
endif
if(task_id == 0) exit
call lock_io()
read (msg,*) imin, imax, ishift, istep
call unlock_io()
integer :: k
do k=imin,imax
v_t(:,k) = 0.d0
@ -546,6 +548,8 @@ end
integer function zmq_put_N_states_diag(zmq_to_qp_run_socket,worker_id)
use f77_zmq
implicit none


@ -461,9 +461,8 @@ subroutine davidson_diag_hjj_sjj(dets_in,u_in,H_jj,s2_out,energies,dim_in,sze,N_
integer :: lwork, info
double precision, allocatable :: work(:)
y = h
!y = h_p
! y = h_p ! Doesn't work for non-singlets
lwork = -1
allocate(work(1))
call dsygv(1,'V','U',shift2,y,size(y,1), &


@ -9,4 +9,21 @@ BEGIN_PROVIDER [ integer(omp_lock_kind), file_lock ]
call omp_init_lock(file_lock)
END_PROVIDER
! These functions need to be called because internal read and write are not thread safe.
subroutine lock_io()
implicit none
BEGIN_DOC
! Needs to be called before doing I/O because internal read and write
! are not thread safe.
END_DOC
call omp_set_lock(file_lock)
end subroutine lock_io
subroutine unlock_io()
implicit none
BEGIN_DOC
! Needs to be called after doing I/O because internal read and write
! are not thread safe.
END_DOC
call omp_unset_lock(file_lock)
end subroutine unlock_io
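
Illustrative analogy (Python, hypothetical): the same pattern of serializing I/O through a single lock, which is what lock_io/unlock_io do with the OpenMP file_lock around Fortran internal reads and writes:

import threading, io

io_lock = threading.Lock()            # plays the role of file_lock
shared = io.StringIO()                # shared "unit" written by several threads

def report(tid, value):
    # analogue of: call lock_io() ; write(...) ; call unlock_io()
    with io_lock:
        shared.write("thread %d: value = %d\n" % (tid, value))

threads = [threading.Thread(target=report, args=(i, i * i)) for i in range(4)]
for t in threads: t.start()
for t in threads: t.join()
print(shared.getvalue(), end="")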


@ -39,12 +39,19 @@ program fci
if (.not.is_zmq_slave) then
PROVIDE psi_det psi_coef mo_two_e_integrals_in_map
write(json_unit,json_array_open_fmt) 'fci'
if (do_pt2) then
call run_stochastic_cipsi
else
call run_cipsi
endif
write(json_unit,json_dict_uopen_fmt)
write(json_unit,json_dict_close_fmtx)
write(json_unit,json_array_close_fmtx)
call json_close
else
PROVIDE mo_two_e_integrals_in_map pt2_min_parallel_tasks


@ -1,3 +1,4 @@
json
tc_bi_ortho
davidson_undressed
cipsi_tc_bi_ortho


@ -62,6 +62,7 @@ subroutine run_cipsi_tc
endif
endif
! ---
write(json_unit,json_array_open_fmt) 'fci_tc'
if (do_pt2) then
call run_stochastic_cipsi
@ -69,6 +70,11 @@ subroutine run_cipsi_tc
call run_cipsi
endif
write(json_unit,json_dict_uopen_fmt)
write(json_unit,json_dict_close_fmtx)
write(json_unit,json_array_close_fmtx)
call json_close
else
PROVIDE mo_bi_ortho_tc_one_e mo_bi_ortho_tc_two_e pt2_min_parallel_tasks
if(elec_alpha_num+elec_beta_num.ge.3)then


@ -4,6 +4,6 @@ subroutine save_energy(E,pt2)
! Saves the energy in |EZFIO|.
END_DOC
double precision, intent(in) :: E(N_states), pt2(N_states)
call ezfio_set_fci_tc_energy(E(1:N_states))
call ezfio_set_fci_tc_energy_pt2(E(1:N_states)+pt2(1:N_states))
call ezfio_set_fci_tc_bi_energy(E(1:N_states))
call ezfio_set_fci_tc_bi_energy_pt2(E(1:N_states)+pt2(1:N_states))
end


@ -1,3 +1,4 @@
ao_one_e_ints
ao_two_e_ints
scf_utils
json


@ -15,115 +15,59 @@
double precision, allocatable :: ao_two_e_integral_alpha_tmp(:,:)
double precision, allocatable :: ao_two_e_integral_beta_tmp(:,:)
ao_two_e_integral_alpha = 0.d0
ao_two_e_integral_beta = 0.d0
if (do_direct_integrals) then
if (do_ao_cholesky) then ! Use Cholesky-decomposed integrals
ao_two_e_integral_alpha(:,:) = ao_two_e_integral_alpha_chol(:,:)
ao_two_e_integral_beta (:,:) = ao_two_e_integral_beta_chol (:,:)
!$OMP PARALLEL DEFAULT(NONE) &
!$OMP PRIVATE(i,j,l,k1,k,integral,ii,jj,kk,ll,keys,values,p,q,r,s,i0,j0,k0,l0, &
!$OMP ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp, c0, c1, c2, &
!$OMP local_threshold)&
!$OMP SHARED(ao_num,SCF_density_matrix_ao_alpha,SCF_density_matrix_ao_beta,&
!$OMP ao_integrals_map,ao_integrals_threshold, ao_two_e_integral_schwartz, &
!$OMP ao_two_e_integral_alpha, ao_two_e_integral_beta)
else ! Use integrals in AO basis set
allocate(keys(1), values(1))
allocate(ao_two_e_integral_alpha_tmp(ao_num,ao_num), &
ao_two_e_integral_beta_tmp(ao_num,ao_num))
ao_two_e_integral_alpha_tmp = 0.d0
ao_two_e_integral_beta_tmp = 0.d0
ao_two_e_integral_alpha = 0.d0
ao_two_e_integral_beta = 0.d0
if (do_direct_integrals) then
q = ao_num*ao_num*ao_num*ao_num
!$OMP DO SCHEDULE(static,64)
do p=1_8,q
call two_e_integrals_index_reverse(kk,ii,ll,jj,p)
if ( (kk(1)>ao_num).or. &
(ii(1)>ao_num).or. &
(jj(1)>ao_num).or. &
(ll(1)>ao_num) ) then
cycle
endif
k = kk(1)
i = ii(1)
l = ll(1)
j = jj(1)
!$OMP PARALLEL DEFAULT(NONE) &
!$OMP PRIVATE(i,j,l,k1,k,integral,ii,jj,kk,ll,keys,values,p,q,r,s,i0,j0,k0,l0,&
!$OMP ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp, c0, c1, c2,&
!$OMP local_threshold) &
!$OMP SHARED(ao_num,SCF_density_matrix_ao_alpha,SCF_density_matrix_ao_beta,&
!$OMP ao_integrals_map,ao_integrals_threshold, ao_two_e_integral_schwartz,&
!$OMP ao_two_e_integral_alpha, ao_two_e_integral_beta)
logical, external :: ao_two_e_integral_zero
if (ao_two_e_integral_zero(i,k,j,l)) then
cycle
endif
local_threshold = ao_two_e_integral_schwartz(k,l)*ao_two_e_integral_schwartz(i,j)
if (local_threshold < ao_integrals_threshold) then
cycle
endif
i0 = i
j0 = j
k0 = k
l0 = l
values(1) = 0.d0
local_threshold = ao_integrals_threshold/local_threshold
do k2=1,8
if (kk(k2)==0) then
cycle
endif
i = ii(k2)
j = jj(k2)
k = kk(k2)
l = ll(k2)
c0 = SCF_density_matrix_ao_alpha(k,l)+SCF_density_matrix_ao_beta(k,l)
c1 = SCF_density_matrix_ao_alpha(k,i)
c2 = SCF_density_matrix_ao_beta(k,i)
if ( dabs(c0)+dabs(c1)+dabs(c2) < local_threshold) then
cycle
endif
if (values(1) == 0.d0) then
values(1) = ao_two_e_integral(k0,l0,i0,j0)
endif
integral = c0 * values(1)
ao_two_e_integral_alpha_tmp(i,j) += integral
ao_two_e_integral_beta_tmp (i,j) += integral
integral = values(1)
ao_two_e_integral_alpha_tmp(l,j) -= c1 * integral
ao_two_e_integral_beta_tmp (l,j) -= c2 * integral
enddo
enddo
!$OMP END DO NOWAIT
!$OMP CRITICAL
ao_two_e_integral_alpha += ao_two_e_integral_alpha_tmp
ao_two_e_integral_beta += ao_two_e_integral_beta_tmp
!$OMP END CRITICAL
deallocate(keys,values,ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp)
!$OMP END PARALLEL
else
PROVIDE ao_two_e_integrals_in_map
allocate(keys(1), values(1))
allocate(ao_two_e_integral_alpha_tmp(ao_num,ao_num), &
ao_two_e_integral_beta_tmp(ao_num,ao_num))
ao_two_e_integral_alpha_tmp = 0.d0
ao_two_e_integral_beta_tmp = 0.d0
integer(omp_lock_kind) :: lck(ao_num)
integer(map_size_kind) :: i8
integer :: ii(8), jj(8), kk(8), ll(8), k2
integer(cache_map_size_kind) :: n_elements_max, n_elements
integer(key_kind), allocatable :: keys(:)
double precision, allocatable :: values(:)
!$OMP PARALLEL DEFAULT(NONE) &
!$OMP PRIVATE(i,j,l,k1,k,integral,ii,jj,kk,ll,i8,keys,values,n_elements_max, &
!$OMP n_elements,ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp)&
!$OMP SHARED(ao_num,SCF_density_matrix_ao_alpha,SCF_density_matrix_ao_beta,&
!$OMP ao_integrals_map, ao_two_e_integral_alpha, ao_two_e_integral_beta)
call get_cache_map_n_elements_max(ao_integrals_map,n_elements_max)
allocate(keys(n_elements_max), values(n_elements_max))
allocate(ao_two_e_integral_alpha_tmp(ao_num,ao_num), &
ao_two_e_integral_beta_tmp(ao_num,ao_num))
ao_two_e_integral_alpha_tmp = 0.d0
ao_two_e_integral_beta_tmp = 0.d0
!$OMP DO SCHEDULE(static,1)
do i8=0_8,ao_integrals_map%map_size
n_elements = n_elements_max
call get_cache_map(ao_integrals_map,i8,keys,values,n_elements)
do k1=1,n_elements
call two_e_integrals_index_reverse(kk,ii,ll,jj,keys(k1))
q = ao_num*ao_num*ao_num*ao_num
!$OMP DO SCHEDULE(static,64)
do p=1_8,q
call two_e_integrals_index_reverse(kk,ii,ll,jj,p)
if ( (kk(1)>ao_num).or. &
(ii(1)>ao_num).or. &
(jj(1)>ao_num).or. &
(ll(1)>ao_num) ) then
cycle
endif
k = kk(1)
i = ii(1)
l = ll(1)
j = jj(1)
logical, external :: ao_two_e_integral_zero
if (ao_two_e_integral_zero(i,k,j,l)) then
cycle
endif
local_threshold = ao_two_e_integral_schwartz(k,l)*ao_two_e_integral_schwartz(i,j)
if (local_threshold < ao_integrals_threshold) then
cycle
endif
i0 = i
j0 = j
k0 = k
l0 = l
values(1) = 0.d0
local_threshold = ao_integrals_threshold/local_threshold
do k2=1,8
if (kk(k2)==0) then
cycle
@ -132,25 +76,162 @@
j = jj(k2)
k = kk(k2)
l = ll(k2)
integral = (SCF_density_matrix_ao_alpha(k,l)+SCF_density_matrix_ao_beta(k,l)) * values(k1)
c0 = SCF_density_matrix_ao_alpha(k,l)+SCF_density_matrix_ao_beta(k,l)
c1 = SCF_density_matrix_ao_alpha(k,i)
c2 = SCF_density_matrix_ao_beta(k,i)
if ( dabs(c0)+dabs(c1)+dabs(c2) < local_threshold) then
cycle
endif
if (values(1) == 0.d0) then
values(1) = ao_two_e_integral(k0,l0,i0,j0)
endif
integral = c0 * values(1)
ao_two_e_integral_alpha_tmp(i,j) += integral
ao_two_e_integral_beta_tmp (i,j) += integral
integral = values(k1)
ao_two_e_integral_alpha_tmp(l,j) -= SCF_density_matrix_ao_alpha(k,i) * integral
ao_two_e_integral_beta_tmp (l,j) -= SCF_density_matrix_ao_beta (k,i) * integral
integral = values(1)
ao_two_e_integral_alpha_tmp(l,j) -= c1 * integral
ao_two_e_integral_beta_tmp (l,j) -= c2 * integral
enddo
enddo
enddo
!$OMP END DO NOWAIT
!$OMP CRITICAL
ao_two_e_integral_alpha += ao_two_e_integral_alpha_tmp
ao_two_e_integral_beta += ao_two_e_integral_beta_tmp
!$OMP END CRITICAL
deallocate(keys,values,ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp)
!$OMP END PARALLEL
!$OMP END DO NOWAIT
!$OMP CRITICAL
ao_two_e_integral_alpha += ao_two_e_integral_alpha_tmp
ao_two_e_integral_beta += ao_two_e_integral_beta_tmp
!$OMP END CRITICAL
deallocate(keys,values,ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp)
!$OMP END PARALLEL
else
PROVIDE ao_two_e_integrals_in_map
integer(omp_lock_kind) :: lck(ao_num)
integer(map_size_kind) :: i8
integer :: ii(8), jj(8), kk(8), ll(8), k2
integer(cache_map_size_kind) :: n_elements_max, n_elements
integer(key_kind), allocatable :: keys(:)
double precision, allocatable :: values(:)
!$OMP PARALLEL DEFAULT(NONE) &
!$OMP PRIVATE(i,j,l,k1,k,integral,ii,jj,kk,ll,i8,keys,values,n_elements_max,&
!$OMP n_elements,ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp)&
!$OMP SHARED(ao_num,SCF_density_matrix_ao_alpha,SCF_density_matrix_ao_beta,&
!$OMP ao_integrals_map, ao_two_e_integral_alpha, ao_two_e_integral_beta)
call get_cache_map_n_elements_max(ao_integrals_map,n_elements_max)
allocate(keys(n_elements_max), values(n_elements_max))
allocate(ao_two_e_integral_alpha_tmp(ao_num,ao_num), &
ao_two_e_integral_beta_tmp(ao_num,ao_num))
ao_two_e_integral_alpha_tmp = 0.d0
ao_two_e_integral_beta_tmp = 0.d0
!$OMP DO SCHEDULE(static,1)
do i8=0_8,ao_integrals_map%map_size
n_elements = n_elements_max
call get_cache_map(ao_integrals_map,i8,keys,values,n_elements)
do k1=1,n_elements
call two_e_integrals_index_reverse(kk,ii,ll,jj,keys(k1))
do k2=1,8
if (kk(k2)==0) then
cycle
endif
i = ii(k2)
j = jj(k2)
k = kk(k2)
l = ll(k2)
integral = (SCF_density_matrix_ao_alpha(k,l)+SCF_density_matrix_ao_beta(k,l)) * values(k1)
ao_two_e_integral_alpha_tmp(i,j) += integral
ao_two_e_integral_beta_tmp (i,j) += integral
integral = values(k1)
ao_two_e_integral_alpha_tmp(l,j) -= SCF_density_matrix_ao_alpha(k,i) * integral
ao_two_e_integral_beta_tmp (l,j) -= SCF_density_matrix_ao_beta (k,i) * integral
enddo
enddo
enddo
!$OMP END DO NOWAIT
!$OMP CRITICAL
ao_two_e_integral_alpha += ao_two_e_integral_alpha_tmp
ao_two_e_integral_beta += ao_two_e_integral_beta_tmp
!$OMP END CRITICAL
deallocate(keys,values,ao_two_e_integral_alpha_tmp,ao_two_e_integral_beta_tmp)
!$OMP END PARALLEL
endif
endif
END_PROVIDER
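
Illustrative sketch (random data, not real AO integrals): the direct-integral branch above screens with the Cauchy-Schwarz bound |(ij|kl)| <= sqrt((ij|ij)) * sqrt((kl|kl)), precomputed in ao_two_e_integral_schwartz, so negligible integrals are skipped before being evaluated:

import numpy as np

rng = np.random.default_rng(1)
n, naux = 6, 20
B = rng.standard_normal((naux, n, n))
B = 0.5 * (B + B.transpose(0, 2, 1))               # symmetric in (i,j)
decay = np.exp(-np.abs(np.arange(n)[:, None] - np.arange(n)[None, :]))
B = B * decay                                      # mimic AO locality: distant pairs are small
eri = np.einsum('aij,akl->ijkl', B, B)             # PSD stand-in for (ij|kl)

schwartz = np.sqrt(np.einsum('ijij->ij', eri))     # sqrt((ij|ij)), the Schwarz factors
bound = np.einsum('ij,kl->ijkl', schwartz, schwartz)
assert np.all(np.abs(eri) <= bound + 1.0e-9)       # the bound always holds

threshold = 1.0e-2
screened = bound < threshold                       # these integrals would never be computed
print("fraction screened out: %.2f" % screened.mean())
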
BEGIN_PROVIDER [ double precision, ao_two_e_integral_alpha_chol, (ao_num, ao_num) ]
&BEGIN_PROVIDER [ double precision, ao_two_e_integral_beta_chol , (ao_num, ao_num) ]
use map_module
implicit none
BEGIN_DOC
! Alpha and Beta Fock matrices in AO basis set
END_DOC
integer :: m,n,l,s,j
double precision :: integral
double precision, allocatable :: X(:), X2(:,:,:,:), X3(:,:,:,:)
allocate (X(cholesky_ao_num))
! X(j) = \sum_{mn} SCF_density_matrix_ao(m,n) * cholesky_ao(m,n,j)
call dgemm('T','N',cholesky_ao_num,1,ao_num*ao_num,1.d0, &
cholesky_ao, ao_num*ao_num, &
SCF_density_matrix_ao, ao_num*ao_num,0.d0, &
X, cholesky_ao_num)
!
! ao_two_e_integral_alpha(m,n) = \sum_{j} cholesky_ao(m,n,j) * X(j)
call dgemm('N','N',ao_num*ao_num,1,cholesky_ao_num, 1.d0, &
cholesky_ao, ao_num*ao_num, &
X, cholesky_ao_num, 0.d0, &
ao_two_e_integral_alpha_chol, ao_num*ao_num)
deallocate(X)
ao_two_e_integral_beta_chol = ao_two_e_integral_alpha_chol
allocate(X2(ao_num,ao_num,cholesky_ao_num,2))
! ao_two_e_integral_alpha_chol (l,s) -= cholesky_ao(l,m,j) * SCF_density_matrix_ao_beta (m,n) * cholesky_ao(n,s,j)
call dgemm('N','N',ao_num,ao_num*cholesky_ao_num,ao_num, 1.d0, &
SCF_density_matrix_ao_alpha, ao_num, &
cholesky_ao, ao_num, 0.d0, &
X2(1,1,1,1), ao_num)
call dgemm('N','N',ao_num,ao_num*cholesky_ao_num,ao_num, 1.d0, &
SCF_density_matrix_ao_beta, ao_num, &
cholesky_ao, ao_num, 0.d0, &
X2(1,1,1,2), ao_num)
allocate(X3(ao_num,cholesky_ao_num,ao_num,2))
do s=1,ao_num
do j=1,cholesky_ao_num
do m=1,ao_num
X3(m,j,s,1) = X2(m,s,j,1)
X3(m,j,s,2) = X2(m,s,j,2)
enddo
enddo
enddo
deallocate(X2)
call dgemm('N','N',ao_num,ao_num,ao_num*cholesky_ao_num, -1.d0, &
cholesky_ao, ao_num, &
X3(1,1,1,1), ao_num*cholesky_ao_num, 1.d0, &
ao_two_e_integral_alpha_chol, ao_num)
call dgemm('N','N',ao_num,ao_num,ao_num*cholesky_ao_num, -1.d0, &
cholesky_ao, ao_num, &
X3(1,1,1,2), ao_num*cholesky_ao_num, 1.d0, &
ao_two_e_integral_beta_chol, ao_num)
deallocate(X3)
END_PROVIDER
BEGIN_PROVIDER [ double precision, Fock_matrix_ao_alpha, (ao_num, ao_num) ]
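
Illustrative sketch (random data, not real quantities): the dgemm sequence in ao_two_e_integral_alpha_chol amounts to two contractions over Cholesky vectors L(m,n,j) and a density D, a Coulomb term J(m,n) = sum_j L(m,n,j) * (sum_kl D(k,l) L(k,l,j)) and an exchange term K(l,s) = sum_j sum_mn L(l,m,j) D(m,n) L(n,s,j); in NumPy:

import numpy as np

rng = np.random.default_rng(2)
nao, nchol = 5, 12
L = rng.standard_normal((nao, nao, nchol))
L = 0.5 * (L + L.transpose(1, 0, 2))      # cholesky_ao(m,n,j), symmetric in (m,n)
D = rng.standard_normal((nao, nao))
D = 0.5 * (D + D.T)                       # symmetric density matrix

X = np.einsum('mnj,mn->j', L, D)          # X(j) = sum_mn D(m,n) L(m,n,j)
J = np.einsum('mnj,j->mn', L, X)          # Coulomb: J(m,n) = sum_j L(m,n,j) X(j)
K = np.einsum('lmj,mn,nsj->ls', L, D, L)  # exchange; the Fortran builds this via the X2/X3 intermediates

# Check against the full 4-index tensor (lm|ns) = sum_j L(l,m,j) L(n,s,j)
eri = np.einsum('lmj,nsj->lmns', L, L)
print(np.allclose(J, np.einsum('lmns,ns->lm', eri, D)),
      np.allclose(K, np.einsum('lmns,mn->ls', eri, D)))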


@ -80,9 +80,14 @@ subroutine run
mo_label = 'Orthonormalized'
call Roothaan_Hall_SCF
call ezfio_set_hartree_fock_energy(SCF_energy)
write(json_unit,json_array_open_fmt) 'scf'
call Roothaan_Hall_SCF
write(json_unit,json_array_close_fmtx)
call json_close
call ezfio_set_hartree_fock_energy(SCF_energy)
end


@ -1,24 +0,0 @@
[n_iter]
interface: ezfio
doc: Number of saved iterations
type:integer
default: 1
[n_det_iterations]
interface: ezfio, provider
doc: Number of determinants at each iteration
type: integer
size: (100)
[energy_iterations]
interface: ezfio, provider
doc: The variational energy at each iteration
type: double precision
size: (determinants.n_states,100)
[pt2_iterations]
interface: ezfio, provider
doc: The |PT2| correction at each iteration
type: double precision
size: (determinants.n_states,100)


@ -1,37 +0,0 @@
BEGIN_PROVIDER [ integer, n_iter ]
implicit none
BEGIN_DOC
! number of iterations
END_DOC
logical :: has
PROVIDE ezfio_filename
if (mpi_master) then
double precision :: zeros(N_states,100)
integer :: izeros(100)
zeros = 0.d0
izeros = 0
call ezfio_set_iterations_n_iter(0)
call ezfio_set_iterations_energy_iterations(zeros)
call ezfio_set_iterations_pt2_iterations(zeros)
call ezfio_set_iterations_n_det_iterations(izeros)
n_iter = 1
endif
IRP_IF MPI_DEBUG
print *, irp_here, mpi_rank
call MPI_BARRIER(MPI_COMM_WORLD, ierr)
IRP_ENDIF
IRP_IF MPI
include 'mpif.h'
integer :: ierr
call MPI_BCAST( n_iter, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
if (ierr /= MPI_SUCCESS) then
stop 'Unable to read n_iter with MPI'
endif
IRP_ENDIF
call write_time(6)
END_PROVIDER


@ -1,42 +1,65 @@
BEGIN_PROVIDER [ double precision, extrapolated_energy, (N_iter,N_states) ]
implicit none
BEGIN_DOC
! Extrapolated energy, using E_var = f(PT2) where PT2=0
END_DOC
integer :: i
do i=1,min(N_states,N_det)
call extrapolate_data(N_iter, &
energy_iterations(i,1:N_iter), &
pt2_iterations(i,1:N_iter), &
extrapolated_energy(1:N_iter,i))
enddo
END_PROVIDER
subroutine save_iterations(e_, pt2_,n_)
BEGIN_PROVIDER [ integer, N_iter ]
implicit none
BEGIN_DOC
! Update the energy in the EZFIO file.
! Number of CIPSI iterations
END_DOC
integer, intent(in) :: n_
double precision, intent(in) :: e_(N_states), pt2_(N_states)
integer :: i
if (N_iter == 101) then
do i=2,N_iter-1
energy_iterations(1:N_states,N_iter-1) = energy_iterations(1:N_states,N_iter)
pt2_iterations(1:N_states,N_iter-1) = pt2_iterations(1:N_states,N_iter)
N_iter = 0
END_PROVIDER
BEGIN_PROVIDER [ integer, N_iter_max ]
implicit none
BEGIN_DOC
! Max number of iterations for extrapolations
END_DOC
N_iter_max = 8
END_PROVIDER
BEGIN_PROVIDER [ double precision, energy_iterations , (n_states,N_iter_max) ]
&BEGIN_PROVIDER [ double precision, pt2_iterations , (n_states,N_iter_max) ]
&BEGIN_PROVIDER [ double precision, extrapolated_energy, (N_iter_max,N_states) ]
implicit none
BEGIN_DOC
! The energy at each iteration for the extrapolations
END_DOC
energy_iterations = 0.d0
pt2_iterations = 0.d0
extrapolated_energy = 0.d0
END_PROVIDER
subroutine increment_n_iter(e, pt2_data)
use selection_types