mirror of https://github.com/triqs/dft_tools synced 2024-12-22 20:34:38 +01:00

[feat] improved standard behavior of block struct (#248)

* previously the default gf_struct_solver had the keys up / down, inconsistent with the keys produced after analyse_block_structure was run (up_0 / down_0); the default solver structure now always carries the _0 suffix in its keys
* the old behavior resulted in an error when analyse_block_structure was called twice
* fixed the analyse_block_structure tests to match the new behavior
* to use analyse_block_structure correctly, first call extract_G_loc(transform_to_solver_blocks=False)
* changed the density_matrix function to call extract_G_loc() directly when method='using_gf' is selected
* density_matrix now prints a deprecation warning; the same result can be obtained via extract_G_loc and [G.density() for G in Gloc] (see the sketch below)
* new function density_matrix_using_point_integration()
* analyse_block_structure now enforces that the input dm or G is a list of length n_corr_shells
* corrected the docstring describing how include_shells is given (inequivalent shells instead of correlated shells)
* fixed other tests accordingly
* fixed a small bug in the initial block structure regarding the length of lists
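
A minimal sketch of the workflow these changes encourage (assuming an initialised SumkDFT instance SK whose self-energy has already been set via put_Sigma; variable names are illustrative only):

    # local Green's function in the sumk block structure, one element per correlated shell
    Gloc = SK.extract_G_loc(transform_to_solver_blocks=False)

    # analyse the block structure directly from Gloc; with the _0 suffix now part of
    # the default solver keys this can also be called repeatedly
    G_solver = SK.analyse_block_structure_from_gf(Gloc)

    # density matrices, replacing the deprecated density_matrix(method='using_gf')
    dens_mat = [G.density() for G in Gloc]

    # point-integration alternative (diagonal hopping only, e.g. wien2k input)
    dens_mat_pi = SK.density_matrix_using_point_integration()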
Alexander Hampel 2024-02-26 14:50:24 -05:00 committed by GitHub
parent d4d231786e
commit b355173cf1
6 changed files with 183 additions and 155 deletions

View File

@@ -168,8 +168,8 @@ class SumkDFT(object):
         # blocks possible
         self.gf_struct_sumk = [[(sp, self.corr_shells[icrsh]['dim']) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]]
                                for icrsh in range(self.n_corr_shells)]
-        # First set a standard gf_struct solver:
-        self.gf_struct_solver = [dict([(sp, self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
+        # First set a standard gf_struct solver (add _0 here for consistency with analyse_block_structure):
+        self.gf_struct_solver = [dict([(sp+'_0', self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
                                        for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]])
                                  for ish in range(self.n_inequiv_shells)]
         # Set standard (identity) maps from gf_struct_sumk <->
@@ -180,12 +180,12 @@ class SumkDFT(object):
                                      for ish in range(self.n_inequiv_shells)]
         for ish in range(self.n_inequiv_shells):
             for block, inner_dim in self.gf_struct_sumk[self.inequiv_to_corr[ish]]:
-                self.solver_to_sumk_block[ish][block] = block
+                self.solver_to_sumk_block[ish][block+'_0'] = block
                 for inner in range(inner_dim):
                     self.sumk_to_solver[ish][
-                        (block, inner)] = (block, inner)
+                        (block, inner)] = (block+'_0', inner)
                     self.solver_to_sumk[ish][
-                        (block, inner)] = (block, inner)
+                        (block+'_0', inner)] = (block, inner)

         # assume no shells are degenerate
         self.deg_shells = [[] for ish in range(self.n_inequiv_shells)]
@@ -862,8 +862,8 @@ class SumkDFT(object):
             If the difference between density matrix / hloc elements is below threshold,
             they are considered to be equal.
         include_shells : list of integers, optional
-            List of correlated shells to be analysed.
-            If include_shells is not provided all correlated shells will be analysed.
+            List of inequivalent shells to be analysed.
+            If include_shells is not provided all inequivalent shells will be analysed.
         dm : list of dict, optional
             List of density matrices from which block stuctures are to be analysed.
             Each density matrix is a dict {block names: 2d numpy arrays} for each correlated shell.
@@ -874,14 +874,11 @@ class SumkDFT(object):
             If not provided, it will be calculated using eff_atomic_levels.
         """

-        self.gf_struct_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.sumk_to_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk_block = [{}
-                                     for ish in range(self.n_inequiv_shells)]
         if dm is None:
-            dm = self.density_matrix(method='using_point_integration')
+            warn("WARNING: No density matrix given. Calculating density matrix with default parameters. This will be deprecated in future releases.")
+            dm = self.density_matrix(method='using_gf', transform_to_solver_blocks=False)
+        assert len(dm) == self.n_corr_shells, "The number of density matrices must be equal to the number of correlated shells."

         dens_mat = [dm[self.inequiv_to_corr[ish]]
                     for ish in range(self.n_inequiv_shells)]
         if hloc is None:
@@ -890,8 +887,13 @@ class SumkDFT(object):

         if include_shells is None:
             include_shells = list(range(self.n_inequiv_shells))
         for ish in include_shells:
+            self.gf_struct_solver[ish] = {}
+            self.sumk_to_solver[ish] = {}
+            self.solver_to_sumk[ish] = {}
+            self.solver_to_sumk_block[ish] = {}
             for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
+                assert sp in dens_mat[ish], f"The density matrix does not contain the block {sp}. Is the input dm given in sumk block structure?"
                 n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']
                 # gives an index list of entries larger that threshold
                 dmbool = (abs(dens_mat[ish][sp]) > threshold)
@@ -1049,8 +1051,8 @@ class SumkDFT(object):
             If the difference between matrix elements is below threshold,
             they are considered to be equal.
         include_shells : list of integers, optional
-            List of correlated shells to be analysed.
-            If include_shells is not provided all correlated shells will be analysed.
+            List of inequivalent shells to be analysed.
+            If include_shells is not provided all inequivalent shells will be analysed.
         analyse_deg_shells : bool
             Whether to call the analyse_deg_shells function
             after having finished the block structure analysis
@@ -1062,29 +1064,26 @@ class SumkDFT(object):
         """

         assert isinstance(G, list), "G must be a list (with elements for each correlated shell)"
+        assert len(G) == self.n_corr_shells, "G must have one element for each correlated shell, run extract_G_loc with transform_to_solver_blocks=False to get the correct G"

         gf = self._get_hermitian_quantity_from_gf(G)

-        # initialize the variables
-        self.gf_struct_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.sumk_to_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk_block = [{}
-                                     for ish in range(self.n_inequiv_shells)]
-
-        # the maximum value of each matrix element of each block and shell
-        max_gf = [{name:np.max(np.abs(g.data),0) for name, g in gf[ish]} for ish in range(self.n_inequiv_shells)]
-
         if include_shells is None:
             # include all shells
             include_shells = list(range(self.n_inequiv_shells))

         for ish in include_shells:
+            self.gf_struct_solver[ish] = {}
+            self.sumk_to_solver[ish] = {}
+            self.solver_to_sumk[ish] = {}
+            self.solver_to_sumk_block[ish] = {}
             for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
+                assert sp in gf[self.inequiv_to_corr[ish]].indices, f"The Green's function does not contain the block {sp}. Is the input G given in sumk block structure?"
                 n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']
                 # gives an index list of entries larger that threshold
-                maxgf_bool = (abs(max_gf[ish][sp]) > threshold)
+                max_gf = np.max(np.abs(gf[self.inequiv_to_corr[ish]][sp].data),0)
+                maxgf_bool = (max_gf > threshold)

                 # Determine off-diagonal entries in upper triangular part of the
                 # Green's function
@@ -1455,21 +1454,12 @@ class SumkDFT(object):
         return trans

-    def density_matrix(self, method='using_gf'):
-        """Calculate density matrices in one of two ways.
-
-        Parameters
-        ----------
-        method : string, optional
-
-            - if 'using_gf': First get lattice gf (g_loc is not set up), then density matrix.
-                             It is useful for Hubbard I, and very quick.
-                             No assumption on the hopping structure is made (ie diagonal or not).
-            - if 'using_point_integration': Only works for diagonal hopping matrix (true in wien2k).
-
-        beta : float, optional
-            Inverse temperature.
+    def density_matrix_using_point_integration(self):
+        """
+        Calculate density matrices using point integration: Only works for
+        diagonal hopping matrix (true in wien2k). Consider using extract_G_loc
+        together with [G.density() for G in Gloc] instead. Returned density
+        matrix is always given in SumK block structure.

         Returns
         -------
@@ -1483,34 +1473,21 @@ class SumkDFT(object):
                     [self.corr_shells[icrsh]['dim'], self.corr_shells[icrsh]['dim']], complex)
         ikarray = np.array(list(range(self.n_k)))
+        ntoi = self.spin_names_to_ind[self.SO]
+        spn = self.spin_block_names[self.SO]

         for ik in mpi.slice_array(ikarray):
+            dims = {sp:self.n_orbitals[ik, ntoi[sp]] for sp in spn}
+            MMat = [np.zeros([dims[sp], dims[sp]], complex) for sp in spn]

-            if method == "using_gf":
-
-                G_latt_iw = self.lattice_gf(ik=ik, mu=self.chemical_potential)
-                G_latt_iw *= self.bz_weights[ik]
-                dm = G_latt_iw.density()
-                MMat = [dm[sp] for sp in self.spin_block_names[self.SO]]
-
-            elif method == "using_point_integration":
-
-                ntoi = self.spin_names_to_ind[self.SO]
-                spn = self.spin_block_names[self.SO]
-                dims = {sp:self.n_orbitals[ik, ntoi[sp]] for sp in spn}
-                MMat = [np.zeros([dims[sp], dims[sp]], complex) for sp in spn]
-
-                for isp, sp in enumerate(spn):
-                    ind = ntoi[sp]
-                    for inu in range(self.n_orbitals[ik, ind]):
-                        # only works for diagonal hopping matrix (true in
-                        # wien2k)
-                        if (self.hopping[ik, ind, inu, inu] - self.h_field * (1 - 2 * isp)) < 0.0:
-                            MMat[isp][inu, inu] = 1.0
-                        else:
-                            MMat[isp][inu, inu] = 0.0
-            else:
-                raise ValueError("density_matrix: the method '%s' is not supported." % method)
+            for isp, sp in enumerate(spn):
+                ind = ntoi[sp]
+                for inu in range(self.n_orbitals[ik, ind]):
+                    # only works for diagonal hopping matrix (true in
+                    # wien2k)
+                    if (self.hopping[ik, ind, inu, inu] - self.h_field * (1 - 2 * isp)) < 0.0:
+                        MMat[isp][inu, inu] = 1.0
+                    else:
+                        MMat[isp][inu, inu] = 0.0

             for icrsh in range(self.n_corr_shells):
                 for isp, sp in enumerate(self.spin_block_names[self.corr_shells[icrsh]['SO']]):
@@ -1519,11 +1496,7 @@ class SumkDFT(object):
                     dim = self.corr_shells[icrsh]['dim']
                     n_orb = self.n_orbitals[ik, ind]
                     projmat = self.proj_mat[ik, ind, icrsh, 0:dim, 0:n_orb]
-                    if method == "using_gf":
-                        dens_mat[icrsh][sp] += np.dot(np.dot(projmat, MMat[isp]),
-                                                      projmat.transpose().conjugate())
-                    elif method == "using_point_integration":
-                        dens_mat[icrsh][sp] += self.bz_weights[ik] * np.dot(np.dot(projmat, MMat[isp]),
-                                                                            projmat.transpose().conjugate())
+                    dens_mat[icrsh][sp] += self.bz_weights[ik] * np.dot(np.dot(projmat, MMat[isp]),
+                                                                        projmat.transpose().conjugate())

         # get data from nodes:
@@ -1546,6 +1519,55 @@ class SumkDFT(object):

         return dens_mat

+    def density_matrix(self, method='using_gf', mu=None, with_Sigma=True, with_dc=True, broadening=None,
+                       transform_to_solver_blocks=True, show_warnings=True):
+        """Calculate density matrices in one of two ways.
+
+        Parameters
+        ----------
+        method : string, optional
+
+            - if 'using_gf': First get lattice gf (g_loc is not set up), then density matrix.
+                             It is useful for Hubbard I, and very quick.
+                             No assumption on the hopping structure is made (ie diagonal or not).
+            - if 'using_point_integration': Only works for diagonal hopping matrix (true in wien2k).
+
+        mu : real, optional
+            Input chemical potential. If not provided the value of self.chemical_potential is used as mu.
+        with_Sigma : boolean, optional
+            If True then the local GF is calculated with the self-energy self.Sigma_imp.
+        with_dc : boolean, optional
+            If True then the double-counting correction is subtracted from the self-energy in calculating the GF.
+        broadening : float, optional
+            Imaginary shift for the axis along which the real-axis GF is calculated.
+            If not provided, broadening will be set to double of the distance between mesh points in 'mesh'.
+            Only relevant for real-frequency GF.
+        transform_to_solver_blocks : bool, optional
+            If True (default), the returned G_loc will be transformed to the block structure ``gf_struct_solver``,
+            else it will be in ``gf_struct_sumk``.
+        show_warnings : bool, optional
+            Displays warning messages during transformation
+            (Only effective if transform_to_solver_blocks = True
+
+        Returns
+        -------
+        dens_mat : list of dicts
+            Density matrix for each spin in each correlated shell.
+        """
+        if method == "using_gf":
+            warn("WARNING: density_matrix: method 'using_gf' is deprecated. Use 'extract_G_loc' instead.")
+            Gloc = self.extract_G_loc(mu, with_Sigma, with_dc, broadening,
+                                      transform_to_solver_blocks, show_warnings)
+            dens_mat = [G.density() for G in Gloc]
+        elif method == "using_point_integration":
+            warn("WARNING: density_matrix: method 'using_point_integration' is deprecated. Use 'density_matrix_using_point_integration' instead. All additionally provided arguments are ignored.")
+            dens_mat = self.density_matrix_using_point_integration()
+        else:
+            raise ValueError("density_matrix: the method '%s' is not supported." % method)
+
+        return dens_mat
+
     # For simple dft input, get crystal field splittings.
     def eff_atomic_levels(self):
         r"""

View File

@@ -1,9 +1,9 @@
-from triqs.gf import *
+from triqs.gf import MeshImFreq, inverse, Omega
 from triqs_dft_tools.sumk_dft import SumkDFT
 from scipy.linalg import expm
 import numpy as np
 from triqs.utility.comparison_tests import assert_gfs_are_close, assert_arrays_are_close, assert_block_gfs_are_close
-from h5 import *
+from h5 import HDFArchive
 import itertools

 # The full test checks all different possible combinations of conjugated
@@ -19,11 +19,10 @@ full_test = False
 #######################################################################

 mesh = MeshImFreq(40, 'Fermion', 1025)
-SK = SumkDFT(hdf_file = 'SrIrO3_rot.h5', mesh=mesh)
+SK = SumkDFT(hdf_file='SrIrO3_rot.h5', mesh=mesh)
 Sigma = SK.block_structure.create_gf(mesh=mesh)
 SK.put_Sigma([Sigma])
-G = SK.extract_G_loc()
+G = SK.extract_G_loc(transform_to_solver_blocks=False)

 # the original block structure
 block_structure1 = SK.block_structure.copy()
 G_new = SK.analyse_block_structure_from_gf(G)
@@ -31,30 +30,29 @@ G_new = SK.analyse_block_structure_from_gf(G)
 # the new block structure
 block_structure2 = SK.block_structure.copy()

-with HDFArchive('analyse_block_structure_from_gf.out.h5','w') as ar:
+with HDFArchive('analyse_block_structure_from_gf.out.h5', 'w') as ar:
     ar['bs1'] = block_structure1
     ar['bs2'] = block_structure2

 # check whether the block structure is the same as in the reference
-with HDFArchive('analyse_block_structure_from_gf.out.h5','r') as ar,\
-     HDFArchive('analyse_block_structure_from_gf.ref.h5','r') as ar2:
+with HDFArchive('analyse_block_structure_from_gf.out.h5', 'r') as ar, HDFArchive('analyse_block_structure_from_gf.ref.h5', 'r') as ar2:
     assert ar['bs1'] == ar2['bs1'], 'bs1 not equal'
     a1 = ar['bs2']
     a2 = ar2['bs2']
-    assert a1==block_structure2, "writing/reading block structure incorrect"
+    assert a1 == block_structure2, 'writing/reading block structure incorrect'
     # we set the deg_shells to None because the transformation matrices
     # have a phase freedom and will, therefore, not be equal in general
     a1.deg_shells = None
     a2.deg_shells = None
-    assert a1==a2, 'bs2 not equal'
+    assert a1 == a2, 'bs2 not equal'

 # check if deg shells are correct
-assert len(SK.deg_shells[0])==1, "wrong number of equivalent groups"
+assert len(SK.deg_shells[0]) == 1, 'wrong number of equivalent groups'

 # check if the Green's functions that are found to be equal in the
 # routine are indeed equal
 for d in SK.deg_shells[0]:
-    assert len(d)==2, "wrong number of shells in equivalent group"
+    assert len(d) == 2, 'wrong number of shells in equivalent group'
     # the convention is that for every degenerate shell, the transformation
     # matrix v and the conjugate bool is saved
     # then,
@@ -70,8 +68,8 @@ for d in SK.deg_shells[0]:
             normalized_gf << normalized_gf.transpose()
         normalized_gfs.append(normalized_gf)
     for i in range(len(normalized_gfs)):
-        for j in range(i+1,len(normalized_gfs)):
-            assert_arrays_are_close(normalized_gfs[i].data, normalized_gfs[j].data, 1.e-5)
+        for j in range(i + 1, len(normalized_gfs)):
+            assert_arrays_are_close(normalized_gfs[i].data, normalized_gfs[j].data, 1.0e-5)

 #######################################################################
 # Second test                                                         #
@@ -80,18 +78,21 @@ for d in SK.deg_shells[0]:
 # model                                                               #
 #######################################################################

 # helper function to get random Hermitian matrix
 def get_random_hermitian(dim):
-    herm = np.random.rand(dim,dim)+1.0j*np.random.rand(dim,dim)
+    herm = np.random.rand(dim, dim) + 1.0j * np.random.rand(dim, dim)
     herm = herm + herm.conjugate().transpose()
     return herm

 # helper function to get random unitary matrix
 def get_random_transformation(dim):
     herm = get_random_hermitian(dim)
-    T = expm(1.0j*herm)
+    T = expm(1.0j * herm)
     return T

 # we will conjugate the Green's function blocks according to the entries
 # of conjugate_values
 # for each of the 5 blocks that will be constructed, there is an entry
@@ -101,34 +102,34 @@ if full_test:
     conjugate_values = list(itertools.product([False, True], repeat=5))
 else:
     # in the quick test we check a random combination
-    conjugate_values = [np.random.rand(5)>0.5]
+    conjugate_values = [np.random.rand(5) > 0.5]

 for conjugate in conjugate_values:
     # construct a random block-diagonal Hloc
-    Hloc = np.zeros((10,10), dtype=complex)
+    Hloc = np.zeros((10, 10), dtype=complex)
     # the Hloc of the first three 2x2 blocks is equal
     Hloc0 = get_random_hermitian(2)
-    Hloc[:2,:2] = Hloc0
-    Hloc[2:4,2:4] = Hloc0
-    Hloc[4:6,4:6] = Hloc0
+    Hloc[:2, :2] = Hloc0
+    Hloc[2:4, 2:4] = Hloc0
+    Hloc[4:6, 4:6] = Hloc0
     # the Hloc of the last two 2x2 blocks is equal
     Hloc1 = get_random_hermitian(2)
-    Hloc[6:8,6:8] = Hloc1
-    Hloc[8:,8:] = Hloc1
+    Hloc[6:8, 6:8] = Hloc1
+    Hloc[8:, 8:] = Hloc1

     # construct the hybridization delta
     # this is equal for all 2x2 blocks
-    V = get_random_hermitian(2) # the hopping elements from impurity to bath
-    b1 = np.random.rand() # the bath energy of the first bath level
-    b2 = np.random.rand() # the bath energy of the second bath level
-    delta = G[0]['ud'][:2,:2].copy()
-    delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2))/2.0
-    delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2))/2.0
-    delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2))/2.0
-    delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2))/2.0
+    V = get_random_hermitian(2)  # the hopping elements from impurity to bath
+    b1 = np.random.rand()  # the bath energy of the first bath level
+    b2 = np.random.rand()  # the bath energy of the second bath level
+    delta = G[0]['ud'][:2, :2].copy()
+    delta[0, 0] << (V[0, 0] * V[0, 0].conjugate() * inverse(Omega - b1) + V[0, 1] * V[0, 1].conjugate() * inverse(Omega - b2)) / 2.0
+    delta[0, 1] << (V[0, 0] * V[1, 0].conjugate() * inverse(Omega - b1) + V[0, 1] * V[1, 1].conjugate() * inverse(Omega - b2)) / 2.0
+    delta[1, 0] << (V[1, 0] * V[0, 0].conjugate() * inverse(Omega - b1) + V[1, 1] * V[0, 1].conjugate() * inverse(Omega - b2)) / 2.0
+    delta[1, 1] << (V[1, 0] * V[1, 0].conjugate() * inverse(Omega - b1) + V[1, 1] * V[1, 1].conjugate() * inverse(Omega - b2)) / 2.0

     # construct G
     G[0].zero()
-    for i in range(0,10,2):
-        G[0]['ud'][i:i+2,i:i+2] << inverse(Omega-delta)
+    for i in range(0, 10, 2):
+        G[0]['ud'][i : i + 2, i : i + 2] << inverse(Omega - delta)
     G[0]['ud'] << inverse(inverse(G[0]['ud']) - Hloc)

     # for testing symm_deg_gf below, we need this
@@ -136,12 +137,12 @@ for conjugate in conjugate_values:
     # mean of the blocks of G_noisy is equal to G[0]
     G_noisy = G[0].copy()
     noise1 = np.random.randn(*delta.target_shape)
-    G_noisy['ud'][:2,:2].data[:,:,:] += noise1
-    G_noisy['ud'][2:4,2:4].data[:,:,:] -= noise1/2.0
-    G_noisy['ud'][4:6,4:6].data[:,:,:] -= noise1/2.0
+    G_noisy['ud'][:2, :2].data[:, :, :] += noise1
+    G_noisy['ud'][2:4, 2:4].data[:, :, :] -= noise1 / 2.0
+    G_noisy['ud'][4:6, 4:6].data[:, :, :] -= noise1 / 2.0
     noise2 = np.random.randn(*delta.target_shape)
-    G_noisy['ud'][6:8,6:8].data[:,:,:] += noise2
-    G_noisy['ud'][8:,8:].data[:,:,:] -= noise2
+    G_noisy['ud'][6:8, 6:8].data[:, :, :] += noise2
+    G_noisy['ud'][8:, 8:].data[:, :, :] -= noise2

     # for testing backward-compatibility in symm_deg_gf, we need the
     # un-transformed Green's functions
@@ -149,33 +150,35 @@ for conjugate in conjugate_values:
     G_noisy_pre_transform = G_noisy.copy()

     # transform each block using a random transformation matrix
-    for i in range(0,10,2):
+    for i in range(0, 10, 2):
         T = get_random_transformation(2)
-        G[0]['ud'][i:i+2,i:i+2].from_L_G_R(T, G[0]['ud'][i:i+2,i:i+2], T.conjugate().transpose())
-        G_noisy['ud'][i:i+2,i:i+2].from_L_G_R(T, G_noisy['ud'][i:i+2,i:i+2], T.conjugate().transpose())
+        G[0]['ud'][i : i + 2, i : i + 2].from_L_G_R(T, G[0]['ud'][i : i + 2, i : i + 2], T.conjugate().transpose())
+        G_noisy['ud'][i : i + 2, i : i + 2].from_L_G_R(T, G_noisy['ud'][i : i + 2, i : i + 2], T.conjugate().transpose())
         # if that block shall be conjugated, go ahead and do it
-        if conjugate[i//2]:
-            G[0]['ud'][i:i+2,i:i+2] << G[0]['ud'][i:i+2,i:i+2].transpose()
-            G_noisy['ud'][i:i+2,i:i+2] << G_noisy['ud'][i:i+2,i:i+2].transpose()
+        if conjugate[i // 2]:
+            G[0]['ud'][i : i + 2, i : i + 2] << G[0]['ud'][i : i + 2, i : i + 2].transpose()
+            G_noisy['ud'][i : i + 2, i : i + 2] << G_noisy['ud'][i : i + 2, i : i + 2].transpose()

     # analyse the block structure
-    G_new = SK.analyse_block_structure_from_gf(G, 1.e-7)
+    G_new = SK.analyse_block_structure_from_gf(G, 1.0e-7)

     # transform G_noisy etc. to the new block structure
-    G_noisy = SK.block_structure.convert_gf(G_noisy, block_structure1, beta = G_noisy.mesh.beta, space_from='sumk')
-    G_pre_transform = SK.block_structure.convert_gf(G_pre_transform, block_structure1, beta = G_noisy.mesh.beta, space_from='sumk')
-    G_noisy_pre_transform = SK.block_structure.convert_gf(G_noisy_pre_transform, block_structure1, beta = G_noisy.mesh.beta, space_from='sumk')
+    G_noisy = SK.block_structure.convert_gf(G_noisy, block_structure1, beta=G_noisy.mesh.beta, space_from='sumk')
+    G_pre_transform = SK.block_structure.convert_gf(G_pre_transform, block_structure1, beta=G_noisy.mesh.beta, space_from='sumk')
+    G_noisy_pre_transform = SK.block_structure.convert_gf(
+        G_noisy_pre_transform, block_structure1, beta=G_noisy.mesh.beta, space_from='sumk'
+    )

-    assert len(SK.deg_shells[0]) == 2, "wrong number of equivalent groups found"
-    assert sorted([len(d) for d in SK.deg_shells[0]]) == [2,3], "wrong number of members in the equivalent groups found"
+    assert len(SK.deg_shells[0]) == 2, 'wrong number of equivalent groups found'
+    assert sorted([len(d) for d in SK.deg_shells[0]]) == [2, 3], 'wrong number of members in the equivalent groups found'
     for d in SK.deg_shells[0]:
-        if len(d)==2:
-            assert 'ud_3' in d, "shell ud_3 missing"
-            assert 'ud_4' in d, "shell ud_4 missing"
-        if len(d)==3:
-            assert 'ud_0' in d, "shell ud_0 missing"
-            assert 'ud_1' in d, "shell ud_1 missing"
-            assert 'ud_2' in d, "shell ud_2 missing"
+        if len(d) == 2:
+            assert 'ud_3' in d, 'shell ud_3 missing'
+            assert 'ud_4' in d, 'shell ud_4 missing'
+        if len(d) == 3:
+            assert 'ud_0' in d, 'shell ud_0 missing'
+            assert 'ud_1' in d, 'shell ud_1 missing'
+            assert 'ud_2' in d, 'shell ud_2 missing'

         # the convention is that for every degenerate shell, the transformation
         # matrix v and the conjugate bool is saved
@@ -192,21 +195,21 @@ for conjugate in conjugate_values:
             normalized_gf << normalized_gf.transpose()
         normalized_gfs.append(normalized_gf)
     for i in range(len(normalized_gfs)):
-        for j in range(i+1,len(normalized_gfs)):
+        for j in range(i + 1, len(normalized_gfs)):
             # here, we use a threshold that is 1 order of magnitude less strict
             # because of numerics
-            assert_gfs_are_close(normalized_gfs[i], normalized_gfs[j], 1.e-6)
+            assert_gfs_are_close(normalized_gfs[i], normalized_gfs[j], 1.0e-6)

     # now we check symm_deg_gf
     # symmetrizing the GF has is has to leave it unchanged
     G_new_symm = G_new[0].copy()
     SK.symm_deg_gf(G_new_symm, 0)
-    assert_block_gfs_are_close(G_new[0], G_new_symm, 1.e-6)
+    assert_block_gfs_are_close(G_new[0], G_new_symm, 1.0e-6)

     # symmetrizing the noisy GF, which was carefully constructed,
     # has to give the same result as G_new[0]
     SK.symm_deg_gf(G_noisy, 0)
-    assert_block_gfs_are_close(G_new[0], G_noisy, 1.e-6)
+    assert_block_gfs_are_close(G_new[0], G_noisy, 1.0e-6)

     # check backward compatibility of symm_deg_gf
     # first, construct the old format of the deg shells
@@ -217,9 +220,9 @@ for conjugate in conjugate_values:
     # symmetrizing the GF as is has to leave it unchanged
     G_new_symm << G_pre_transform
     SK.symm_deg_gf(G_new_symm, 0)
-    assert_block_gfs_are_close(G_new_symm, G_pre_transform, 1.e-6)
+    assert_block_gfs_are_close(G_new_symm, G_pre_transform, 1.0e-6)

     # symmetrizing the noisy GF pre transform, which was carefully constructed,
     # has to give the same result as G_pre_transform
     SK.symm_deg_gf(G_noisy_pre_transform, 0)
-    assert_block_gfs_are_close(G_noisy_pre_transform, G_pre_transform, 1.e-6)
+    assert_block_gfs_are_close(G_noisy_pre_transform, G_pre_transform, 1.0e-6)

View File

@@ -48,7 +48,7 @@ G['ud'] << inverse(inverse(G['ud']) - Hloc)

 SK = SumkDFT(hdf_file = 'SrIrO3_rot.h5', use_dft_blocks=False)

-G_new = SK.analyse_block_structure_from_gf([G])
+G_new = SK.analyse_block_structure_from_gf([G]*2)
 G_new_symm = G_new[0].copy()
 SK.symm_deg_gf(G_new_symm, 0)
 assert_block_gfs_are_close(G_new[0], G_new_symm)
@@ -93,7 +93,7 @@ known_moments[1,:] = np.eye(10)
 tail, err = fit_tail(G['ud'], known_moments)
 Gt['ud'].set_from_fourier(G['ud'], tail)

-G_new = SK.analyse_block_structure_from_gf([Gt])
+G_new = SK.analyse_block_structure_from_gf([Gt] * 2)
 G_new_symm = G_new[0].copy()
 SK.symm_deg_gf(G_new_symm, 0)
 assert_block_gfs_are_close(G_new[0], G_new_symm)

View File

@@ -55,7 +55,7 @@ def test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename):
     dc_no = method_dict[method]["numbering_convention"]
     dc_string = method_dict[method]["new_convention"]

     mpi.report("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
     mpi.report(f"\n Testing interface {method} \n")
     mpi.report("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
@@ -71,8 +71,8 @@ def test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename):
         mpi.report("Up DC matrix:")
         mpi.report(SK_new.dc_imp[0]['up'])
         mpi.report(f"Double counting energy = {SK_new.dc_energ} ")

     # Load previously computed DC values from h5 archive
     R = HDFArchive(f'./{filename}', 'r')
     dc_comp = R[f'DC_{method}_benchmark']['dc_imp']
     en_comp = R[f'DC_{method}_benchmark']['dc_energ']
@@ -84,7 +84,7 @@ def test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename):
     assert np.allclose(SK_new.dc_imp[0]['up'], dc_comp, atol=1e-12), f"Assertion failed comparing Vdc to reference, method: {method} "
     assert np.allclose(SK_new.dc_energ, en_comp, atol=1e-12), f"Assertion failed comparing energy to reference, method: {method} "

     mpi.report("Comparison with stored DC values successfull!\n")
@@ -104,20 +104,20 @@ SK_new.set_mu(13.9)
 icrsh = 0

-dens = SK_compat.density_matrix()
+dens = SK_compat.density_matrix(transform_to_solver_blocks=True)
 with np.printoptions(precision=5):
     for key in dens[0].keys():
         mpi.report(f"{key} channel")
         mpi.report(dens[0][key].real)

-N_up = np.trace(dens[0]['up'].real)
-N_down = np.trace(dens[0]['down'].real)
+N_up = np.trace(dens[0]['up_0'].real)
+N_down = np.trace(dens[0]['down_0'].real)
 N_tot = N_up + N_down

 mpi.report(f"{N_up=} ,{N_down=}, {N_tot=}\n")

 for method in ["FLL", "AMF", "Held"]:
     test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename = f"{dft_filename}.h5")

 #in case implementation changes, to write new testing data into archive
@@ -141,16 +141,15 @@ SK_compat = SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks)
 SK_new = SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks)

 icrsh = 0

-dens = SK_compat.density_matrix()
+dens = SK_compat.density_matrix(transform_to_solver_blocks=True)
 with np.printoptions(precision=5):
     for key in dens[0].keys():
         mpi.report(f"{key} channel")
         mpi.report(dens[0][key].real)

-N_up = np.trace(dens[0]['up'].real)
-N_down = np.trace(dens[0]['down'].real)
+N_up = np.trace(dens[0]['up_0'].real)
+N_down = np.trace(dens[0]['down_0'].real)
 N_tot = N_up + N_down

 mpi.report(f"{N_up=} ,{N_down=}, {N_tot=}\n")
@@ -158,9 +157,9 @@ mpi.report(f"{N_up=} ,{N_down=}, {N_tot=}\n")
 Uval = 5
 Jval = 0.3

 for method in ["FLL", "AMF", "Held"]:
     test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename = f"{dft_filename}.h5" )

 #in case implementation changes, to write new testing data into archive
 #R = HDFArchive(f'./{dft_filename}.h5', 'a')
 #R.create_group(f'DC_{method}_benchmark')

View File

@@ -20,16 +20,20 @@
 #
 ################################################################################

-from h5 import *
+from h5 import HDFArchive
 from triqs_dft_tools.sumk_dft_tools import SumkDFTTools
 import triqs.utility.mpi as mpi
 from triqs.utility.comparison_tests import *
 from triqs.utility.h5diff import h5diff
+import numpy as np

 SK = SumkDFTTools(hdf_file = 'SrVO3.ref.h5')

-dm = SK.density_matrix(method = 'using_gf')
+dm = SK.density_matrix(method = 'using_gf', transform_to_solver_blocks=False, with_Sigma=False)
 dm_pc = SK.partial_charges(with_Sigma=False, with_dc=False)

+dm_pi = SK.density_matrix_using_point_integration()
+for key, value in dm[0].items():
+    assert (np.allclose(value, dm_pi[0][key], atol=1e-6, rtol=1e-6))
+
 with HDFArchive('sumkdft_basic.out.h5','w') as ar:
     ar['dm'] = dm