diff --git a/python/triqs_dft_tools/sumk_dft.py b/python/triqs_dft_tools/sumk_dft.py
index 79933ba1..1e3ac27a 100644
--- a/python/triqs_dft_tools/sumk_dft.py
+++ b/python/triqs_dft_tools/sumk_dft.py
@@ -168,8 +168,8 @@ class SumkDFT(object):
             # blocks possible
             self.gf_struct_sumk = [[(sp, self.corr_shells[icrsh]['dim']) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]]
                                    for icrsh in range(self.n_corr_shells)]
-            # First set a standard gf_struct solver:
-            self.gf_struct_solver = [dict([(sp, self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
+            # First set a standard gf_struct solver (add _0 here for consistency with analyse_block_structure):
+            self.gf_struct_solver = [dict([(sp+'_0', self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
                                            for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]])
                                      for ish in range(self.n_inequiv_shells)]
             # Set standard (identity) maps from gf_struct_sumk <->
@@ -180,12 +180,12 @@ class SumkDFT(object):
                                          for ish in range(self.n_inequiv_shells)]
             for ish in range(self.n_inequiv_shells):
                 for block, inner_dim in self.gf_struct_sumk[self.inequiv_to_corr[ish]]:
-                    self.solver_to_sumk_block[ish][block] = block
+                    self.solver_to_sumk_block[ish][block+'_0'] = block
                     for inner in range(inner_dim):
                         self.sumk_to_solver[ish][
-                            (block, inner)] = (block, inner)
+                            (block, inner)] = (block+'_0', inner)
                         self.solver_to_sumk[ish][
-                            (block, inner)] = (block, inner)
+                            (block+'_0', inner)] = (block, inner)
             # assume no shells are degenerate
             self.deg_shells = [[] for ish in range(self.n_inequiv_shells)]
@@ -862,8 +862,8 @@ class SumkDFT(object):
             If the difference between density matrix / hloc elements is below threshold,
             they are considered to be equal.
         include_shells : list of integers, optional
-            List of correlated shells to be analysed.
-            If include_shells is not provided all correlated shells will be analysed.
+            List of inequivalent shells to be analysed.
+            If include_shells is not provided all inequivalent shells will be analysed.
         dm : list of dict, optional
             List of density matrices from which block stuctures are to be analysed.
             Each density matrix is a dict {block names: 2d numpy arrays} for each correlated shell.
@@ -874,14 +874,11 @@ class SumkDFT(object):
             If not provided, it will be calculated using eff_atomic_levels.
         """

-        self.gf_struct_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.sumk_to_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk_block = [{}
-                                     for ish in range(self.n_inequiv_shells)]
-
         if dm is None:
-            dm = self.density_matrix(method='using_point_integration')
+            warn("WARNING: No density matrix given. Calculating density matrix with default parameters. This will be deprecated in future releases.")
+            dm = self.density_matrix(method='using_gf', transform_to_solver_blocks=False)
+
+        assert len(dm) == self.n_corr_shells, "The number of density matrices must be equal to the number of correlated shells."
         dens_mat = [dm[self.inequiv_to_corr[ish]]
                     for ish in range(self.n_inequiv_shells)]
         if hloc is None:
@@ -890,8 +887,13 @@ class SumkDFT(object):
         if include_shells is None:
             include_shells = list(range(self.n_inequiv_shells))
         for ish in include_shells:
+            self.gf_struct_solver[ish] = {}
+            self.sumk_to_solver[ish] = {}
+            self.solver_to_sumk[ish] = {}
+            self.solver_to_sumk_block[ish] = {}
             for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
+                assert sp in dens_mat[ish], f"The density matrix does not contain the block {sp}. Is the input dm given in sumk block structure?"
                 n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']

                 # gives an index list of entries larger that threshold
                 dmbool = (abs(dens_mat[ish][sp]) > threshold)
@@ -1049,8 +1051,8 @@ class SumkDFT(object):
             If the difference between matrix elements is below threshold,
             they are considered to be equal.
         include_shells : list of integers, optional
-            List of correlated shells to be analysed.
-            If include_shells is not provided all correlated shells will be analysed.
+            List of inequivalent shells to be analysed.
+            If include_shells is not provided all inequivalent shells will be analysed.
         analyse_deg_shells : bool
             Whether to call the analyse_deg_shells function
             after having finished the block structure analysis
@@ -1062,29 +1064,26 @@ class SumkDFT(object):
         """
         assert isinstance(G, list), "G must be a list (with elements for each correlated shell)"
+        assert len(G) == self.n_corr_shells, "G must have one element for each correlated shell, run extract_G_loc with transform_to_solver_blocks=False to get the correct G"

         gf = self._get_hermitian_quantity_from_gf(G)

-        # initialize the variables
-        self.gf_struct_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.sumk_to_solver = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk = [{} for ish in range(self.n_inequiv_shells)]
-        self.solver_to_sumk_block = [{}
-                                     for ish in range(self.n_inequiv_shells)]
-
-        # the maximum value of each matrix element of each block and shell
-        max_gf = [{name:np.max(np.abs(g.data),0) for name, g in gf[ish]} for ish in range(self.n_inequiv_shells)]
-
         if include_shells is None:
             # include all shells
             include_shells = list(range(self.n_inequiv_shells))

         for ish in include_shells:
+            self.gf_struct_solver[ish] = {}
+            self.sumk_to_solver[ish] = {}
+            self.solver_to_sumk[ish] = {}
+            self.solver_to_sumk_block[ish] = {}
             for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
+                assert sp in gf[self.inequiv_to_corr[ish]].indices, f"The Green's function does not contain the block {sp}. Is the input G given in sumk block structure?"
                 n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']

                 # gives an index list of entries larger that threshold
-                maxgf_bool = (abs(max_gf[ish][sp]) > threshold)
+                max_gf = np.max(np.abs(gf[self.inequiv_to_corr[ish]][sp].data),0)
+                maxgf_bool = (max_gf > threshold)

                 # Determine off-diagonal entries in upper triangular part of the
                 # Green's function
@@ -1455,21 +1454,12 @@ class SumkDFT(object):

         return trans

-
-    def density_matrix(self, method='using_gf'):
-        """Calculate density matrices in one of two ways.
-
-        Parameters
-        ----------
-        method : string, optional
-
-            - if 'using_gf': First get lattice gf (g_loc is not set up), then density matrix.
-                             It is useful for Hubbard I, and very quick.
-                             No assumption on the hopping structure is made (ie diagonal or not).
-            - if 'using_point_integration': Only works for diagonal hopping matrix (true in wien2k).
-
-        beta : float, optional
-            Inverse temperature.
+    def density_matrix_using_point_integration(self):
+        """
+        Calculate density matrices using point integration: Only works for
+        diagonal hopping matrix (true in wien2k). Consider using extract_G_loc
+        together with [G.density() for G in Gloc] instead. Returned density
+        matrix is always given in SumK block structure.

         Returns
         -------
@@ -1483,34 +1473,21 @@ class SumkDFT(object):
                 [self.corr_shells[icrsh]['dim'], self.corr_shells[icrsh]['dim']], complex)

         ikarray = np.array(list(range(self.n_k)))
+        ntoi = self.spin_names_to_ind[self.SO]
+        spn = self.spin_block_names[self.SO]
         for ik in mpi.slice_array(ikarray):
+            dims = {sp:self.n_orbitals[ik, ntoi[sp]] for sp in spn}
+            MMat = [np.zeros([dims[sp], dims[sp]], complex) for sp in spn]

-            if method == "using_gf":
-
-                G_latt_iw = self.lattice_gf(ik=ik, mu=self.chemical_potential)
-                G_latt_iw *= self.bz_weights[ik]
-                dm = G_latt_iw.density()
-                MMat = [dm[sp] for sp in self.spin_block_names[self.SO]]
-
-            elif method == "using_point_integration":
-
-                ntoi = self.spin_names_to_ind[self.SO]
-                spn = self.spin_block_names[self.SO]
-                dims = {sp:self.n_orbitals[ik, ntoi[sp]] for sp in spn}
-                MMat = [np.zeros([dims[sp], dims[sp]], complex) for sp in spn]
-
-                for isp, sp in enumerate(spn):
-                    ind = ntoi[sp]
-                    for inu in range(self.n_orbitals[ik, ind]):
-                        # only works for diagonal hopping matrix (true in
-                        # wien2k)
-                        if (self.hopping[ik, ind, inu, inu] - self.h_field * (1 - 2 * isp)) < 0.0:
-                            MMat[isp][inu, inu] = 1.0
-                        else:
-                            MMat[isp][inu, inu] = 0.0
-
-            else:
-                raise ValueError("density_matrix: the method '%s' is not supported." % method)
+            for isp, sp in enumerate(spn):
+                ind = ntoi[sp]
+                for inu in range(self.n_orbitals[ik, ind]):
+                    # only works for diagonal hopping matrix (true in
+                    # wien2k)
+                    if (self.hopping[ik, ind, inu, inu] - self.h_field * (1 - 2 * isp)) < 0.0:
+                        MMat[isp][inu, inu] = 1.0
+                    else:
+                        MMat[isp][inu, inu] = 0.0

         for icrsh in range(self.n_corr_shells):
             for isp, sp in enumerate(self.spin_block_names[self.corr_shells[icrsh]['SO']]):
@@ -1519,11 +1496,7 @@ class SumkDFT(object):
                 dim = self.corr_shells[icrsh]['dim']
                 n_orb = self.n_orbitals[ik, ind]
                 projmat = self.proj_mat[ik, ind, icrsh, 0:dim, 0:n_orb]
-                if method == "using_gf":
-                    dens_mat[icrsh][sp] += np.dot(np.dot(projmat, MMat[isp]),
-                                                  projmat.transpose().conjugate())
-                elif method == "using_point_integration":
-                    dens_mat[icrsh][sp] += self.bz_weights[ik] * np.dot(np.dot(projmat, MMat[isp]),
+                dens_mat[icrsh][sp] += self.bz_weights[ik] * np.dot(np.dot(projmat, MMat[isp]),
                                                                         projmat.transpose().conjugate())

         # get data from nodes:
@@ -1546,6 +1519,55 @@ class SumkDFT(object):

         return dens_mat

+
+    def density_matrix(self, method='using_gf', mu=None, with_Sigma=True, with_dc=True, broadening=None,
+                       transform_to_solver_blocks=True, show_warnings=True):
+        """Calculate density matrices in one of two ways.
+
+        Parameters
+        ----------
+        method : string, optional
+
+            - if 'using_gf': First get lattice gf (g_loc is not set up), then density matrix.
+                             It is useful for Hubbard I, and very quick.
+                             No assumption on the hopping structure is made (ie diagonal or not).
+            - if 'using_point_integration': Only works for diagonal hopping matrix (true in wien2k).
+        mu : real, optional
+            Input chemical potential. If not provided the value of self.chemical_potential is used as mu.
+        with_Sigma : boolean, optional
+            If True then the local GF is calculated with the self-energy self.Sigma_imp.
+        with_dc : boolean, optional
+            If True then the double-counting correction is subtracted from the self-energy in calculating the GF.
+        broadening : float, optional
+            Imaginary shift for the axis along which the real-axis GF is calculated.
+            If not provided, broadening will be set to double of the distance between mesh points in 'mesh'.
+            Only relevant for real-frequency GF.
+        transform_to_solver_blocks : bool, optional
+            If True (default), the returned G_loc will be transformed to the block structure ``gf_struct_solver``,
+            else it will be in ``gf_struct_sumk``.
+        show_warnings : bool, optional
+            Displays warning messages during transformation
+            (only effective if transform_to_solver_blocks = True).
+
+        Returns
+        -------
+        dens_mat : list of dicts
+            Density matrix for each spin in each correlated shell.
+        """
+
+        if method == "using_gf":
+            warn("WARNING: density_matrix: method 'using_gf' is deprecated. Use 'extract_G_loc' instead.")
+            Gloc = self.extract_G_loc(mu, with_Sigma, with_dc, broadening,
+                                      transform_to_solver_blocks, show_warnings)
+            dens_mat = [G.density() for G in Gloc]
+        elif method == "using_point_integration":
+            warn("WARNING: density_matrix: method 'using_point_integration' is deprecated. Use 'density_matrix_using_point_integration' instead. All additionally provided arguments are ignored.")
+            dens_mat = self.density_matrix_using_point_integration()
+        else:
+            raise ValueError("density_matrix: the method '%s' is not supported." % method)
+
+        return dens_mat
+
     # For simple dft input, get crystal field splittings.
     def eff_atomic_levels(self):
         r"""
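Usage sketch for the reworked density-matrix API above (illustrative only, not part of the patch; the archive name 'SrVO3.h5' and the mesh parameters are placeholders mirroring the tests below):

    # Hypothetical driver script; assumes a converted DFT archive 'SrVO3.h5'.
    from triqs.gf import MeshImFreq
    from triqs_dft_tools.sumk_dft import SumkDFT

    SK = SumkDFT(hdf_file='SrVO3.h5', mesh=MeshImFreq(40, 'Fermion', 1025))

    # Deprecated route, still available through the density_matrix wrapper:
    dens_old = SK.density_matrix(method='using_gf', with_Sigma=False)

    # Recommended route: build the local GF explicitly, then take its density.
    # transform_to_solver_blocks=False keeps the sumk block structure, which is
    # what analyse_block_structure(dm=...) and analyse_block_structure_from_gf expect.
    Gloc = SK.extract_G_loc(with_Sigma=False, transform_to_solver_blocks=False)
    dens_new = [G.density() for G in Gloc]

    # Point integration now has its own method (sumk blocks, diagonal hopping only):
    dens_point = SK.density_matrix_using_point_integration()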
diff --git a/test/python/analyse_block_structure_from_gf.py b/test/python/analyse_block_structure_from_gf.py
index e9775ee6..7551fc04 100644
--- a/test/python/analyse_block_structure_from_gf.py
+++ b/test/python/analyse_block_structure_from_gf.py
@@ -1,9 +1,9 @@
-from triqs.gf import *
+from triqs.gf import MeshImFreq, inverse, Omega
 from triqs_dft_tools.sumk_dft import SumkDFT
 from scipy.linalg import expm
 import numpy as np
 from triqs.utility.comparison_tests import assert_gfs_are_close, assert_arrays_are_close, assert_block_gfs_are_close
-from h5 import *
+from h5 import HDFArchive
 import itertools

 # The full test checks all different possible combinations of conjugated
@@ -19,11 +19,10 @@ full_test = False
 #######################################################################

 mesh = MeshImFreq(40, 'Fermion', 1025)
-SK = SumkDFT(hdf_file = 'SrIrO3_rot.h5', mesh=mesh)
+SK = SumkDFT(hdf_file='SrIrO3_rot.h5', mesh=mesh)
 Sigma = SK.block_structure.create_gf(mesh=mesh)
 SK.put_Sigma([Sigma])
-G = SK.extract_G_loc()
-
+G = SK.extract_G_loc(transform_to_solver_blocks=False)
 # the original block structure
 block_structure1 = SK.block_structure.copy()
 G_new = SK.analyse_block_structure_from_gf(G)
@@ -31,30 +30,29 @@ G_new = SK.analyse_block_structure_from_gf(G)
 # the new block structure
 block_structure2 = SK.block_structure.copy()

-with HDFArchive('analyse_block_structure_from_gf.out.h5','w') as ar:
+with HDFArchive('analyse_block_structure_from_gf.out.h5', 'w') as ar:
     ar['bs1'] = block_structure1
     ar['bs2'] = block_structure2

 # check whether the block structure is the same as in the reference
-with HDFArchive('analyse_block_structure_from_gf.out.h5','r') as ar,\
-     HDFArchive('analyse_block_structure_from_gf.ref.h5','r') as ar2:
+with HDFArchive('analyse_block_structure_from_gf.out.h5', 'r') as ar, HDFArchive('analyse_block_structure_from_gf.ref.h5', 'r') as ar2:
     assert ar['bs1'] == ar2['bs1'], 'bs1 not equal'
     a1 = ar['bs2']
     a2 = ar2['bs2']

-    assert a1==block_structure2, "writing/reading block structure incorrect"
+    assert a1 == block_structure2, 'writing/reading block structure incorrect'
     # we set the deg_shells to None because the transformation matrices
     # have a phase freedom and will, therefore, not be equal in general
     a1.deg_shells = None
     a2.deg_shells = None
-    assert a1==a2, 'bs2 not equal'
+    assert a1 == a2, 'bs2 not equal'

 # check if deg shells are correct
-assert len(SK.deg_shells[0])==1, "wrong number of equivalent groups"
+assert len(SK.deg_shells[0]) == 1, 'wrong number of equivalent groups'

 # check if the Green's functions that are found to be equal in the
 # routine are indeed equal
 for d in SK.deg_shells[0]:
-    assert len(d)==2, "wrong number of shells in equivalent group"
+    assert len(d) == 2, 'wrong number of shells in equivalent group'
     # the convention is that for every degenerate shell, the transformation
     # matrix v and the conjugate bool is saved
     # then,
@@ -70,8 +68,8 @@ for d in SK.deg_shells[0]:
             normalized_gf << normalized_gf.transpose()
         normalized_gfs.append(normalized_gf)
     for i in range(len(normalized_gfs)):
-        for j in range(i+1,len(normalized_gfs)):
-            assert_arrays_are_close(normalized_gfs[i].data, normalized_gfs[j].data, 1.e-5)
+        for j in range(i + 1, len(normalized_gfs)):
+            assert_arrays_are_close(normalized_gfs[i].data, normalized_gfs[j].data, 1.0e-5)

 #######################################################################
 # Second test                                                         #
@@ -80,18 +78,21 @@ for d in SK.deg_shells[0]:
 # model                                                               #
 #######################################################################

+
 # helper function to get random Hermitian matrix
 def get_random_hermitian(dim):
-    herm = np.random.rand(dim,dim)+1.0j*np.random.rand(dim,dim)
+    herm = np.random.rand(dim, dim) + 1.0j * np.random.rand(dim, dim)
     herm = herm + herm.conjugate().transpose()
     return herm

+
 # helper function to get random unitary matrix
 def get_random_transformation(dim):
     herm = get_random_hermitian(dim)
-    T = expm(1.0j*herm)
+    T = expm(1.0j * herm)
     return T

+
 # we will conjugate the Green's function blocks according to the entries
 # of conjugate_values
 # for each of the 5 blocks that will be constructed, there is an entry
@@ -101,34 +102,34 @@ if full_test:
     conjugate_values = list(itertools.product([False, True], repeat=5))
 else:
     # in the quick test we check a random combination
-    conjugate_values = [np.random.rand(5)>0.5]
+    conjugate_values = [np.random.rand(5) > 0.5]

 for conjugate in conjugate_values:
     # construct a random block-diagonal Hloc
-    Hloc = np.zeros((10,10), dtype=complex)
+    Hloc = np.zeros((10, 10), dtype=complex)
     # the Hloc of the first three 2x2 blocks is equal
     Hloc0 = get_random_hermitian(2)
-    Hloc[:2,:2] = Hloc0
-    Hloc[2:4,2:4] = Hloc0
-    Hloc[4:6,4:6] = Hloc0
+    Hloc[:2, :2] = Hloc0
+    Hloc[2:4, 2:4] = Hloc0
+    Hloc[4:6, 4:6] = Hloc0
     # the Hloc of the last two 2x2 blocks is equal
     Hloc1 = get_random_hermitian(2)
-    Hloc[6:8,6:8] = Hloc1
-    Hloc[8:,8:] = Hloc1
+    Hloc[6:8, 6:8] = Hloc1
+    Hloc[8:, 8:] = Hloc1

     # construct the hybridization delta
     # this is equal for all 2x2 blocks
-    V = get_random_hermitian(2) # the hopping elements from impurity to bath
-    b1 = np.random.rand() # the bath energy of the first bath level
-    b2 = np.random.rand() # the bath energy of the second bath level
-    delta = G[0]['ud'][:2,:2].copy()
-    delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2))/2.0
-    delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2))/2.0
-    delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2))/2.0
-    delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2))/2.0
+    V = get_random_hermitian(2)  # the hopping elements from impurity to bath
+    b1 = np.random.rand()  # the bath energy of the first bath level
+    b2 = np.random.rand()  # the bath energy of the second bath level
+    delta = G[0]['ud'][:2, :2].copy()
+    delta[0, 0] << (V[0, 0] * V[0, 0].conjugate() * inverse(Omega - b1) + V[0, 1] * V[0, 1].conjugate() * inverse(Omega - b2)) / 2.0
+    delta[0, 1] << (V[0, 0] * V[1, 0].conjugate() * inverse(Omega - b1) + V[0, 1] * V[1, 1].conjugate() * inverse(Omega - b2)) / 2.0
+    delta[1, 0] << (V[1, 0] * V[0, 0].conjugate() * inverse(Omega - b1) + V[1, 1] * V[0, 1].conjugate() * inverse(Omega - b2)) / 2.0
+    delta[1, 1] << (V[1, 0] * V[1, 0].conjugate() * inverse(Omega - b1) + V[1, 1] * V[1, 1].conjugate() * inverse(Omega - b2)) / 2.0

     # construct G
     G[0].zero()
-    for i in range(0,10,2):
-        G[0]['ud'][i:i+2,i:i+2] << inverse(Omega-delta)
+    for i in range(0, 10, 2):
+        G[0]['ud'][i : i + 2, i : i + 2] << inverse(Omega - delta)
     G[0]['ud'] << inverse(inverse(G[0]['ud']) - Hloc)

     # for testing symm_deg_gf below, we need this
@@ -136,12 +137,12 @@ for conjugate in conjugate_values:
     # mean of the blocks of G_noisy is equal to G[0]
     G_noisy = G[0].copy()
     noise1 = np.random.randn(*delta.target_shape)
-    G_noisy['ud'][:2,:2].data[:,:,:] += noise1
-    G_noisy['ud'][2:4,2:4].data[:,:,:] -= noise1/2.0
-    G_noisy['ud'][4:6,4:6].data[:,:,:] -= noise1/2.0
+    G_noisy['ud'][:2, :2].data[:, :, :] += noise1
+    G_noisy['ud'][2:4, 2:4].data[:, :, :] -= noise1 / 2.0
+    G_noisy['ud'][4:6, 4:6].data[:, :, :] -= noise1 / 2.0
     noise2 = np.random.randn(*delta.target_shape)
-    G_noisy['ud'][6:8,6:8].data[:,:,:] += noise2
-    G_noisy['ud'][8:,8:].data[:,:,:] -= noise2
+    G_noisy['ud'][6:8, 6:8].data[:, :, :] += noise2
+    G_noisy['ud'][8:, 8:].data[:, :, :] -= noise2

     # for testing backward-compatibility in symm_deg_gf, we need the
     # un-transformed Green's functions
@@ -149,33 +150,35 @@ for conjugate in conjugate_values:
     G_noisy_pre_transform = G_noisy.copy()

     # transform each block using a random transformation matrix
-    for i in range(0,10,2):
+    for i in range(0, 10, 2):
         T = get_random_transformation(2)
-        G[0]['ud'][i:i+2,i:i+2].from_L_G_R(T, G[0]['ud'][i:i+2,i:i+2], T.conjugate().transpose())
-        G_noisy['ud'][i:i+2,i:i+2].from_L_G_R(T, G_noisy['ud'][i:i+2,i:i+2], T.conjugate().transpose())
+        G[0]['ud'][i : i + 2, i : i + 2].from_L_G_R(T, G[0]['ud'][i : i + 2, i : i + 2], T.conjugate().transpose())
+        G_noisy['ud'][i : i + 2, i : i + 2].from_L_G_R(T, G_noisy['ud'][i : i + 2, i : i + 2], T.conjugate().transpose())
         # if that block shall be conjugated, go ahead and do it
-        if conjugate[i//2]:
-            G[0]['ud'][i:i+2,i:i+2] << G[0]['ud'][i:i+2,i:i+2].transpose()
-            G_noisy['ud'][i:i+2,i:i+2] << G_noisy['ud'][i:i+2,i:i+2].transpose()
+        if conjugate[i // 2]:
+            G[0]['ud'][i : i + 2, i : i + 2] << G[0]['ud'][i : i + 2, i : i + 2].transpose()
+            G_noisy['ud'][i : i + 2, i : i + 2] << G_noisy['ud'][i : i + 2, i : i + 2].transpose()

     # analyse the block structure
-    G_new = SK.analyse_block_structure_from_gf(G, 1.e-7)
+    G_new = SK.analyse_block_structure_from_gf(G, 1.0e-7)

     # transform G_noisy etc. to the new block structure
-    G_noisy = SK.block_structure.convert_gf(G_noisy, block_structure1, beta = G_noisy.mesh.beta, space_from='sumk')
-    G_pre_transform = SK.block_structure.convert_gf(G_pre_transform, block_structure1, beta = G_noisy.mesh.beta, space_from='sumk')
-    G_noisy_pre_transform = SK.block_structure.convert_gf(G_noisy_pre_transform, block_structure1, beta = G_noisy.mesh.beta, space_from='sumk')
+    G_noisy = SK.block_structure.convert_gf(G_noisy, block_structure1, beta=G_noisy.mesh.beta, space_from='sumk')
+    G_pre_transform = SK.block_structure.convert_gf(G_pre_transform, block_structure1, beta=G_noisy.mesh.beta, space_from='sumk')
+    G_noisy_pre_transform = SK.block_structure.convert_gf(
+        G_noisy_pre_transform, block_structure1, beta=G_noisy.mesh.beta, space_from='sumk'
+    )

-    assert len(SK.deg_shells[0]) == 2, "wrong number of equivalent groups found"
-    assert sorted([len(d) for d in SK.deg_shells[0]]) == [2,3], "wrong number of members in the equivalent groups found"
+    assert len(SK.deg_shells[0]) == 2, 'wrong number of equivalent groups found'
+    assert sorted([len(d) for d in SK.deg_shells[0]]) == [2, 3], 'wrong number of members in the equivalent groups found'
     for d in SK.deg_shells[0]:
-        if len(d)==2:
-            assert 'ud_3' in d, "shell ud_3 missing"
-            assert 'ud_4' in d, "shell ud_4 missing"
-        if len(d)==3:
-            assert 'ud_0' in d, "shell ud_0 missing"
-            assert 'ud_1' in d, "shell ud_1 missing"
-            assert 'ud_2' in d, "shell ud_2 missing"
+        if len(d) == 2:
+            assert 'ud_3' in d, 'shell ud_3 missing'
+            assert 'ud_4' in d, 'shell ud_4 missing'
+        if len(d) == 3:
+            assert 'ud_0' in d, 'shell ud_0 missing'
+            assert 'ud_1' in d, 'shell ud_1 missing'
+            assert 'ud_2' in d, 'shell ud_2 missing'

     # the convention is that for every degenerate shell, the transformation
     # matrix v and the conjugate bool is saved
@@ -192,21 +195,21 @@ for conjugate in conjugate_values:
                 normalized_gf << normalized_gf.transpose()
             normalized_gfs.append(normalized_gf)
         for i in range(len(normalized_gfs)):
-            for j in range(i+1,len(normalized_gfs)):
+            for j in range(i + 1, len(normalized_gfs)):
                 # here, we use a threshold that is 1 order of magnitude less strict
                 # because of numerics
-                assert_gfs_are_close(normalized_gfs[i], normalized_gfs[j], 1.e-6)
+                assert_gfs_are_close(normalized_gfs[i], normalized_gfs[j], 1.0e-6)

     # now we check symm_deg_gf
     # symmetrizing the GF has is has to leave it unchanged
     G_new_symm = G_new[0].copy()
     SK.symm_deg_gf(G_new_symm, 0)
-    assert_block_gfs_are_close(G_new[0], G_new_symm, 1.e-6)
+    assert_block_gfs_are_close(G_new[0], G_new_symm, 1.0e-6)

     # symmetrizing the noisy GF, which was carefully constructed,
     # has to give the same result as G_new[0]
     SK.symm_deg_gf(G_noisy, 0)
-    assert_block_gfs_are_close(G_new[0], G_noisy, 1.e-6)
+    assert_block_gfs_are_close(G_new[0], G_noisy, 1.0e-6)

     # check backward compatibility of symm_deg_gf
     # first, construct the old format of the deg shells
@@ -217,9 +220,9 @@ for conjugate in conjugate_values:
     # symmetrizing the GF as is has to leave it unchanged
     G_new_symm << G_pre_transform
     SK.symm_deg_gf(G_new_symm, 0)
-    assert_block_gfs_are_close(G_new_symm, G_pre_transform, 1.e-6)
+    assert_block_gfs_are_close(G_new_symm, G_pre_transform, 1.0e-6)

     # symmetrizing the noisy GF pre transform, which was carefully constructed,
     # has to give the same result as G_pre_transform
     SK.symm_deg_gf(G_noisy_pre_transform, 0)
-    assert_block_gfs_are_close(G_noisy_pre_transform, G_pre_transform, 1.e-6)
+    assert_block_gfs_are_close(G_noisy_pre_transform, G_pre_transform, 1.0e-6)
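For reference, the degenerate-shell convention this test exercises: each entry of SK.deg_shells[ish] maps a block name to a pair (v, conjugate), and all blocks of one group should agree after the normalization below. This is a sketch paraphrased from the test's own check (variable names are illustrative, SK and G_new as in the script above):

    for group in SK.deg_shells[0]:
        normalized = []
        for block_name, (v, conj) in group.items():
            g = G_new[0][block_name].copy()
            # rotate the block into the common basis: v^dagger * G * v
            g.from_L_G_R(v.conjugate().transpose(), G_new[0][block_name], v)
            if conj:
                g << g.transpose()
            normalized.append(g)
        # all members of one degenerate group should now be numerically equal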
diff --git a/test/python/analyse_block_structure_from_gf.ref.h5 b/test/python/analyse_block_structure_from_gf.ref.h5
index e0468605..d42fd419 100644
Binary files a/test/python/analyse_block_structure_from_gf.ref.h5 and b/test/python/analyse_block_structure_from_gf.ref.h5 differ
diff --git a/test/python/analyse_block_structure_from_gf2.py b/test/python/analyse_block_structure_from_gf2.py
index 074a9bf3..13569951 100644
--- a/test/python/analyse_block_structure_from_gf2.py
+++ b/test/python/analyse_block_structure_from_gf2.py
@@ -48,7 +48,7 @@ G['ud'] << inverse(inverse(G['ud']) - Hloc)

 SK = SumkDFT(hdf_file = 'SrIrO3_rot.h5', use_dft_blocks=False)

-G_new = SK.analyse_block_structure_from_gf([G])
+G_new = SK.analyse_block_structure_from_gf([G]*2)
 G_new_symm = G_new[0].copy()
 SK.symm_deg_gf(G_new_symm, 0)
 assert_block_gfs_are_close(G_new[0], G_new_symm)
@@ -93,7 +93,7 @@ known_moments[1,:] = np.eye(10)
 tail, err = fit_tail(G['ud'], known_moments)
 Gt['ud'].set_from_fourier(G['ud'], tail)

-G_new = SK.analyse_block_structure_from_gf([Gt])
+G_new = SK.analyse_block_structure_from_gf([Gt] * 2)
 G_new_symm = G_new[0].copy()
 SK.symm_deg_gf(G_new_symm, 0)
 assert_block_gfs_are_close(G_new[0], G_new_symm)
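The [G] to [G]*2 change follows from the new assertion that analyse_block_structure_from_gf receives one GF per correlated shell (the SrIrO3_rot.h5 archive used here evidently carries two equivalent correlated shells). A hedged sketch of how such a list can be built from per-inequivalent-shell data given in sumk block structure (G_per_ineq is an assumed name):

    # one BlockGf per inequivalent shell, expanded to one entry per correlated
    # shell via the corr_to_inequiv map of SumkDFT
    G_per_corr = [G_per_ineq[SK.corr_to_inequiv[icrsh]]
                  for icrsh in range(SK.n_corr_shells)]
    G_new = SK.analyse_block_structure_from_gf(G_per_corr)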
diff --git a/test/python/dc_test.py b/test/python/dc_test.py
index adf527e7..9d7839fa 100755
--- a/test/python/dc_test.py
+++ b/test/python/dc_test.py
@@ -55,7 +55,7 @@ def test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename):

     dc_no = method_dict[method]["numbering_convention"]
     dc_string = method_dict[method]["new_convention"]
-    
+
     mpi.report("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
     mpi.report(f"\n Testing interface {method} \n")
     mpi.report("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
@@ -71,8 +71,8 @@ def test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename):
         mpi.report("Up DC matrix:")
         mpi.report(SK_new.dc_imp[0]['up'])
         mpi.report(f"Double counting energy = {SK_new.dc_energ} ")
-    
-    # Load previously computed DC values from h5 archive 
+
+    # Load previously computed DC values from h5 archive
     R = HDFArchive(f'./{filename}', 'r')
     dc_comp = R[f'DC_{method}_benchmark']['dc_imp']
     en_comp = R[f'DC_{method}_benchmark']['dc_energ']
@@ -84,7 +84,7 @@ def test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename):
     assert np.allclose(SK_new.dc_imp[0]['up'], dc_comp, atol=1e-12), f"Assertion failed comparing Vdc to reference, method: {method} "
     assert np.allclose(SK_new.dc_energ, en_comp, atol=1e-12), f"Assertion failed comparing energy to reference, method: {method} "
     mpi.report("Comparison with stored DC values successfull!\n")
-    
+



@@ -104,20 +104,20 @@ SK_new.set_mu(13.9)

 icrsh = 0

-dens = SK_compat.density_matrix()
+dens = SK_compat.density_matrix(transform_to_solver_blocks=True)

 with np.printoptions(precision=5):
     for key in dens[0].keys():
         mpi.report(f"{key} channel")
         mpi.report(dens[0][key].real)

-N_up = np.trace(dens[0]['up'].real)
-N_down = np.trace(dens[0]['down'].real)
+N_up = np.trace(dens[0]['up_0'].real)
+N_down = np.trace(dens[0]['down_0'].real)
 N_tot = N_up + N_down

 mpi.report(f"{N_up=} ,{N_down=}, {N_tot=}\n")

-for method in ["FLL", "AMF", "Held"]: 
+for method in ["FLL", "AMF", "Held"]:
     test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename = f"{dft_filename}.h5")

 #in case implementation changes, to write new testing data into archive
@@ -141,16 +141,15 @@ SK_compat = SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks)
 SK_new = SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks)

 icrsh = 0

-dens = SK_compat.density_matrix()
-
+dens = SK_compat.density_matrix(transform_to_solver_blocks=True)
 with np.printoptions(precision=5):
     for key in dens[0].keys():
         mpi.report(f"{key} channel")
         mpi.report(dens[0][key].real)

-N_up = np.trace(dens[0]['up'].real)
-N_down = np.trace(dens[0]['down'].real)
+N_up = np.trace(dens[0]['up_0'].real)
+N_down = np.trace(dens[0]['down_0'].real)
 N_tot = N_up + N_down

 mpi.report(f"{N_up=} ,{N_down=}, {N_tot=}\n")
@@ -158,9 +157,9 @@ mpi.report(f"{N_up=} ,{N_down=}, {N_tot=}\n")
 Uval = 5
 Jval = 0.3

-for method in ["FLL", "AMF", "Held"]: 
+for method in ["FLL", "AMF", "Held"]:
     test_dc(SK_compat, SK_new, method, method_dict, dens, Uval, Jval, filename = f"{dft_filename}.h5" )
-    
+
 #in case implementation changes, to write new testing data into archive
 #R = HDFArchive(f'./{dft_filename}.h5', 'a')
 #R.create_group(f'DC_{method}_benchmark')
diff --git a/test/python/sumkdft_basic.py b/test/python/sumkdft_basic.py
index 5f6d8f39..b5345e30 100644
--- a/test/python/sumkdft_basic.py
+++ b/test/python/sumkdft_basic.py
@@ -20,16 +20,20 @@
 #
 ################################################################################

-from h5 import *
+from h5 import HDFArchive
 from triqs_dft_tools.sumk_dft_tools import SumkDFTTools
 import triqs.utility.mpi as mpi
 from triqs.utility.comparison_tests import *
 from triqs.utility.h5diff import h5diff
-
+import numpy as np

 SK = SumkDFTTools(hdf_file = 'SrVO3.ref.h5')

-dm = SK.density_matrix(method = 'using_gf')
+dm = SK.density_matrix(method = 'using_gf', transform_to_solver_blocks=False, with_Sigma=False)
 dm_pc = SK.partial_charges(with_Sigma=False, with_dc=False)
+dm_pi = SK.density_matrix_using_point_integration()
+
+for key, value in dm[0].items():
+    assert (np.allclose(value, dm_pi[0][key], atol=1e-6, rtol=1e-6))

 with HDFArchive('sumkdft_basic.out.h5','w') as ar:
     ar['dm'] = dm
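Note on block naming, tied to the '_0' suffix introduced in the first hunk of sumk_dft.py: with transform_to_solver_blocks=True, returned quantities are keyed by solver-block names, which now carry the suffix even for the default identity mapping, hence dens[0]['up_0'] in dc_test.py. A minimal sketch (SK as constructed in the tests above; the printed values are examples only):

    import numpy as np

    # spin-polarized shell without spin-orbit coupling
    print(SK.gf_struct_solver[0])        # e.g. {'up_0': 3, 'down_0': 3}
    print(SK.solver_to_sumk_block[0])    # e.g. {'up_0': 'up', 'down_0': 'down'}

    dens = SK.density_matrix(method='using_gf', with_Sigma=False,
                             transform_to_solver_blocks=True)
    n_up = np.trace(dens[0]['up_0'].real)   # note: 'up_0', not 'up'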