mirror of https://github.com/triqs/dft_tools
synced 2024-10-31 11:13:46 +01:00

Reading vasptriqs.h5 file; reading GAMMA file for non-collinear full csc

parent f57080a6af
commit 75fbb9cbdb
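For orientation, the new reader introduced below pulls everything from the 'triqs' group of vasptriqs.h5 via HDFArchive. A minimal sketch (not part of the commit; the directory path and the printed message are illustrative) of how the Fermi energy and k-point count are accessed, assuming the group layout used in the diff:

    # Hedged sketch: assumes the 'triqs' group layout used in this commit
    # (triqs/efermi, triqs/kpoints/num_kpoints); the path is illustrative.
    import os
    from h5 import HDFArchive

    vasp_dir = './'  # hypothetical VASP calculation directory
    h5path = os.path.join(vasp_dir, 'vasptriqs.h5')

    if os.path.isfile(h5path):
        with HDFArchive(h5path, 'r') as ar:
            efermi = ar['triqs']['efermi']                 # Fermi energy in eV
            nktot = ar['triqs']['kpoints']['num_kpoints']  # number of k-points
        print(f'Fermi energy read from vasptriqs.h5: {efermi:.4f} eV, {nktot} k-points')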
@@ -1,4 +1,3 @@
################################################################################
#
# TRIQS: a Toolbox for Research in Interacting Quantum Systems

@@ -44,6 +43,7 @@ from h5 import HDFArchive
log = logging.getLogger('plovasp.vaspio')


def read_lines(filename):
    r"""
    Generator of lines for a file

@@ -57,6 +57,7 @@ def read_lines(filename):
        for line in f:
            yield line


################################################################################
################################################################################
#
@@ -68,45 +69,59 @@ class VaspData:
    """
    Container class for all VASP data.
    """

    def __init__(self, vasp_dir, read_all=True, efermi_required=True):
        self.vasp_dir = vasp_dir

        self.plocar = Plocar()
        self.poscar = Poscar()
        self.kpoints = Kpoints()
        self.eigenval = Eigenval()
        self.doscar = Doscar()
        # NEW vasptriqs.h5
        vasptriqs = os.path.isfile(os.path.join(vasp_dir, 'vasptriqs.h5'))
        if vasptriqs:
            log.warning("Reading from vasptriqs.h5")
            h5path = os.path.join(vasp_dir, 'vasptriqs.h5')
            self.plocar = h5Plocar(h5path)
            self.poscar = h5Poscar(h5path)
            self.kpoints = h5Kpoints(h5path)
            self.eigenval = h5Eigenval(h5path)
            self.doscar = h5Doscar(h5path)
        else:
            self.plocar = Plocar()
            self.poscar = Poscar()
            self.kpoints = Kpoints()
            self.eigenval = Eigenval()
            self.doscar = Doscar()

        if read_all:
            self.plocar.from_file(vasp_dir)
            self.poscar.from_file(vasp_dir)
            self.kpoints.from_file(vasp_dir)
            try:
                self.eigenval.from_file(vasp_dir)
            except (IOError, StopIteration):
                self.eigenval.eigs = None
                self.eigenval.ferw = None
                log.warning("Error reading from EIGENVAL, trying LOCPROJ...")

        if efermi_required:
        if read_all:
            self.plocar.from_file(vasp_dir)
            self.poscar.from_file(vasp_dir)
            self.kpoints.from_file(vasp_dir)
            try:
                with HDFArchive(vasp_dir + "vasptriqs.h5", 'r') as ar:
                    self.doscar.efermi = ar['triqs/efermi']
                print(f'Fermi energy read from vasptriqs.h5: {self.doscar.efermi:.4f} eV')
            except NameError:
                raise Exception("Efermi cannot be read from vasptriqs.h5 file")
            else:
                # TODO: This a hack. Find out a way to determine ncdij without DOSCAR
                log.warning("Error reading Efermi from DOSCAR, taking from config")
                self.doscar.ncdij = self.plocar.nspin
                self.eigenval.from_file(vasp_dir)
            except (IOError, StopIteration):
                self.eigenval.eigs = None
                self.eigenval.ferw = None
                log.warning("Error reading from EIGENVAL, trying LOCPROJ...")

################################################################################
################################################################################
            try:
                self.doscar.from_file(vasp_dir)
            except (IOError, StopIteration):
                if efermi_required:
                    log.warning("Error reading Efermi from DOSCAR, trying LOCPROJ...")
                    try:
                        self.plocar.efermi
                        self.doscar.efermi = self.plocar.efermi
                    except NameError:
                        raise Exception("Efermi cannot be read from DOSCAR or LOCPROJ")
                else:
                    # TODO: This a hack. Find out a way to determine ncdij without DOSCAR
                    log.warning("Error reading Efermi from DOSCAR, taking from config")
                    self.doscar.ncdij = self.plocar.nspin


##########################
#
# class Plocar
#
################################################################################
################################################################################
##########################
class Plocar:
    """
    Class containing raw PLO data from VASP.
@@ -117,6 +132,7 @@ class Plocar:
    - *ferw* (array(nion, ns, nk, nb)) : Fermi weights from VASP

    """

    def __init__(self):
        self.plo = None
        self.proj_params = None

@@ -133,15 +149,14 @@ class Plocar:
        plocar_filename (str) : filename [default = 'PLOCAR']

        """
        # Add a slash to the path name if necessary
        # Add a slash to the path name if necessary
        if vasp_dir[-1] != '/':
            vasp_dir += '/'

        # self.params, self.plo, self.ferw = c_plocar_io.read_plocar(vasp_dir + plocar_filename)
        # self.proj_params, self.plo = self.temp_parser(projcar_filename=vasp_dir + "PROJCAR", locproj_filename=vasp_dir + "LOCPROJ")
        # self.params, self.plo, self.ferw = c_plocar_io.read_plocar(vasp_dir + plocar_filename)
        # self.proj_params, self.plo = self.temp_parser(projcar_filename=vasp_dir + "PROJCAR", locproj_filename=vasp_dir + "LOCPROJ")
        self.proj_params, self.plo = self.locproj_parser(locproj_filename=vasp_dir + "LOCPROJ")

    def locproj_parser(self, locproj_filename='LOCPROJ'):
        r"""
        Parses LOCPROJ (for VASP >= 5.4.2) to get VASP projectors.

@@ -157,10 +172,10 @@ class Plocar:
        def lm_to_l_m(lm):
            l = int(np.sqrt(lm))
            m = lm - l*l
            m = lm - l * l
            return l, m

        # Read the first line of LOCPROJ to get the dimensions
        # Read the first line of LOCPROJ to get the dimensions
        with open(locproj_filename, 'rt') as f:
            line = f.readline()
            line = line.split("#")[0]

@@ -173,6 +188,11 @@ class Plocar:
            self.nspin_band = 2 if self.ncdij == 2 else 1

            try:
                self.efermi = float(sline[4])
            except:
                log.warning("Error reading Efermi from LOCPROJ, trying DOSCAR...")

            plo = np.zeros((nproj, self.nspin, nk, self.nband), dtype=complex)
            proj_params = [{} for i in range(nproj)]
@@ -188,7 +208,7 @@ class Plocar:
            log.debug("NC FLAG : {}".format(self.nc_flag))

            # First read the header block with orbital labels
            # First read the header block with orbital labels
            line = self.search_for(f, "^ *ISITE")
            ip = 0
            while line:

@@ -197,20 +217,20 @@ class Plocar:
                label = sline[-1].strip()
                lm = orb_labels.index(label)
                l, m = lm_to_l_m(lm)
                # ip_new = iproj_site * norb + il
                # ip_prev = (iproj_site - 1) * norb + il
                # ip_new = iproj_site * norb + il
                # ip_prev = (iproj_site - 1) * norb + il
                proj_params[ip]['label'] = label
                proj_params[ip]['isite'] = isite
                proj_params[ip]['l'] = l
                if self.nc_flag == True:
                    if (ip % 2) == 0:
                        proj_params[ip]['m'] = 2*m
                        proj_params[ip]['m'] = 2 * m
                    else:
                        proj_params[ip]['m'] = 2*m + 1
                        proj_params[ip]['m'] = 2 * m + 1
                else:
                    proj_params[ip]['m'] = m

                ip +=1
                ip += 1

                line = f.readline().strip()

@@ -244,7 +264,6 @@ class Plocar:
        return proj_params, plo

    def search_for(self, f, patt):
        r"""
        Reads file 'f' until pattern 'patt' is encountered and returns

@@ -258,13 +277,11 @@ class Plocar:
        return line


################################################################################
################################################################################
##########################
#
# class Poscar
#
################################################################################
################################################################################
##########################
class Poscar:
    """
    Class containing POSCAR data from VASP.
@@ -278,6 +295,7 @@ class Poscar:
    - q_types ([numpy.array((nions, 3), dtype=float)]) : a list of
      arrays each containing fractional coordinates of ions of a given type
    """

    def __init__(self):
        self.q_cart = None

@@ -292,68 +310,69 @@ class Poscar:
        plocar_filename (str) : filename [default = 'POSCAR']

        """
        # Convenince local function

        # Convenince local function
        def readline_remove_comments():
            return next(f).split('!')[0].split('#')[0].strip()

        # Add a slash to the path name if necessary
        # Add a slash to the path name if necessary
        if vasp_dir[-1] != '/':
            vasp_dir += '/'

        f = read_lines(vasp_dir + poscar_filename)
        # Comment line
        # Comment line
        comment = next(f).rstrip()
        print(" Found POSCAR, title line: %s"%(comment))
        print(" Found POSCAR, title line: %s" % (comment))

        # Read scale
        # Read scale
        sline = readline_remove_comments()
        ascale = float(sline)
        # Read lattice vectors
        # Read lattice vectors
        self.a_brav = np.zeros((3, 3))
        for ia in range(3):
            sline = readline_remove_comments()
            self.a_brav[ia, :] = list(map(float, sline.split()))
        # Negative scale means that it is a volume scale
        # Negative scale means that it is a volume scale
        if ascale < 0:
            vscale = -ascale
            vol = np.linalg.det(self.a_brav)
            ascale = (vscale / vol)**(1.0/3)
            ascale = (vscale / vol) ** (1.0 / 3)

        self.a_brav *= ascale

        # Depending on the version of VASP there could be
        # an extra line with element names
        # Depending on the version of VASP there could be
        # an extra line with element names
        sline = readline_remove_comments()
        try:
            # Old v4.6 format: no element names
            # Old v4.6 format: no element names
            self.nions = list(map(int, sline.split()))
            self.el_names = ['El%i'%(i) for i in range(len(self.nions))]
            self.el_names = ['El%i' % (i) for i in range(len(self.nions))]
        except ValueError:
            # New v5.x format: read element names first
            # New v5.x format: read element names first
            self.el_names = sline.split()
            sline = readline_remove_comments()
            self.nions = list(map(int, sline.split()))

        # Set the number of atom sorts (types) and the total
        # number of atoms in the unit cell
        # Set the number of atom sorts (types) and the total
        # number of atoms in the unit cell
        self.ntypes = len(self.nions)
        self.nq = sum(self.nions)

        # Check for the line 'Selective dynamics' (and ignore it)
        # Check for the line 'Selective dynamics' (and ignore it)
        sline = readline_remove_comments()
        if sline[0].lower() == 's':
            sline = readline_remove_comments()

        # Check whether coordinates are cartesian or fractional
        # Check whether coordinates are cartesian or fractional
        cartesian = (sline[0].lower() in 'ck')
        # determine reciprocal basis in units of 2*pi
        self.kpt_basis = np.linalg.inv(self.a_brav.T)

        # Read atomic positions
        # Read atomic positions
        self.q_types = []
        self.type_of_ion = []
        for it in range(self.ntypes):
            # Array mapping ion index to type
            # Array mapping ion index to type
            self.type_of_ion += self.nions[it] * [it]

            q_at_it = np.zeros((self.nions[it], 3))
@@ -370,19 +389,12 @@ class Poscar:
        print(" Number of types:", self.ntypes)
        print(" Number of ions for each type:", self.nions)

        # print
        # print " Coords:"
        # for it in range(ntypes):
        # print " Element:", el_names[it]
        # print q_at[it]

################################################################################
################################################################################
##########################
#
# class Kpoints
#
################################################################################
################################################################################
##########################
class Kpoints:
    """
    Class describing k-points and optionally tetrahedra.

@@ -395,13 +407,15 @@ class Kpoints:
    - volt (float) : volume of a tetrahedron (the k-grid is assumed to
      be uniform)
    """

    def __init__(self):
        self.kpts = None
        self.nktot = None
        self.kwghts = None
    #
    # Reads IBZKPT file
    #

    #
    # Reads IBZKPT file
    #
    def from_file(self, vasp_dir='./', ibz_filename='IBZKPT'):
        r"""
        Reads from IBZKPT: k-points and optionally

@@ -415,15 +429,15 @@ class Kpoints:

        """

        # Add a slash to the path name if necessary
        # Add a slash to the path name if necessary
        if vasp_dir[-1] != '/':
            vasp_dir += '/'

        ibz_file = read_lines(vasp_dir + ibz_filename)

        # Skip comment line
        # Skip comment line
        line = next(ibz_file)
        # Number of k-points
        # Number of k-points
        line = next(ibz_file)
        self.nktot = int(line.strip().split()[0])

@@ -433,7 +447,7 @@ class Kpoints:
        self.kpts = np.zeros((self.nktot, 3))
        self.kwghts = np.zeros((self.nktot))

        # Skip comment line
        # Skip comment line
        line = next(ibz_file)
        for ik in range(self.nktot):
            line = next(ibz_file)

@@ -443,12 +457,12 @@ class Kpoints:

        self.kwghts /= self.nktot

        # Attempt to read tetrahedra
        # Skip comment line ("Tetrahedra")
        # Attempt to read tetrahedra
        # Skip comment line ("Tetrahedra")
        try:
            line = next(ibz_file)

            # Number of tetrahedra and volume = 1/(6*nkx*nky*nkz)
            # Number of tetrahedra and volume = 1/(6*nkx*nky*nkz)
            line = next(ibz_file)
            sline = line.split()
            self.ntet = int(sline[0])
@@ -456,35 +470,26 @@ class Kpoints:

            print(" {0:>26} {1:d}".format("Total number of tetrahedra:", self.ntet))

            # Traditionally, itet[it, 0] contains multiplicity
            # Traditionally, itet[it, 0] contains multiplicity
            self.itet = np.zeros((self.ntet, 5), dtype=int)
            for it in range(self.ntet):
                line = next(ibz_file)
                self.itet[it, :] = list(map(int, line.split()[:5]))
                line = next(ibz_file)
                self.itet[it, :] = list(map(int, line.split()[:5]))
        except StopIteration as ValueError:
            print(" No tetrahedron data found in %s. Skipping..."%(ibz_filename))
            print(" No tetrahedron data found in %s. Skipping..." % (ibz_filename))
            self.ntet = 0

        # data = { 'nktot': nktot,
        # 'kpts': kpts,
        # 'ntet': ntet,
        # 'itet': itet,
        # 'volt': volt }
        #
        # return data


################################################################################
################################################################################
##########################
#
# class Eigenval
#
################################################################################
################################################################################
##########################
class Eigenval:
    """
    Class containing Kohn-Sham-eigenvalues data from VASP (EIGENVAL file).
    """

    def __init__(self):
        self.eigs = None
        self.ferw = None

@@ -496,43 +501,43 @@ class Eigenval:
        then used to check the consistency of files read.
        """

        # Add a slash to the path name if necessary
        # Add a slash to the path name if necessary
        if vasp_dir[-1] != '/':
            vasp_dir += '/'

        f = read_lines(vasp_dir + eig_filename)

        # First line: only the first and the last number out of four
        # are used; these are 'nions' and 'ispin'
        # First line: only the first and the last number out of four
        # are used; these are 'nions' and 'ispin'
        sline = next(f).split()
        self.nq = int(sline[0])
        self.ispin = int(sline[3])

        # Second line: cell volume and lengths of lattice vectors (skip)
        # Second line: cell volume and lengths of lattice vectors (skip)
        sline = next(f)

        # Third line: temperature (skip)
        # Third line: temperature (skip)
        sline = next(f)

        # Fourth and fifth line: useless
        # Fourth and fifth line: useless
        sline = next(f)
        sline = next(f)

        # Sixth line: NELECT, NKTOT, NBTOT
        # Sixth line: NELECT, NKTOT, NBTOT
        sline = next(f).split()
        self.nelect = int(sline[0])
        self.nktot = int(sline[1])
        self.nband = int(sline[2])

        # Set of eigenvalues and k-points
        # Set of eigenvalues and k-points
        self.kpts = np.zeros((self.nktot, 3))
        self.kwghts = np.zeros((self.nktot,))
        self.eigs = np.zeros((self.nktot, self.nband, self.ispin))
        self.ferw = np.zeros((self.nktot, self.nband, self.ispin))

        for ik in range(self.nktot):
            sline = next(f)  # Empty line
            sline = next(f)  # k-point info
            sline = next(f)  # Empty line
            sline = next(f)  # k-point info
            tmp = list(map(float, sline.split()))
            self.kpts[ik, :] = tmp[:3]
            self.kwghts[ik] = tmp[3]
@@ -541,21 +546,20 @@ class Eigenval:
                sline = next(f).split()
                tmp = list(map(float, sline))
                assert len(tmp) == 2 * self.ispin + 1, "EIGENVAL file is incorrect (probably from old versions of VASP)"
                self.eigs[ik, ib, :] = tmp[1:self.ispin+1]
                self.ferw[ik, ib, :] = tmp[self.ispin+1:]
                self.eigs[ik, ib, :] = tmp[1:self.ispin + 1]
                self.ferw[ik, ib, :] = tmp[self.ispin + 1:]


################################################################################
################################################################################
##########################
#
# class Doscar
#
################################################################################
################################################################################
##########################
class Doscar:
    """
    Class containing some data from DOSCAR
    """

    def __init__(self):
        self.ncdij = None
        self.efermi = None

@@ -565,38 +569,40 @@ class Doscar:
        Reads only E_Fermi from DOSCAR.
        """

        # Add a slash to the path name if necessary
        # Add a slash to the path name if necessary
        if vasp_dir[-1] != '/':
            vasp_dir += '/'

        f = read_lines(vasp_dir + dos_filename)

        # First line: NION, NION, JOBPAR, NCDIJ
        # First line: NION, NION, JOBPAR, NCDIJ
        sline = next(f).split()

        # Skip next 4 lines
        # Skip next 4 lines
        for _ in range(4):
            sline = next(f)

        # Sixth line: EMAX, EMIN, NEDOS, EFERMI, 1.0
        # Sixth line: EMAX, EMIN, NEDOS, EFERMI, 1.0
        sline = next(f).split()
        self.efermi = float(sline[3])


# TODO: implement output of SYMMCAR in VASP and read it here
################################################################
##########################
#
# Reads SYMMCAR
#
################################################################
##########################
def read_symmcar(vasp_dir, symm_filename='SYMMCAR'):
    """
    Reads SYMMCAR.
    """
    # Shorthand for simple parsing

    # Shorthand for simple parsing
    def extract_int_par(parname):
        return int(re.findall(parname + '\s*=\s*(\d+)', line)[-1])

    # Add a slash to the path name if necessary
    # Add a slash to the path name if necessary
    if vasp_dir[-1] != '/':
        vasp_dir += '/'

@@ -607,11 +613,11 @@ def read_symmcar(vasp_dir, symm_filename='SYMMCAR'):
    line = next(sym_file)
    ntrans = extract_int_par('NPCELL')
    # Lmax
    # Lmax
    line = next(sym_file)
    lmax = extract_int_par('LMAX')
    mmax = 2 * lmax + 1
    # Nion
    # Nion
    line = next(sym_file)
    nion = extract_int_par('NION')

@@ -620,22 +626,22 @@ def read_symmcar(vasp_dir, symm_filename='SYMMCAR'):
    print(" {0:>26} {1:d}".format("Number of ions:", nion))
    print(" {0:>26} {1:d}".format("L_max:", lmax))

    rot_mats = np.zeros((nrot, lmax+1, mmax, mmax))
    rot_mats = np.zeros((nrot, lmax + 1, mmax, mmax))
    rot_map = np.zeros((nrot, ntrans, nion), dtype=int)

    for irot in range(nrot):
        # Empty line
        # Empty line
        line = next(sym_file)
        # IROT index (skip it)
        # IROT index (skip it)
        line = next(sym_file)
        # ISYMOP matrix (can be also skipped)
        # ISYMOP matrix (can be also skipped)
        line = next(sym_file)
        line = next(sym_file)
        line = next(sym_file)

        # Skip comment " Permutation map..."
        # Skip comment " Permutation map..."
        line = next(sym_file)
        # Permutations (in chunks of 20 indices per line)
        # Permutations (in chunks of 20 indices per line)
        for it in range(ntrans):
            for ibl in range((nion - 1) // 20 + 1):
                i1 = ibl * 20
@@ -645,12 +651,163 @@ def read_symmcar(vasp_dir, symm_filename='SYMMCAR'):

    for l in range(lmax + 1):
        mmax = 2 * l + 1
        # Comment: "L = ..."
        # Comment: "L = ..."
        line = next(sym_file)
        for m in range(mmax):
            line = next(sym_file)
            rot_mats[irot, l, m, :mmax] = list(map(float, line.split()[:mmax]))

    data.update({ 'nrot': nrot, 'ntrans': ntrans,
                  'lmax': lmax, 'nion': nion,
                  'sym_rots': rot_mats, 'perm_map': rot_map })
    data.update({'nrot': nrot, 'ntrans': ntrans,
                 'lmax': lmax, 'nion': nion,
                 'sym_rots': rot_mats, 'perm_map': rot_map})


class h5Poscar:

    def __init__(self, h5path):
        # self.q_cart = None

        with HDFArchive(h5path, 'a') as archive:
            struct = archive['triqs']['structure']
            ascale = struct['scale']
            self.a_brav = struct['lattice_vectors']
            self.nions = struct['number_ion_types']
            self.el_names = struct['ion_types']
            direct = struct['direct_coordinates']
            qcoord = struct['position_ions']

        if ascale < 0:
            vscale = -ascale
            vol = np.linalg.det(self.a_brav)
            ascale = (vscale / vol) ** (1.0 / 3)
        self.a_brav *= ascale
        # determine reciprocal basis in units of 2*pi
        self.kpt_basis = np.linalg.inv(self.a_brav.T)

        # Set the number of atom sorts (types) and the total
        # number of atoms in the unit cell
        self.ntypes = len(self.nions)
        self.nq = sum(self.nions)

        # Read atomic positions
        self.q_types = []
        self.type_of_ion = []
        for it in range(self.ntypes):
            # Array mapping ion index to type
            self.type_of_ion += self.nions[it] * [it]
            q_at_it = np.zeros((self.nions[it], 3))
            for iq in range(self.nions[it]):
                if not direct:
                    qcoord = np.dot(self.kpt_basis, qcoord[iq + it])
                q_at_it[iq, :] = qcoord[iq + it]

            self.q_types.append(q_at_it)

        print(" Total number of ions:", self.nq)
        print(" Number of types:", self.ntypes)
        print(" Number of ions for each type:", self.nions)


class h5Kpoints:

    def __init__(self, h5path):

        # h5path = './vasptriqs.h5'
        with HDFArchive(h5path, 'a') as archive:
            kpoints = archive['triqs']['kpoints']
            self.nktot = kpoints['num_kpoints']
            self.kpts = kpoints['kpoint_coords']
            self.kwghts = kpoints['kpoints_symmetry_weight']
            try:
                self.ntet = kpoints['num_tetrahedra']
                self.vtet = kpoints['volume_weight_tetrahedra']
                self.itet = kpoints['coordinate_id_tetrahedra']
            except StopIteration as ValueError:
                print(" No tetrahedron data found in vasptriqs.h5. Skipping...")
                self.ntet = 0

        print()
        print(" {0:>26} {1:d}".format("Total number of k-points:", self.nktot))
        print(" {0:>26} {1:d}".format("Total number of tetrahedra:", self.ntet))


class h5Eigenval:

    def __init__(self, h5path):
        with HDFArchive(h5path, 'a') as archive:
            self.eigs = archive['triqs']['eigenvalues']
            self.ferw = archive['triqs']['fermi_weights']
        # TODO Change the format in VASP to have [kpoints, bands, spin]
        self.eigs = np.transpose(self.eigs, (1, 2, 0))
        self.ferw = np.transpose(self.ferw, (1, 2, 0))


class h5Doscar:

    def __init__(self, h5path):
        with HDFArchive(h5path, 'a') as archive:
            self.efermi = archive['triqs']['efermi']


class h5Plocar():

    def __init__(self, h5path):
        with HDFArchive(h5path, 'a') as archive:
            plo = np.array(archive['triqs']['plo'])
            self.nc_flag = int(archive['triqs']['noncoll'])

        self.nproj = plo.shape[0]
        self.ncdij = plo.shape[1]
        nk = plo.shape[2]
        self.nband = plo.shape[3]

        self.nspin = self.ncdij if self.ncdij < 4 else 1
        self.nspin_band = 2 if self.ncdij == 2 else 1

        log.debug("ISPIN is {}".format(self.nspin))
        log.debug("NC FLAG : {}".format(self.nc_flag))

        # self.proj_params, self.plo = self.locproj_parser(locproj_filename=vasp_dir + "LOCPROJ")

        orb_labels = ["s", "py", "pz", "px", "dxy", "dyz", "dz2", "dxz", "dx2-y2",
                      "fy(3x2-y2)", "fxyz", "fyz2", "fz3", "fxz2", "fz(x2-y2)", "fx(x2-3y2)"]

        self.plo = np.zeros((self.nproj, self.nspin, nk, self.nband), dtype=complex)

        for proj in range(self.nproj):
            for spin in range(self.nspin):
                for kpt in range(nk):
                    for band in range(self.nband):
                        real_plo = plo[proj, spin, kpt, band, 0]  # Real part
                        imag_plo = plo[proj, spin, kpt, band, 1]  # Imaginary part
                        self.plo[proj, spin, kpt, band] = complex(real_plo, imag_plo)

        def lm_to_l_m(lm):
            l = int(np.sqrt(lm))
            m = lm - l * l
            return l, m

        self.proj_params = [{} for i in range(self.nproj)]
        with HDFArchive(h5path, 'a') as archive:
            for it in range(self.nproj):
                projectors = archive['triqs']['plo_parameters'][str(it + 1)]
                self.proj_params[it]['label'] = projectors['ang_type']
                self.proj_params[it]['isite'] = projectors['site']
                self.proj_params[it]['coord'] = projectors['coordinates']

        for it in range(self.nproj):
            lm = orb_labels.index(self.proj_params[it]['label'])
            l, m = lm_to_l_m(lm)
            self.proj_params[it]['l'] = l
            if self.nc_flag == True:
                if (it % 2) == 0:
                    self.proj_params[it]['m'] = 2 * m
                else:
                    self.proj_params[it]['m'] = 2 * m + 1
            else:
                self.proj_params[it]['m'] = m
        # assert ip == nproj, "Number of projectors in the header is wrong in LOCPROJ"

        print("Read parameters: LOCPROJ")
        for il, par in enumerate(self.proj_params):
            print(il, " -> ", par)
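A note on the index convention used by locproj_parser and h5Plocar above: VASP orbital labels are mapped to a combined index lm = l*l + m, which lm_to_l_m unpacks. A minimal, self-contained sketch of that mapping (illustrative only; it re-uses the same formula and label list as the parser):

    import numpy as np

    orb_labels = ["s", "py", "pz", "px", "dxy", "dyz", "dz2", "dxz", "dx2-y2",
                  "fy(3x2-y2)", "fxyz", "fyz2", "fz3", "fxz2", "fz(x2-y2)", "fx(x2-3y2)"]

    def lm_to_l_m(lm):
        # Same decomposition as in the parser: lm = l*l + m
        l = int(np.sqrt(lm))
        m = lm - l * l
        return l, m

    for lm, label in enumerate(orb_labels):
        l, m = lm_to_l_m(lm)
        print(f"{label:>12}: lm={lm:2d} -> l={l}, m={m}")
    # e.g. 'dxy' has lm=4 -> (l=2, m=0); 'dx2-y2' has lm=8 -> (l=2, m=4)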
@@ -1,4 +1,3 @@
##########################################################################
#
# TRIQS: a Toolbox for Research in Interacting Quantum Systems

@@ -48,7 +47,7 @@ class SumkDFT(object):
    def __init__(self, hdf_file, h_field=0.0, mesh=None, beta=40, n_iw=1025, use_dft_blocks=False,
                 dft_data='dft_input', symmcorr_data='dft_symmcorr_input', parproj_data='dft_parproj_input',
                 symmpar_data='dft_symmpar_input', bands_data='dft_bands_input', transp_data='dft_transp_input',
                 misc_data='dft_misc_input',bc_data='dft_bandchar_input',cont_data='dft_contours_input'):
                 misc_data='dft_misc_input', bc_data='dft_bandchar_input', cont_data='dft_contours_input'):
        r"""
        Initialises the class from data previously stored into an hdf5 archive.
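For context, the constructor whose signature is reformatted in this hunk is normally called with just the archive name plus a few keyword arguments; a hedged usage sketch (the archive name 'vasp_run.h5' and the import path are assumptions based on the package layout, not part of this diff):

    # Hedged sketch: SumkDFT is constructed from a converter-produced h5 archive;
    # all other arguments keep the defaults shown in the signature above.
    from triqs_dft_tools.sumk_dft import SumkDFT

    sk = SumkDFT(hdf_file='vasp_run.h5', use_dft_blocks=True)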
@@ -122,26 +121,33 @@ class SumkDFT(object):
        self.block_structure = BlockStructure()

        # Read input from HDF:
        req_things_to_read = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
                              'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
                              'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
                              'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
        req_things_to_read = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below',
                              'density_required',
                              'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations',
                              'rot_mat',
                              'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights',
                              'hopping',
                              'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
        self.subgroup_present, self.values_not_read = self.read_input_from_hdf(
            subgrp=self.dft_data, things_to_read=req_things_to_read)

        # test if all required properties have been found
        if len(self.values_not_read) > 0 and mpi.is_master_node:
            raise ValueError('ERROR: One or more necessary SumK input properties have not been found in the given h5 archive:', self.values_not_read)
            raise ValueError(
                'ERROR: One or more necessary SumK input properties have not been found in the given h5 archive:',
                self.values_not_read)

        # optional properties to load
        # soon bz_weights is depraced and replaced by kpt_weights, kpts_basis and kpts will become required to read soon
        optional_things_to_read = ['proj_mat_csc', 'proj_or_hk', 'kpt_basis', 'kpts', 'kpt_weights', 'dft_code']
        subgroup_present, self.optional_values_not_read = self.read_input_from_hdf(subgrp=self.dft_data, things_to_read=optional_things_to_read)
        subgroup_present, self.optional_values_not_read = self.read_input_from_hdf(subgrp=self.dft_data,
                                                                                   things_to_read=optional_things_to_read)

        # warning if dft_code was not read (old h5 structure)
        if 'dft_code' in self.optional_values_not_read:
            self.dft_code = None
            mpi.report('\nWarning: old h5 archive without dft_code input flag detected. Please specify sumk.dft_code manually!\n')
            mpi.report(
                '\nWarning: old h5 archive without dft_code input flag detected. Please specify sumk.dft_code manually!\n')

        if self.symm_op:
            self.symmcorr = Symmetry(hdf_file, subgroup=self.symmcorr_data)

@@ -164,11 +170,13 @@ class SumkDFT(object):
            # GF structure used for the local things in the k sums
            # Most general form allowing for all hybridisation, i.e. largest
            # blocks possible
            self.gf_struct_sumk = [[(sp, self.corr_shells[icrsh]['dim']) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]]
                                   for icrsh in range(self.n_corr_shells)]
            self.gf_struct_sumk = [
                [(sp, self.corr_shells[icrsh]['dim']) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]]
                for icrsh in range(self.n_corr_shells)]
            # First set a standard gf_struct solver (add _0 here for consistency with analyse_block_structure):
            self.gf_struct_solver = [dict([(sp+'_0', self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
                                           for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]])
            self.gf_struct_solver = [dict([(sp + '_0', self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
                                           for sp in
                                           self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]])
                                     for ish in range(self.n_inequiv_shells)]
            # Set standard (identity) maps from gf_struct_sumk <->
            # gf_struct_solver

@@ -178,12 +186,12 @@ class SumkDFT(object):
                                         for ish in range(self.n_inequiv_shells)]
            for ish in range(self.n_inequiv_shells):
                for block, inner_dim in self.gf_struct_sumk[self.inequiv_to_corr[ish]]:
                    self.solver_to_sumk_block[ish][block+'_0'] = block
                    self.solver_to_sumk_block[ish][block + '_0'] = block
                    for inner in range(inner_dim):
                        self.sumk_to_solver[ish][
                            (block, inner)] = (block+'_0', inner)
                            (block, inner)] = (block + '_0', inner)
                        self.solver_to_sumk[ish][
                            (block+'_0', inner)] = (block, inner)
                            (block + '_0', inner)] = (block, inner)
            # assume no shells are degenerate
            self.deg_shells = [[] for ish in range(self.n_inequiv_shells)]

@@ -205,9 +213,9 @@ class SumkDFT(object):
            self.min_band_energy = None
            self.max_band_energy = None

    ################
    # hdf5 FUNCTIONS
    ################
    ################
    # hdf5 FUNCTIONS
    ################

    def read_input_from_hdf(self, subgrp, things_to_read):
        r"""

@@ -278,8 +286,8 @@ class SumkDFT(object):
        with HDFArchive(self.hdf_file, 'a') as ar:
            if not subgrp in ar: ar.create_group(subgrp)
            for it in things_to_save:
                if it in [ "gf_struct_sumk", "gf_struct_solver",
                        "solver_to_sumk", "sumk_to_solver", "solver_to_sumk_block"]:
                if it in ["gf_struct_sumk", "gf_struct_solver",
                          "solver_to_sumk", "sumk_to_solver", "solver_to_sumk_block"]:
                    warn("It is not recommended to save '{}' individually. Save 'block_structure' instead.".format(it))
                try:
                    ar[subgrp][it] = getattr(self, it)

@@ -316,9 +324,9 @@ class SumkDFT(object):
                raise ValueError("load: %s not found, and so not loaded." % it)
        return list_to_return

    ################
    # CORE FUNCTIONS
    ################
    ################
    # CORE FUNCTIONS
    ################

    def downfold(self, ik, ish, bname, gf_to_downfold, gf_inp, shells='corr', ir=None):
        r"""
@@ -479,7 +487,7 @@ class SumkDFT(object):
                    ), gf_rotated, rot_mat[ish].transpose())
                else:
                    gf_rotated.from_L_G_R(rot_mat[ish], gf_rotated, rot_mat[
                        ish].conjugate().transpose())
                        ish].conjugate().transpose())

        elif direction == 'toLocal':

@@ -535,15 +543,15 @@ class SumkDFT(object):
                broadening = 2.0 * ((mesh.w_max - mesh.w_min) / (len(mesh) - 1))

        # Check if G_latt is present
        set_up_G_latt = False  # Assume not
        if not hasattr(self, "G_latt" ):
        set_up_G_latt = False  # Assume not
        if not hasattr(self, "G_latt"):
            # Need to create G_latt_(i)w
            set_up_G_latt = True
        else:  # Check that existing GF is consistent
        else:  # Check that existing GF is consistent
            G_latt = self.G_latt
            GFsize = [gf.target_shape[0] for bname, gf in G_latt]
            unchangedsize = all([self.n_orbitals[ik, ntoi[spn[isp]]] == GFsize[
                isp] for isp in range(self.n_spin_blocks[self.SO])])
                isp] for isp in range(self.n_spin_blocks[self.SO])])
            if (not mesh is None) or (not unchangedsize):
                set_up_G_latt = True

@@ -606,7 +614,7 @@ class SumkDFT(object):
                                    - self.hopping[ik, ind, 0:n_orb, 0:n_orb])
            else:
                gf.data[:, :, :] = (idmat[ibl] *
                                    (mesh_values[:, None, None] + mu + self.h_field*(1-2*ibl) + 1j*broadening)
                                    (mesh_values[:, None, None] + mu + self.h_field * (1 - 2 * ibl) + 1j * broadening)
                                    - self.hopping[ik, ind, 0:n_orb, 0:n_orb])

        if with_Sigma:

@@ -640,9 +648,9 @@ class SumkDFT(object):
        if transform_to_sumk_blocks:
            Sigma_imp = self.transform_to_sumk_blocks(Sigma_imp)

        assert isinstance(Sigma_imp, list),\
        assert isinstance(Sigma_imp, list), \
            "put_Sigma: Sigma_imp has to be a list of Sigmas for the correlated shells, even if it is of length 1!"
        assert len(Sigma_imp) == self.n_corr_shells,\
        assert len(Sigma_imp) == self.n_corr_shells, \
            "put_Sigma: give exactly one Sigma for each corr. shell!"

        if (isinstance(self.mesh, (MeshImFreq, MeshDLRImFreq)) and

@@ -651,12 +659,15 @@ class SumkDFT(object):
                gf.mesh == self.mesh for bname, gf in Sigma_imp[0])):
            # Imaginary frequency Sigma:
            self.Sigma_imp = [self.block_structure.create_gf(ish=icrsh, mesh=Sigma_imp[icrsh].mesh, space='sumk')
                              for icrsh in range(self.n_corr_shells)]
                              for icrsh in range(self.n_corr_shells)]
            SK_Sigma_imp = self.Sigma_imp
        elif isinstance(self.mesh, MeshReFreq) and all(isinstance(gf, Gf) and isinstance(gf.mesh, MeshReFreq) and gf.mesh == self.mesh for bname, gf in Sigma_imp[0]):
        elif isinstance(self.mesh, MeshReFreq) and all(
                isinstance(gf, Gf) and isinstance(gf.mesh, MeshReFreq) and gf.mesh == self.mesh for bname, gf in
                Sigma_imp[0]):
            # Real frequency Sigma:
            self.Sigma_imp = [self.block_structure.create_gf(ish=icrsh, mesh=Sigma_imp[icrsh].mesh, gf_function=Gf, space='sumk')
                              for icrsh in range(self.n_corr_shells)]
            self.Sigma_imp = [
                self.block_structure.create_gf(ish=icrsh, mesh=Sigma_imp[icrsh].mesh, gf_function=Gf, space='sumk')
                for icrsh in range(self.n_corr_shells)]
            SK_Sigma_imp = self.Sigma_imp

        else:

@@ -672,13 +683,16 @@ class SumkDFT(object):
                else:
                    gf << Sigma_imp[icrsh][bname]

        #warning if real frequency self energy is within the bounds of the band energies
        # warning if real frequency self energy is within the bounds of the band energies
        if isinstance(self.mesh, MeshReFreq):
            if self.min_band_energy is None or self.max_band_energy is None:
                self.calculate_min_max_band_energies()
            mesh = np.linspace(self.mesh.w_min, self.mesh.w_max, len(self.mesh))
            if mesh[0] > (self.min_band_energy - self.chemical_potential) or mesh[-1] < (self.max_band_energy - self.chemical_potential):
                warn('The given Sigma is on a mesh which does not cover the band energy range. The Sigma MeshReFreq runs from %f to %f, while the band energy (minus the chemical potential) runs from %f to %f'%(mesh[0], mesh[-1], self.min_band_energy, self.max_band_energy))
            if mesh[0] > (self.min_band_energy - self.chemical_potential) or mesh[-1] < (
                    self.max_band_energy - self.chemical_potential):
                warn(
                    'The given Sigma is on a mesh which does not cover the band energy range. The Sigma MeshReFreq runs from %f to %f, while the band energy (minus the chemical potential) runs from %f to %f' % (
                        mesh[0], mesh[-1], self.min_band_energy, self.max_band_energy))

    def transform_to_sumk_blocks(self, Sigma_imp, Sigma_out=None):
        r""" transform Sigma from solver to sumk space

@@ -693,13 +707,14 @@ class SumkDFT(object):
            according to ``gf_struct_sumk``; if None, it will be created
        """

        assert isinstance(Sigma_imp, list),\
        assert isinstance(Sigma_imp, list), \
            "transform_to_sumk_blocks: Sigma_imp has to be a list of Sigmas for the inequivalent correlated shells, even if it is of length 1!"
        assert len(Sigma_imp) == self.n_inequiv_shells,\
        assert len(Sigma_imp) == self.n_inequiv_shells, \
            "transform_to_sumk_blocks: give exactly one Sigma for each inequivalent corr. shell!"

        if Sigma_out is None:
            Sigma_out = [self.block_structure.create_gf(ish=icrsh, mesh=Sigma_imp[self.corr_to_inequiv[icrsh]].mesh, space='sumk')
            Sigma_out = [self.block_structure.create_gf(ish=icrsh, mesh=Sigma_imp[self.corr_to_inequiv[icrsh]].mesh,
                                                        space='sumk')
                         for icrsh in range(self.n_corr_shells)]
        else:
            for icrsh in range(self.n_corr_shells):
@@ -808,7 +823,7 @@ class SumkDFT(object):

        return G_loc

    def transform_to_solver_blocks(self, G_loc, G_out=None, show_warnings = True):
    def transform_to_solver_blocks(self, G_loc, G_out=None, show_warnings=True):
        """ transform G_loc from sumk to solver space

        Parameters

@@ -846,7 +861,7 @@ class SumkDFT(object):
                ish_to=ish,
                space_from='sumk',
                G_out=G_out[ish],
                show_warnings = show_warnings)
                show_warnings=show_warnings)

        # return only the inequivalent shells:
        return G_out

@@ -876,10 +891,12 @@ class SumkDFT(object):
        """

        if dm is None:
            warn("WARNING: No density matrix given. Calculating density matrix with default parameters. This will be deprecated in future releases.")
            warn(
                "WARNING: No density matrix given. Calculating density matrix with default parameters. This will be deprecated in future releases.")
            dm = self.density_matrix(method='using_gf', transform_to_solver_blocks=False)

        assert len(dm) == self.n_corr_shells, "The number of density matrices must be equal to the number of correlated shells."
        assert len(
            dm) == self.n_corr_shells, "The number of density matrices must be equal to the number of correlated shells."
        dens_mat = [dm[self.inequiv_to_corr[ish]]
                    for ish in range(self.n_inequiv_shells)]
        if hloc is None:

@@ -894,7 +911,8 @@ class SumkDFT(object):
            self.solver_to_sumk_block[ish] = {}

            for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
                assert sp in dens_mat[ish], f"The density matrix does not contain the block {sp}. Is the input dm given in sumk block structure?"
                assert sp in dens_mat[
                    ish], f"The density matrix does not contain the block {sp}. Is the input dm given in sumk block structure?"
                n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']
                # gives an index list of entries larger that threshold
                dmbool = (abs(dens_mat[ish][sp]) > threshold)

@@ -914,10 +932,10 @@ class SumkDFT(object):
                    pair = offdiag.pop()
                    for b1, b2 in product(blocs, blocs):
                        if (pair[0] in b1) and (pair[1] in b2):
                            if blocs.index(b1) != blocs.index(b2):     # In separate blocks?
                            if blocs.index(b1) != blocs.index(b2):  # In separate blocks?
                                # Merge two blocks
                                b1.extend(blocs.pop(blocs.index(b2)))
                                break                          # Move on to next pair in offdiag
                                break  # Move on to next pair in offdiag

                # Set the gf_struct for the solver accordingly
                num_blocs = len(blocs)

@@ -993,9 +1011,10 @@ class SumkDFT(object):
        """
        # make a GfImTime from the supplied GfImFreq
        if all(isinstance(g_sh.mesh, MeshImFreq) for g_sh in G):
            gf = [BlockGf(name_block_generator = [(name, GfImTime(beta=block.mesh.beta,
                  indices=block.indices,n_points=len(block.mesh)+1)) for name, block in g_sh],
                  make_copies=False) for g_sh in G]
            gf = [BlockGf(name_block_generator=[(name, GfImTime(beta=block.mesh.beta,
                                                                indices=block.indices, n_points=len(block.mesh) + 1))
                                                for name, block in g_sh],
                          make_copies=False) for g_sh in G]
            for ish in range(len(gf)):
                for name, g in gf[ish]:
                    g.set_from_fourier(G[ish][name])

@@ -1007,7 +1026,7 @@ class SumkDFT(object):
            gf = [g_sh.copy() for g_sh in G]
            for ish in range(len(gf)):
                for name, g in gf[ish]:
                    g << 1.0j*(g-g.conjugate().transpose())/2.0/np.pi
                    g << 1.0j * (g - g.conjugate().transpose()) / 2.0 / np.pi
        elif all(isinstance(g_sh.mesh, MeshReTime) for g_sh in G):
            def get_delta_from_mesh(mesh):
                w0 = None

@@ -1015,24 +1034,23 @@ class SumkDFT(object):
                    if w0 is None:
                        w0 = w
                    else:
                        return w-w0
            gf = [BlockGf(name_block_generator = [(name, GfReFreq(
                window=(-np.pi*(len(block.mesh)-1) / (len(block.mesh)*get_delta_from_mesh(block.mesh)),
                         np.pi*(len(block.mesh)-1) / (len(block.mesh)*get_delta_from_mesh(block.mesh))),
                        return w - w0

            gf = [BlockGf(name_block_generator=[(name, GfReFreq(
                window=(-np.pi * (len(block.mesh) - 1) / (len(block.mesh) * get_delta_from_mesh(block.mesh)),
                        np.pi * (len(block.mesh) - 1) / (len(block.mesh) * get_delta_from_mesh(block.mesh))),
                n_points=len(block.mesh), indices=block.indices)) for name, block in g_sh], make_copies=False)
                for g_sh in G]
                  for g_sh in G]

            for ish in range(len(gf)):
                for name, g in gf[ish]:
                    g.set_from_fourier(G[ish][name])
                    g << 1.0j*(g-g.conjugate().transpose())/2.0/np.pi
                    g << 1.0j * (g - g.conjugate().transpose()) / 2.0 / np.pi
        else:
            raise Exception("G must be a list of BlockGf of either GfImFreq, GfImTime, GfReFreq or GfReTime")
        return gf

    def analyse_block_structure_from_gf(self, G, threshold=1.e-5, include_shells=None, analyse_deg_shells = True):
    def analyse_block_structure_from_gf(self, G, threshold=1.e-5, include_shells=None, analyse_deg_shells=True):
        r"""
        Determines the block structure of local Green's functions by analysing
        the structure of the corresponding non-interacting Green's function.
@@ -1065,7 +1083,8 @@ class SumkDFT(object):
        """

        assert isinstance(G, list), "G must be a list (with elements for each correlated shell)"
        assert len(G) == self.n_corr_shells, "G must have one element for each correlated shell, run extract_G_loc with transform_to_solver_blocks=False to get the correct G"
        assert len(
            G) == self.n_corr_shells, "G must have one element for each correlated shell, run extract_G_loc with transform_to_solver_blocks=False to get the correct G"

        gf = self._get_hermitian_quantity_from_gf(G)

@@ -1079,11 +1098,12 @@ class SumkDFT(object):
            self.solver_to_sumk[ish] = {}
            self.solver_to_sumk_block[ish] = {}
            for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
                assert sp in gf[self.inequiv_to_corr[ish]].indices, f"The Green's function does not contain the block {sp}. Is the input G given in sumk block structure?"
                assert sp in gf[self.inequiv_to_corr[
                    ish]].indices, f"The Green's function does not contain the block {sp}. Is the input G given in sumk block structure?"
                n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']

                # gives an index list of entries larger that threshold
                max_gf = np.max(np.abs(gf[self.inequiv_to_corr[ish]][sp].data),0)
                max_gf = np.max(np.abs(gf[self.inequiv_to_corr[ish]][sp].data), 0)
                maxgf_bool = (max_gf > threshold)

                # Determine off-diagonal entries in upper triangular part of the

@@ -1100,10 +1120,10 @@ class SumkDFT(object):
                    pair = offdiag.pop()
                    for b1, b2 in product(blocs, blocs):
                        if (pair[0] in b1) and (pair[1] in b2):
                            if blocs.index(b1) != blocs.index(b2):     # In separate blocks?
                            if blocs.index(b1) != blocs.index(b2):  # In separate blocks?
                                # Merge two blocks
                                b1.extend(blocs.pop(blocs.index(b2)))
                                break                          # Move on to next pair in offdiag
                                break  # Move on to next pair in offdiag

                # Set the gf_struct for the solver accordingly
                num_blocs = len(blocs)

@@ -1129,13 +1149,13 @@ class SumkDFT(object):

        # transform G to the new structure
        full_structure = BlockStructure.full_structure(
            [{sp:self.corr_shells[self.inequiv_to_corr[ish]]['dim']
              for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]}
             for ish in range(self.n_inequiv_shells)],self.corr_to_inequiv)
            [{sp: self.corr_shells[self.inequiv_to_corr[ish]]['dim']
              for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]}
             for ish in range(self.n_inequiv_shells)], self.corr_to_inequiv)
        G_transformed = [
            self.block_structure.convert_gf(G[ish],
                full_structure, ish, mesh=G[ish].mesh.copy(), show_warnings=threshold,
                gf_function=type(G[ish]._first()), space_from='sumk', space_to='solver')
                                            full_structure, ish, mesh=G[ish].mesh.copy(), show_warnings=threshold,
                                            gf_function=type(G[ish]._first()), space_from='sumk', space_to='solver')
            for ish in range(self.n_inequiv_shells)]

        if analyse_deg_shells:

@@ -1196,7 +1216,7 @@ class SumkDFT(object):
        for ish in include_shells:
            for block1 in self.gf_struct_solver[ish].keys():
                for block2 in self.gf_struct_solver[ish].keys():
                    if block1==block2: continue
                    if block1 == block2: continue

                    # check if the blocks are already present in the deg_shells
                    ind1 = -1

@@ -1229,9 +1249,9 @@ class SumkDFT(object):

                    e1 = np.linalg.eigvalsh(gf1.data[0])
                    e2 = np.linalg.eigvalsh(gf2.data[0])
                    if np.any(abs(e1-e2) > threshold): continue
                    if np.any(abs(e1 - e2) > threshold): continue

                    for conjugate in [False,True]:
                    for conjugate in [False, True]:
                        if conjugate:
                            gf2 = gf2.conjugate()

@@ -1248,13 +1268,15 @@ class SumkDFT(object):
                        # product to get a linear problem, which consists
                        # of finding the null space of M vec T = 0.

                        M = np.kron(np.eye(*gf1.target_shape),gf2.data[0])-np.kron(gf1.data[0].transpose(),np.eye(*gf1.target_shape))
                        M = np.kron(np.eye(*gf1.target_shape), gf2.data[0]) - np.kron(gf1.data[0].transpose(),
                                                                                      np.eye(*gf1.target_shape))
                        N = null(M, threshold)

                        # now we get the intersection of the null spaces
                        # of all values of tau
                        for i in range(1,len(gf1.data)):
                            M = np.kron(np.eye(*gf1.target_shape),gf2.data[i])-np.kron(gf1.data[i].transpose(),np.eye(*gf1.target_shape))
                        for i in range(1, len(gf1.data)):
                            M = np.kron(np.eye(*gf1.target_shape), gf2.data[i]) - np.kron(gf1.data[i].transpose(),
                                                                                          np.eye(*gf1.target_shape))
                            # transform M into current null space
                            M = np.dot(M, N)
                            N = np.dot(N, null(M, threshold))

@@ -1288,6 +1310,7 @@ class SumkDFT(object):
                        Then, it calculates the chi2 of
                        sum y[i] N[:,:,i] y[j].conjugate() N[:,:,j].conjugate().transpose() - eye.
                        """

                        def chi2(y):
                            # reinterpret y as complex number
                            y = y.view(complex)

@@ -1295,7 +1318,7 @@ class SumkDFT(object):
                            for a in range(Z.shape[0]):
                                for b in range(Z.shape[1]):
                                    ret += np.abs(np.dot(y, np.dot(Z[a, b], y.conjugate()))
                                                  - (1.0 if a == b else 0.0))**2
                                                  - (1.0 if a == b else 0.0)) ** 2
                            return ret

                        # use the minimization routine from scipy
@ -1348,7 +1371,8 @@ class SumkDFT(object):
|
||||
# C1(v1^dagger G1 v1) = C2(v2^dagger G2 v2)
|
||||
if (ind1 < 0) and (ind2 >= 0):
|
||||
if conjugate:
|
||||
self.deg_shells[ish][ind2][block1] = np.dot(T.conjugate().transpose(), v2[0].conjugate()), not v2[1]
|
||||
self.deg_shells[ish][ind2][block1] = np.dot(T.conjugate().transpose(),
|
||||
v2[0].conjugate()), not v2[1]
|
||||
else:
|
||||
self.deg_shells[ish][ind2][block1] = np.dot(T.conjugate().transpose(), v2[0]), v2[1]
|
||||
# the first block is already present
|
||||
@ -1379,7 +1403,8 @@ class SumkDFT(object):
|
||||
# a block was found, break out of the loop
|
||||
break
|
||||
|
||||
def calculate_diagonalization_matrix(self, prop_to_be_diagonal='eal', calc_in_solver_blocks=True, write_to_blockstructure = True, shells=None):
|
||||
def calculate_diagonalization_matrix(self, prop_to_be_diagonal='eal', calc_in_solver_blocks=True,
|
||||
write_to_blockstructure=True, shells=None):
|
||||
"""
|
||||
Calculates the diagonalisation matrix, and (optionally) stores it in the BlockStructure.
|
||||
|
||||
@ -1410,13 +1435,13 @@ class SumkDFT(object):
|
||||
|
||||
if self.block_structure.transformation:
|
||||
mpi.report(
|
||||
"calculate_diagonalization_matrix: requires block_structure.transformation = None.")
|
||||
"calculate_diagonalization_matrix: requires block_structure.transformation = None.")
|
||||
return 0
|
||||
|
||||
# Use all shells
|
||||
if shells is None:
|
||||
shells = range(self.n_corr_shells)
|
||||
elif max(shells) >= self.n_corr_shells: # Check if the shell indices are present
|
||||
elif max(shells) >= self.n_corr_shells: # Check if the shell indices are present
|
||||
mpi.report("calculate_diagonalization_matrix: shells not correct.")
|
||||
return 0
|
||||
|
||||
@ -1439,7 +1464,7 @@ class SumkDFT(object):
|
||||
prop[ish] = self.block_structure.convert_matrix(prop[ish], space_from='sumk', space_to='solver')
|
||||
# Get diagonalisation matrix, if not already diagonal
|
||||
for name in prop[ish]:
|
||||
if np.sum(abs(prop[ish][name]-np.diag(np.diagonal(prop[ish][name])))) > 1e-13:
|
||||
if np.sum(abs(prop[ish][name] - np.diag(np.diagonal(prop[ish][name])))) > 1e-13:
|
||||
trafo[name] = np.linalg.eigh(prop[ish][name])[1].conj().T
|
||||
else:
|
||||
trafo[name] = np.identity(np.shape(prop[ish][name])[0])
|
||||
@ -1477,7 +1502,7 @@ class SumkDFT(object):
|
||||
ntoi = self.spin_names_to_ind[self.SO]
|
||||
spn = self.spin_block_names[self.SO]
|
||||
for ik in mpi.slice_array(ikarray):
|
||||
dims = {sp:self.n_orbitals[ik, ntoi[sp]] for sp in spn}
|
||||
dims = {sp: self.n_orbitals[ik, ntoi[sp]] for sp in spn}
|
||||
MMat = [np.zeros([dims[sp], dims[sp]], complex) for sp in spn]
|
||||
|
||||
for isp, sp in enumerate(spn):
|
||||
@ -1498,7 +1523,7 @@ class SumkDFT(object):
|
||||
n_orb = self.n_orbitals[ik, ind]
|
||||
projmat = self.proj_mat[ik, ind, icrsh, 0:dim, 0:n_orb]
|
||||
dens_mat[icrsh][sp] += self.bz_weights[ik] * np.dot(np.dot(projmat, MMat[isp]),
|
||||
projmat.transpose().conjugate())
|
||||
projmat.transpose().conjugate())
|
||||
|
||||
# get data from nodes:
|
||||
for icrsh in range(self.n_corr_shells):
|
||||
@@ -1515,14 +1540,14 @@ class SumkDFT(object):
for sp in dens_mat[icrsh]:
if self.rot_mat_time_inv[icrsh] == 1:
dens_mat[icrsh][sp] = dens_mat[icrsh][sp].conjugate()
dens_mat[icrsh][sp] = np.dot(np.dot(self.rot_mat[icrsh].conjugate().transpose(), dens_mat[icrsh][sp]),
self.rot_mat[icrsh])
dens_mat[icrsh][sp] = np.dot(
np.dot(self.rot_mat[icrsh].conjugate().transpose(), dens_mat[icrsh][sp]),
self.rot_mat[icrsh])

return dens_mat


def density_matrix(self, method='using_gf', mu=None, with_Sigma=True, with_dc=True, broadening=None,
transform_to_solver_blocks=True, show_warnings=True):
transform_to_solver_blocks=True, show_warnings=True):
"""Calculate density matrices in one of two ways.

Parameters
@@ -1562,10 +1587,11 @@ class SumkDFT(object):
transform_to_solver_blocks, show_warnings)
dens_mat = [G.density() for G in Gloc]
elif method == "using_point_integration":
warn("WARNING: density_matrix: method 'using_point_integration' is deprecated. Use 'density_matrix_using_point_integration' instead. All additionally provided arguments are ignored.")
warn(
"WARNING: density_matrix: method 'using_point_integration' is deprecated. Use 'density_matrix_using_point_integration' instead. All additionally provided arguments are ignored.")
dens_mat = self.density_matrix_using_point_integration()
else:
raise ValueError("density_matrix: the method '%s' is not supported." % method)
raise ValueError("density_matrix: the method '%s' is not supported." % method)

return dens_mat

@@ -1622,10 +1648,10 @@ class SumkDFT(object):
n_orb = self.n_orbitals[ik, ind]
MMat = np.identity(n_orb, complex)
MMat = self.hopping[
ik, ind, 0:n_orb, 0:n_orb] - (1 - 2 * isp) * self.h_field * MMat
ik, ind, 0:n_orb, 0:n_orb] - (1 - 2 * isp) * self.h_field * MMat
projmat = self.proj_mat[ik, ind, icrsh, 0:dim, 0:n_orb]
self.Hsumk[icrsh][sp] += self.bz_weights[ik] * np.dot(np.dot(projmat, MMat),
projmat.conjugate().transpose())
projmat.conjugate().transpose())
# symmetrisation:
if self.symm_op != 0:
self.Hsumk = self.symmcorr.symmetrize(self.Hsumk)
@@ -1637,8 +1663,9 @@ class SumkDFT(object):
if self.rot_mat_time_inv[icrsh] == 1:
self.Hsumk[icrsh][sp] = self.Hsumk[
icrsh][sp].conjugate()
self.Hsumk[icrsh][sp] = np.dot(np.dot(self.rot_mat[icrsh].conjugate().transpose(), self.Hsumk[icrsh][sp]),
self.rot_mat[icrsh])
self.Hsumk[icrsh][sp] = np.dot(
np.dot(self.rot_mat[icrsh].conjugate().transpose(), self.Hsumk[icrsh][sp]),
self.rot_mat[icrsh])

# add to matrix:
for ish in range(self.n_inequiv_shells):
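The loop above rotates each shell matrix from the global to its local coordinate frame, H_loc = R^dagger H R, with an extra complex conjugation when the rotation involves time reversal. A standalone sketch using a hypothetical 2x2 block and rotation matrix:

import numpy as np

H = np.array([[0.4, 0.1 + 0.05j],
              [0.1 - 0.05j, -0.2]])          # hypothetical shell block in the global frame
theta = 0.3
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]], dtype=complex)   # stands in for rot_mat[icrsh]
time_inv = 0                                 # stands in for rot_mat_time_inv[icrsh]

if time_inv == 1:
    H = H.conjugate()                        # time-reversal case: conjugate before rotating
H_loc = rot.conj().T @ H @ rot               # R^dagger H R, as in the hunk above

# A unitary change of frame preserves the eigenvalues of the block
assert np.allclose(np.linalg.eigvalsh(H_loc), np.linalg.eigvalsh(H))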
@@ -1761,7 +1788,7 @@ class SumkDFT(object):
dim //= 2

if use_dc_value is None:
#For legacy compatibility
# For legacy compatibility
if use_dc_formula == 0:
mpi.report(f"Detected {use_dc_formula=}, changing to sFLL")
use_dc_formula = "sFLL"
@@ -1773,7 +1800,8 @@ class SumkDFT(object):
use_dc_formula = "sAMF"

for sp in spn:
DC_val, E_val = compute_DC_from_density(N_tot=Ncrtot,U=U_interact, J=J_hund, n_orbitals=dim, N_spin=Ncr[sp], method=use_dc_formula)
DC_val, E_val = compute_DC_from_density(N_tot=Ncrtot, U=U_interact, J=J_hund, n_orbitals=dim,
N_spin=Ncr[sp], method=use_dc_formula)
self.dc_imp[icrsh][sp] *= DC_val
self.dc_energ[icrsh] = E_val

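The double-counting values themselves come from compute_DC_from_density; purely for orientation, the conventional spin-dependent fully-localised-limit ('FLL') expression that the 'sFLL' label alludes to can be sketched as follows (an illustrative stand-in, not the packaged routine, whose conventions may differ in detail):

# Conventional FLL double counting, V_dc^sigma = U*(N - 1/2) - J*(N_sigma - 1/2);
# illustrative stand-in only, not the source of compute_DC_from_density.
def fll_double_counting(N_tot, N_spin, U, J):
    return U * (N_tot - 0.5) - J * (N_spin - 0.5)

# Hypothetical occupations for a d shell
print(fll_double_counting(N_tot=6.0, N_spin=3.0, U=4.0, J=0.9))   # 19.75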
@@ -1793,7 +1821,7 @@ class SumkDFT(object):
for sp in spn:
T = self.block_structure.effective_transformation_sumk[icrsh][sp]
self.dc_imp[icrsh][sp] = np.dot(T.conjugate().transpose(),
np.dot(self.dc_imp[icrsh][sp], T))
np.dot(self.dc_imp[icrsh][sp], T))

def add_dc(self):
r"""
@@ -1968,9 +1996,11 @@ class SumkDFT(object):
beta = 1 / broadening

if isinstance(self.mesh, MeshReFreq):
def tot_den(bgf): return bgf.total_density(beta)
def tot_den(bgf):
return bgf.total_density(beta)
else:
def tot_den(bgf): return bgf.total_density()
def tot_den(bgf):
return bgf.total_density()

dens = 0.0
ikarray = np.array(list(range(self.n_k)))
@@ -2030,6 +2060,7 @@ class SumkDFT(object):
within specified precision.

"""

def find_bounds(function, x_init, delta_x, max_loops=1000):
mpi.report("Finding bounds on chemical potential")
x = x_init
@@ -2042,12 +2073,12 @@ class SumkDFT(object):

nbre_loop = 0
# abort the loop after maxiter is reached or when y1 and y2 have different sign
while (nbre_loop <= max_loops) and (y2*y1) > 0:
while (nbre_loop <= max_loops) and (y2 * y1) > 0:
nbre_loop += 1
x1 = x2
y1 = y2

x2 -= eps*delta_x
x2 -= eps * delta_x
y2 = function(x2)

if nbre_loop > (max_loops):
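find_bounds widens the trial interval step by step until the target function changes sign, so that the returned pair of points is guaranteed to bracket a root. A compact standalone version of the same idea; the test function, starting point and step size are arbitrary examples:

def find_sign_change(function, x_init, delta_x, max_loops=1000):
    """Walk in steps of delta_x until function changes sign; return the bracketing pair."""
    x1, x2 = x_init, x_init
    y1 = y2 = function(x_init)
    loops = 0
    while loops <= max_loops and y1 * y2 > 0:
        loops += 1
        x1, y1 = x2, y2
        x2 += delta_x
        y2 = function(x2)
    if y1 * y2 > 0:
        raise RuntimeError("no sign change found within max_loops steps")
    return x1, x2

# Example: bracket the root of f(x) = x - 2.3 starting from 0
print(find_sign_change(lambda x: x - 2.3, x_init=0.0, delta_x=0.5))   # (2.0, 2.5)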
@@ -2064,8 +2095,11 @@ class SumkDFT(object):

# previous implementation

def F_bisection(mu): return self.total_density(mu=mu, broadening=broadening, beta=beta).real
def F_bisection(mu):
return self.total_density(mu=mu, broadening=broadening, beta=beta).real

density = self.density_required - self.charge_below

# using scipy.optimize

def F_optimize(mu):
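Once a bracket is known, the chemical potential is the root of N(mu) minus the required density, which is what the bisection and scipy.optimize branches above solve. A standalone sketch with a toy two-level filling function; every number below is invented, and brentq is just one possible root finder:

import numpy as np
from scipy.optimize import brentq

def total_density(mu, beta=40.0):
    """Toy filling: two levels at -1.0 and +0.5 eV occupied by Fermi functions."""
    levels = np.array([-1.0, 0.5])
    return float(np.sum(1.0 / (np.exp(beta * (levels - mu)) + 1.0)))

density_required = 1.0
# Root of N(mu) - N_required inside a bracket known to contain it
mu = brentq(lambda m: total_density(m) - density_required, -2.0, 2.0)
print(round(mu, 3))   # -0.25, halfway between the two toy levels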
@@ -2119,7 +2153,8 @@ class SumkDFT(object):

return self.chemical_potential

def calc_density_correction(self, filename=None, dm_type=None, spinave=False, kpts_to_write=None, broadening=None, beta=None):
def calc_density_correction(self, filename=None, dm_type=None, spinave=False, kpts_to_write=None, broadening=None,
beta=None):
r"""
Calculates the charge density correction and stores it into a file.

@@ -2159,12 +2194,12 @@ class SumkDFT(object):
the corresponing total charge `dens`.

"""
#automatically set dm_type if required
# automatically set dm_type if required
if dm_type is None:
dm_type = self.dft_code

assert dm_type in ('vasp', 'wien2k','elk', 'qe'), "'dm_type' must be either 'vasp', 'wienk', 'elk' or 'qe'"
#default file names
assert dm_type in ('vasp', 'wien2k', 'elk', 'qe'), "'dm_type' must be either 'vasp', 'wienk', 'elk' or 'qe'"
# default file names
if filename is None:
if dm_type == 'wien2k':
filename = 'dens_mat.dat'
@@ -2175,40 +2210,38 @@ class SumkDFT(object):
elif dm_type == 'qe':
filename = self.hdf_file


assert isinstance(filename, str), ("calc_density_correction: "
"filename has to be a string!")
"filename has to be a string!")

assert kpts_to_write is None or dm_type == 'vasp', ('Selecting k-points only'
+'implemented for vasp')
+ 'implemented for vasp')

ntoi = self.spin_names_to_ind[self.SO]
spn = self.spin_block_names[self.SO]
dens = {sp: 0.0 for sp in spn}
band_en_correction = 0.0

# Fetch Fermi weights and energy window band indices
if dm_type in ['vasp','qe']:
# Fetch Fermi weights and energy window band indices
if dm_type in ['vasp', 'qe']:
fermi_weights = 0
band_window = 0
if mpi.is_master_node():
with HDFArchive(self.hdf_file,'r') as ar:
with HDFArchive(self.hdf_file, 'r') as ar:
fermi_weights = ar['dft_misc_input']['dft_fermi_weights']
band_window = ar['dft_misc_input']['band_window']
fermi_weights = mpi.bcast(fermi_weights)
band_window = mpi.bcast(band_window)

# Convert Fermi weights to a density matrix
# Convert Fermi weights to a density matrix
dens_mat_dft = {}
for sp in spn:
dens_mat_dft[sp] = [fermi_weights[ik, ntoi[sp], :].astype(complex) for ik in range(self.n_k)]


# Set up deltaN:
deltaN = {}
for sp in spn:
deltaN[sp] = [np.zeros([self.n_orbitals[ik, ntoi[sp]], self.n_orbitals[
ik, ntoi[sp]]], complex) for ik in range(self.n_k)]
ik, ntoi[sp]]], complex) for ik in range(self.n_k)]

ikarray = np.arange(self.n_k)
for ik in mpi.slice_array(ikarray):
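The block above broadcasts the DFT Fermi weights, stores them as diagonal band-space density matrices, and allocates a deltaN container of matching shape; the correction is later formed by subtracting these occupations from the interacting ones on the diagonal. A small standalone sketch with invented weights and band counts:

import numpy as np

n_k = 2
n_orbitals = [3, 4]                                  # hypothetical bands in the window at each k-point
fermi_weights = [np.array([1.0, 0.8, 0.0]),
                 np.array([1.0, 1.0, 0.3, 0.0])]     # hypothetical DFT occupations

# Diagonal DFT density matrix and an empty correction matrix per k-point
dens_mat_dft = [w.astype(complex) for w in fermi_weights]
deltaN = [np.zeros((n_orbitals[ik], n_orbitals[ik]), complex) for ik in range(n_k)]

for ik in range(n_k):
    nb = n_orbitals[ik]
    diag = np.diag_indices(nb)
    # Subtract the DFT occupations on the diagonal; the interacting part is still zero here
    deltaN[ik][diag] -= dens_mat_dft[ik][:nb]

print(np.diagonal(deltaN[0]).real)   # minus the DFT occupations at the first k-point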
@@ -2219,7 +2252,7 @@ class SumkDFT(object):
for bname, gf in G_latt:
G_latt_rot = gf.copy()
G_latt_rot << self.upfold(
ik, 0, bname, G_latt[bname], gf,shells='csc')
ik, 0, bname, G_latt[bname], gf, shells='csc')

G_latt[bname] = G_latt_rot.copy()

@@ -2230,23 +2263,25 @@ class SumkDFT(object):
dens[bname] += self.bz_weights[ik] * G_latt[bname].total_density()
else:
dens[bname] += self.bz_weights[ik] * G_latt[bname].total_density(beta)
if dm_type in ['vasp','qe']:
# In 'vasp'-mode subtract the DFT density matrix
if dm_type in ['vasp', 'qe']:
# In 'vasp'-mode subtract the DFT density matrix
nb = self.n_orbitals[ik, ntoi[bname]]
diag_inds = np.diag_indices(nb)
deltaN[bname][ik][diag_inds] -= dens_mat_dft[bname][ik][:nb]

if self.charge_mixing and self.deltaNOld is not None:
G2 = np.sum(self.kpts_cart[ik,:]**2)
G2 = np.sum(self.kpts_cart[ik, :] ** 2)
# Kerker mixing
mix_fac = self.charge_mixing_alpha * G2 / (G2 + self.charge_mixing_gamma**2)
deltaN[bname][ik][diag_inds] = (1.0 - mix_fac) * self.deltaNOld[bname][ik][diag_inds] + mix_fac * deltaN[bname][ik][diag_inds]
mix_fac = self.charge_mixing_alpha * G2 / (G2 + self.charge_mixing_gamma ** 2)
deltaN[bname][ik][diag_inds] = (1.0 - mix_fac) * self.deltaNOld[bname][ik][
diag_inds] + mix_fac * deltaN[bname][ik][diag_inds]
dens[bname] -= self.bz_weights[ik] * dens_mat_dft[bname][ik].sum().real
isp = ntoi[bname]
b1, b2 = band_window[isp][ik, :2]
nb = b2 - b1 + 1
assert nb == self.n_orbitals[ik, ntoi[bname]], "Number of bands is inconsistent at ik = %s"%(ik)
band_en_correction += np.dot(deltaN[bname][ik], self.hopping[ik, isp, :nb, :nb]).trace().real * self.bz_weights[ik]
assert nb == self.n_orbitals[ik, ntoi[bname]], "Number of bands is inconsistent at ik = %s" % (ik)
band_en_correction += np.dot(deltaN[bname][ik], self.hopping[ik, isp, :nb, :nb]).trace().real * \
self.bz_weights[ik]

# mpi reduce:
for bname in deltaN:
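The Kerker factor used above, mix_fac = alpha * G^2 / (G^2 + gamma^2), damps the long-wavelength (small |G|) components of the density update most strongly, which suppresses charge sloshing between iterations. A standalone sketch; alpha, gamma and the k-vectors are hypothetical:

import numpy as np

alpha, gamma = 0.7, 1.0                      # stand-ins for charge_mixing_alpha / charge_mixing_gamma
k_vectors = np.array([[0.0, 0.0, 0.0],
                      [0.1, 0.0, 0.0],
                      [1.0, 1.0, 0.0]])      # hypothetical Cartesian k-points

for k in k_vectors:
    G2 = np.sum(k ** 2)
    mix_fac = alpha * G2 / (G2 + gamma ** 2)   # Kerker damping factor
    # new_delta = (1 - mix_fac) * old_delta + mix_fac * new_delta, as in the hunk above
    print(f"|G|^2 = {G2:.2f}  ->  mix_fac = {mix_fac:.3f}")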
@@ -2256,8 +2291,6 @@ class SumkDFT(object):
self.deltaNOld = copy.copy(deltaN)
mpi.barrier()



band_en_correction = mpi.all_reduce(band_en_correction)

# now save to file:
@@ -2285,9 +2318,9 @@ class SumkDFT(object):
for inu in range(self.n_orbitals[ik, 0]):
for imu in range(self.n_orbitals[ik, 0]):
valre = (deltaN['up'][ik][
inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
valim = (deltaN['up'][ik][
inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
f.write("%.14f %.14f " % (valre, valim))
f.write("\n")
f.write("\n")
@@ -2307,7 +2340,7 @@ class SumkDFT(object):
for inu in range(self.n_orbitals[ik, isp]):
for imu in range(self.n_orbitals[ik, isp]):
fout.write("%.14f %.14f " % (deltaN[sp][ik][
inu, imu].real, deltaN[sp][ik][inu, imu].imag))
inu, imu].real, deltaN[sp][ik][inu, imu].imag))
fout.write("\n")
fout.write("\n")
fout.close()
@@ -2321,16 +2354,21 @@ class SumkDFT(object):

if mpi.is_master_node():
with open(filename, 'w') as f:
f.write(" %i -1 ! Number of k-points, default number of bands\n"%len(kpts_to_write))
f.write(" %i -1 ! Number of k-points, default number of bands\n" % len(kpts_to_write))
for index, ik in enumerate(kpts_to_write):
ib1 = band_window[0][ik, 0]
ib2 = band_window[0][ik, 1]
f.write(" %i %i %i\n"%(index + 1, ib1, ib2))
f.write(" %i %i %i\n" % (index + 1, ib1, ib2))
for inu in range(self.n_orbitals[ik, 0]):
for imu in range(self.n_orbitals[ik, 0]):
valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
f.write(" %.14f %.14f"%(valre, valim))
if (self.SO == 1):
valre = (deltaN['ud'][ik][inu, imu].real) / 1.0
valim = (deltaN['ud'][ik][inu, imu].imag) / 1.0
f.write(" %.14f %.14f" % (valre, valim))
else:
valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
f.write(" %.14f %.14f" % (valre, valim))
f.write("\n")

if os.path.isfile('vasptriqs.h5'):
@@ -2342,54 +2380,58 @@ class SumkDFT(object):


elif dm_type == 'elk':
# output each k-point density matrix for Elk
# output each k-point density matrix for Elk
if mpi.is_master_node():
# read in misc data from .h5 file
things_to_read = ['band_window','vkl','nstsv']
# read in misc data from .h5 file
things_to_read = ['band_window', 'vkl', 'nstsv']
self.subgroup_present, self.value_read = self.read_input_from_hdf(
subgrp=self.misc_data, things_to_read=things_to_read)
# open file
subgrp=self.misc_data, things_to_read=things_to_read)
# open file
with open(filename, 'w') as f:
# determine the number of spin blocks
# determine the number of spin blocks
n_spin_blocks = self.SP + 1 - self.SO
nbmax = np.max(self.n_orbitals)
# output beta and mu in Hartrees
# output beta and mu in Hartrees
beta = self.mesh.beta * self.energy_unit
mu = self.chemical_potential/self.energy_unit
# ouput n_k, nspin and max orbitals - a check
f.write(" %d %d %d %.14f %.14f ! nkpt, nspin, nstmax, beta, mu\n"%(self.n_k, n_spin_blocks, nbmax, beta, mu))
mu = self.chemical_potential / self.energy_unit
# ouput n_k, nspin and max orbitals - a check
f.write(" %d %d %d %.14f %.14f ! nkpt, nspin, nstmax, beta, mu\n" % (
self.n_k, n_spin_blocks, nbmax, beta, mu))
for ik in range(self.n_k):
for ispn in range(n_spin_blocks):
#Determine the SO density matrix band indices from the spinor band indices
if(self.SO==1):
band0=[self.band_window[0][ik, 0],self.band_window[1][ik, 0]]
band1=[self.band_window[0][ik, 1],self.band_window[1][ik, 1]]
ib1=int(min(band0))
ib2=int(max(band1))
else:
#Determine the density matrix band indices from the spinor band indices
ib1 = self.band_window[ispn][ik, 0]
ib2 = self.band_window[ispn][ik, 1]
f.write(" %d %d %d %d ! ik, ispn, minist, maxist\n"%(ik + 1, ispn + 1, ib1, ib2))
for inu in range(self.n_orbitals[ik, ispn]):
for imu in range(self.n_orbitals[ik, ispn]):
#output non-magnetic or spin-averaged density matrix
if((self.SP==0) or (spinave)):
valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
else:
valre = deltaN[spn[ispn]][ik][inu, imu].real
valim = deltaN[spn[ispn]][ik][inu, imu].imag
f.write(" %.14f %.14f"%(valre, valim))
f.write("\n")
for ispn in range(n_spin_blocks):
# Determine the SO density matrix band indices from the spinor band indices
if (self.SO == 1):
band0 = [self.band_window[0][ik, 0], self.band_window[1][ik, 0]]
band1 = [self.band_window[0][ik, 1], self.band_window[1][ik, 1]]
ib1 = int(min(band0))
ib2 = int(max(band1))
else:
# Determine the density matrix band indices from the spinor band indices
ib1 = self.band_window[ispn][ik, 0]
ib2 = self.band_window[ispn][ik, 1]
f.write(" %d %d %d %d ! ik, ispn, minist, maxist\n" % (ik + 1, ispn + 1, ib1, ib2))
for inu in range(self.n_orbitals[ik, ispn]):
for imu in range(self.n_orbitals[ik, ispn]):
# output non-magnetic or spin-averaged density matrix
if ((self.SP == 0) or (spinave)):
valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][
inu, imu].real) / 2.0
valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][
inu, imu].imag) / 2.0
else:
valre = deltaN[spn[ispn]][ik][inu, imu].real
valim = deltaN[spn[ispn]][ik][inu, imu].imag
f.write(" %.14f %.14f" % (valre, valim))
f.write("\n")

elif dm_type == 'qe':
if self.SP == 0:
mpi.report("SUMK calc_density_correction: WARNING! Averaging out spin-polarized correction in the density channel")
mpi.report(
"SUMK calc_density_correction: WARNING! Averaging out spin-polarized correction in the density channel")

subgrp = 'dft_update'
delta_N = np.zeros([self.n_k, max(self.n_orbitals[:,0]), max(self.n_orbitals[:,0])], dtype=complex)
mpi.report(" %i -1 ! Number of k-points, default number of bands\n"%(self.n_k))
delta_N = np.zeros([self.n_k, max(self.n_orbitals[:, 0]), max(self.n_orbitals[:, 0])], dtype=complex)
mpi.report(" %i -1 ! Number of k-points, default number of bands\n" % (self.n_k))
for ik in range(self.n_k):
ib1 = band_window[0][ik, 0]
ib2 = band_window[0][ik, 1]
@@ -2398,7 +2440,7 @@ class SumkDFT(object):
valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
# write into delta_N
delta_N[ik, inu, imu] = valre + 1j*valim
delta_N[ik, inu, imu] = valre + 1j * valim
if mpi.is_master_node():
with HDFArchive(self.hdf_file, 'a') as ar:
if subgrp not in ar:
@@ -2409,7 +2451,7 @@ class SumkDFT(object):


else:
raise NotImplementedError("Unknown density matrix type: '%s'"%(dm_type))
raise NotImplementedError("Unknown density matrix type: '%s'" % (dm_type))

res = deltaN, dens

@@ -2431,9 +2473,9 @@ class SumkDFT(object):
self.max_band_energy = max_band_energy
return min_band_energy, max_band_energy

################
# FIXME LEAVE UNDOCUMENTED
################
################
# FIXME LEAVE UNDOCUMENTED
################

def check_projectors(self):
"""Calculated the density matrix from projectors (DM = P Pdagger) to check that it is correct and
@@ -2447,7 +2489,7 @@ class SumkDFT(object):
n_orb = self.n_orbitals[ik, 0]
projmat = self.proj_mat[ik, 0, icrsh, 0:dim, 0:n_orb]
dens_mat[icrsh][
:, :] += np.dot(projmat, projmat.transpose().conjugate()) * self.bz_weights[ik]
:, :] += np.dot(projmat, projmat.transpose().conjugate()) * self.bz_weights[ik]

if self.symm_op != 0:
dens_mat = self.symmcorr.symmetrize(dens_mat)
@@ -2458,7 +2500,7 @@ class SumkDFT(object):
if self.rot_mat_time_inv[icrsh] == 1:
dens_mat[icrsh] = dens_mat[icrsh].conjugate()
dens_mat[icrsh] = np.dot(np.dot(self.rot_mat[icrsh].conjugate().transpose(), dens_mat[icrsh]),
self.rot_mat[icrsh])
self.rot_mat[icrsh])

return dens_mat

@@ -2482,39 +2524,51 @@ class SumkDFT(object):
# after introducing the block_structure class
def __get_gf_struct_sumk(self):
return self.block_structure.gf_struct_sumk
def __set_gf_struct_sumk(self,value):

def __set_gf_struct_sumk(self, value):
self.block_structure.gf_struct_sumk = value
gf_struct_sumk = property(__get_gf_struct_sumk,__set_gf_struct_sumk)

gf_struct_sumk = property(__get_gf_struct_sumk, __set_gf_struct_sumk)

def __get_gf_struct_solver(self):
return self.block_structure.gf_struct_solver
def __set_gf_struct_solver(self,value):

def __set_gf_struct_solver(self, value):
self.block_structure.gf_struct_solver = value
gf_struct_solver = property(__get_gf_struct_solver,__set_gf_struct_solver)

gf_struct_solver = property(__get_gf_struct_solver, __set_gf_struct_solver)

def __get_solver_to_sumk(self):
return self.block_structure.solver_to_sumk
def __set_solver_to_sumk(self,value):

def __set_solver_to_sumk(self, value):
self.block_structure.solver_to_sumk = value
solver_to_sumk = property(__get_solver_to_sumk,__set_solver_to_sumk)

solver_to_sumk = property(__get_solver_to_sumk, __set_solver_to_sumk)

def __get_sumk_to_solver(self):
return self.block_structure.sumk_to_solver
def __set_sumk_to_solver(self,value):

def __set_sumk_to_solver(self, value):
self.block_structure.sumk_to_solver = value
sumk_to_solver = property(__get_sumk_to_solver,__set_sumk_to_solver)

sumk_to_solver = property(__get_sumk_to_solver, __set_sumk_to_solver)

def __get_solver_to_sumk_block(self):
return self.block_structure.solver_to_sumk_block
def __set_solver_to_sumk_block(self,value):

def __set_solver_to_sumk_block(self, value):
self.block_structure.solver_to_sumk_block = value
solver_to_sumk_block = property(__get_solver_to_sumk_block,__set_solver_to_sumk_block)

solver_to_sumk_block = property(__get_solver_to_sumk_block, __set_solver_to_sumk_block)

def __get_deg_shells(self):
return self.block_structure.deg_shells
def __set_deg_shells(self,value):

def __set_deg_shells(self, value):
self.block_structure.deg_shells = value
deg_shells = property(__get_deg_shells,__set_deg_shells)

deg_shells = property(__get_deg_shells, __set_deg_shells)

@property
def gf_struct_solver_list(self):
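The getter/setter pairs above simply forward attribute access to the underlying block_structure object, so the structural data lives in exactly one place. A stripped-down sketch of the same property-delegation pattern; the class and attribute names here are hypothetical:

class Inner:
    def __init__(self):
        self.gf_struct = None

class Outer:
    """Expose Inner.gf_struct as if it were an attribute of Outer."""
    def __init__(self):
        self.inner = Inner()

    def __get_gf_struct(self):
        return self.inner.gf_struct

    def __set_gf_struct(self, value):
        self.inner.gf_struct = value

    gf_struct = property(__get_gf_struct, __set_gf_struct)

o = Outer()
o.gf_struct = [('up', 2), ('down', 2)]
print(o.inner.gf_struct)   # the write went straight to the delegated object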
@@ -2534,12 +2588,16 @@ class SumkDFT(object):

def __get_corr_to_inequiv(self):
return self.block_structure.corr_to_inequiv

def __set_corr_to_inequiv(self, value):
self.block_structure.corr_to_inequiv = value

corr_to_inequiv = property(__get_corr_to_inequiv, __set_corr_to_inequiv)

def __get_inequiv_to_corr(self):
return self.block_structure.inequiv_to_corr

def __set_inequiv_to_corr(self, value):
self.block_structure.inequiv_to_corr = value

inequiv_to_corr = property(__get_inequiv_to_corr, __set_inequiv_to_corr)