Mirror of https://github.com/triqs/dft_tools

fixed tests for incorporating kpts

commit 6b11183f4d (parent 7a6450b6fa)
Author: Malte Schüler
Date:   2019-11-21 21:34:37 +01:00
14 changed files with 214 additions and 31 deletions

View File

@@ -123,6 +123,21 @@ class ElectronicStructure:
# Concatenate coordinates grouped by type into one array
self.structure['qcoords'] = np.vstack(vasp_data.poscar.q_types)
self.structure['type_of_ion'] = vasp_data.poscar.type_of_ion
a = []
for ia in range(3):
a.append( vasp_data.poscar.a_brav[:,ia])
vol = np.dot(a[0],np.cross(a[1],a[2]))
b1 = 2.0*np.pi*np.cross(a[1],a[2])/vol
b2 = 2.0*np.pi*np.cross(a[2],a[0])/vol
b3 = 2.0*np.pi*np.cross(a[0],a[1])/vol
b = [b1,b2,b3]
self.kmesh['kpoints_cart'] = 0.0 * self.kmesh['kpoints']
for ik in range(self.nktot):
for ii in range(3):
self.kmesh['kpoints_cart'][ik] += self.kmesh['kpoints'][ik,ii]*b[ii]
# FIXME: This can be removed if ion coordinates are stored in a continuous array
## Construct a map to access coordinates by index
# self.structure['ion_index'] = []
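The added lines build the reciprocal lattice vectors b1, b2, b3 from the Bravais matrix via cross products and then convert every fractional k-point to Cartesian coordinates in a double loop over k-points and components. For illustration only, the same conversion can be written in vectorized NumPy; the sketch below uses stand-in arrays (lattice vectors as rows of a_brav) instead of the module's vasp_data/kmesh objects.

import numpy as np

# Stand-ins for vasp_data.poscar.a_brav and self.kmesh['kpoints']:
# rows of a_brav are the Bravais vectors a1, a2, a3; kpoints holds
# fractional (direct) coordinates with shape (nktot, 3).
a_brav = np.diag([3.9, 3.9, 3.9])
kpoints = np.array([[0.0, 0.0, 0.0],
                    [0.5, 0.0, 0.0],
                    [0.5, 0.5, 0.0]])

# Rows of B are the reciprocal vectors b1, b2, b3, defined by a_i . b_j = 2*pi*delta_ij;
# this is equivalent to the explicit cross-product construction above.
B = 2.0 * np.pi * np.linalg.inv(a_brav).T

# Cartesian k-points: each fractional k-point is a linear combination of the b_i.
kpoints_cart = kpoints.dot(B)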

View File

@@ -85,6 +85,7 @@ class ConfigParameters:
self.sh_optional = {
'transform': ('tmatrix', lambda s: self.parse_string_tmatrix(s, real=True)),
'transfile': ('tmatrices', self.parse_file_tmatrix),
'sort': ('ion_sort', self.parse_string_int,None),
'corr': ('corr', self.parse_string_logical, True)}
self.gr_required = {
@@ -195,6 +196,18 @@ class ConfigParameters:
assert first_char in 'tf', "Logical parameters should be given by either 'True' or 'False'"
return first_char == 't'
################################################################################
#
# parse_string_int()
#
################################################################################
def parse_string_int(self, par_str):
"""
int parameters
"""
return int(par_str)
################################################################################
#
# parse_energy_window()
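Entries of sh_optional map an input-file key to (internal_name, parser) or (internal_name, parser, default), so the new 'sort' key is parsed with parse_string_int and falls back to None when the [Shell] section omits it. A minimal stand-alone illustration of that lookup pattern (read_optional and the plain dict below are hypothetical helpers, not the actual ConfigParameters machinery):

def parse_string_int(par_str):
    "Parse an integer parameter from its string representation."
    return int(par_str)

# Same layout as sh_optional above: (internal_name, parser) or (internal_name, parser, default).
sh_optional = {'sort': ('ion_sort', parse_string_int, None)}

def read_optional(conf_section, key):
    "Return (internal_name, value) for an optional key of a parsed config section."
    entry = sh_optional[key]
    name, parser = entry[:2]
    default = entry[2] if len(entry) > 2 else None
    raw = conf_section.get(key)
    return name, (default if raw is None else parser(raw))

print read_optional({'sort': '2'}, 'sort')   # ('ion_sort', 2)
print read_optional({}, 'sort')              # ('ion_sort', None)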

View File

@@ -121,6 +121,7 @@ def generate_plo(conf_pars, el_struct):
print " Number of ions: %i"%(pshell.nion)
print " Dimension : %i"%(pshell.ndim)
print " Correlated : %r"%(pshell.corr)
print " Ion sort : %r"%(pshell.ion_sort)
pshells.append(pshell)
@@ -286,6 +287,13 @@ def ctrl_output(conf_pars, el_struct, ng):
tmp1 = "".join(map("{0:15.10f}".format, kp))
out = tmp1 + "{0:16.10f}".format(el_struct.kmesh['kweights'][ik])
f.write(out + "\n")
f.write("# k-points and weights cartesian\n")
labels = ['kx', 'ky', 'kz']
out = "".join(map(lambda s: s.center(15), labels))
f.write("#" + out + "\n")
for ik, kp in enumerate(el_struct.kmesh['kpoints_cart']):
out = "".join(map("{0:15.10f}".format, kp))
f.write(out + "\n")
################################################################################
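These lines append a second block to the *.ctrl file: after the fractional k-points and weights, a '# k-points and weights cartesian' header is written, followed by a centered 'kx ky kz' label line and one fixed-width row per k-point (no weight column this time). A stand-alone sketch of that writer, with a placeholder file name and made-up data:

import numpy as np

kpoints_cart = np.array([[0.0, 0.0, 0.0],          # stand-in for el_struct.kmesh['kpoints_cart']
                         [0.805, 0.0, 0.0]])

with open('example.ctrl', 'w') as f:               # placeholder file name
    f.write("# k-points and weights cartesian\n")
    labels = ['kx', 'ky', 'kz']
    f.write("#" + "".join(s.center(15) for s in labels) + "\n")
    for kp in kpoints_cart:
        f.write("".join("{0:15.10f}".format(x) for x in kp) + "\n")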

View File

@@ -73,6 +73,7 @@ class ProjectorShell:
self.ions = sh_pars['ions']
self.user_index = sh_pars['user_index']
self.corr = sh_pars['corr']
self.ion_sort = [sh_pars['ion_sort']]
self.nc_flag = nc_flag
# try:
# self.tmatrix = sh_pars['tmatrix']
@@ -85,6 +86,8 @@ class ProjectorShell:
self.nion = self.ions['nion']
# Extract ion list and equivalence classes (ion sorts)
self.ion_list = sorted(it.chain(*self.ions['ion_list']))
if self.ion_sort[0] is None:
self.ion_sort = []
for ion in self.ion_list:
for icl, eq_cl in enumerate(self.ions['ion_list']):
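The intent of the truncated hunk above: if the user supplied 'sort', it is wrapped in a one-element list and used for the whole shell; otherwise ion_sort is rebuilt from the equivalence classes stored in ions['ion_list']. One plausible stand-alone version of that fallback (with invented example data; the real code may number the classes from 1 instead of 0):

import itertools as it

ions = {'nion': 4, 'ion_list': [[4, 5], [6, 7]]}   # two invented equivalence classes
user_sort = None                                   # stand-in for sh_pars['ion_sort']

ion_list = sorted(it.chain(*ions['ion_list']))
if user_sort is not None:
    ion_sort = [user_sort]                         # a single user-chosen sort for the shell
else:
    ion_sort = []
    for ion in ion_list:
        for icl, eq_cl in enumerate(ions['ion_list']):
            if ion in eq_cl:
                ion_sort.append(icl)

print ion_sort                                     # [0, 0, 1, 1]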

View File

@@ -123,14 +123,7 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
break
# Tell VASP to stop if the maximum number of iterations is reached
iter += 1
if iter == n_iter:
if mpi.is_master_node():
print "\n Maximum number of iterations reached."
print " Aborting VASP iterations...\n"
f_stop = open('STOPCAR', 'wt')
f_stop.write("LABORT = .TRUE.\n")
f_stop.close()
if debug: print bcolors.MAGENTA + "rank %s"%(mpi.rank) + bcolors.ENDC
err = 0
@@ -166,6 +159,7 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
# the hack consists of removing the call of LPRJ_LDApU in VASP src file
# electron.F around line 644
iter_dft = 0
if vasp_version == 'standard':
copyfile(src='GAMMA',dst='GAMMA_recent')
while iter_dft < n_iter_dft:
@@ -180,7 +174,13 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
iter_dft += 1
if vasp_version == 'standard':
copyfile(src='GAMMA_recent',dst='GAMMA')
iter += 1
if iter == n_iter:
print "\n Maximum number of iterations reached."
print " Aborting VASP iterations...\n"
f_stop = open('STOPCAR', 'wt')
f_stop.write("LABORT = .TRUE.\n")
f_stop.close()
if mpi.is_master_node():
total_energy = dft_energy + corr_energy - dft_dc
with open('TOTENERGY', 'w') as f:
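Taken together, these hunks move the iteration bookkeeping: the counter is now incremented, and the STOPCAR abort written, after the inner DFT loop instead of at the top of the charge self-consistency loop. Schematically (an outline with placeholder callables csc_step and dft_step, not the actual run_all code):

def run_csc(n_iter, n_iter_dft, csc_step, dft_step):
    "n_iter charge-self-consistency steps, each followed by n_iter_dft plain DFT steps."
    it = 0
    while it < n_iter:
        csc_step()                     # DMFT cycle, writes a new GAMMA file
        it_dft = 0
        while it_dft < n_iter_dft:
            dft_step()                 # plain VASP iterations with the current GAMMA
            it_dft += 1
        it += 1
        if it == n_iter:
            # Tell VASP to abort by writing STOPCAR with LABORT = .TRUE.
            f_stop = open('STOPCAR', 'wt')
            f_stop.write("LABORT = .TRUE.\n")
            f_stop.close()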

View File

@@ -160,12 +160,16 @@ class VaspConverter(ConverterTools):
SO = ctrl_head['nc_flag']
kpts = numpy.zeros((n_k, 3))
kpts_cart = numpy.zeros((n_k, 3))
bz_weights = numpy.zeros(n_k)
try:
for ik in xrange(n_k):
kx, ky, kz = rf.next(), rf.next(), rf.next()
kpts[ik, :] = kx, ky, kz
bz_weights[ik] = rf.next()
for ik in xrange(n_k):
kx, ky, kz = rf.next(), rf.next(), rf.next()
kpts_cart[ik, :] = kx, ky, kz
except StopIteration:
raise "VaspConverter: error reading %s"%self.ctrl_file
@@ -220,12 +224,13 @@ class VaspConverter(ConverterTools):
# TODO: check what 'irep' entry does (it seems to be very specific to dmftproj)
pars['irep'] = 0
shells.append(pars)
shion_to_shell[ish].append(i)
shion_to_shell[ish].append(ish)
shorbs_to_globalorbs[ish].append([last_dimension,
last_dimension+sh['ndim']])
last_dimension = last_dimension+sh['ndim']
if sh['corr']:
corr_shells.append(pars)
print shorbs_to_globalorbs[ish]
# TODO: generalize this to the case of multiple shell groups
@@ -336,7 +341,6 @@ class VaspConverter(ConverterTools):
# now save only projectors with flag 'corr' to proj_mat
proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max([crsh['dim'] for crsh in corr_shells]), numpy.max(n_orbitals)], numpy.complex_)
if self.proj_or_hk == 'proj':
for ish, sh in enumerate(p_shells):
if sh['corr']:
@@ -379,7 +383,7 @@ class VaspConverter(ConverterTools):
things_to_save = ['energy_unit','n_k','k_dep_projection','SP','SO','charge_below','density_required',
'symm_op','n_shells','shells','n_corr_shells','corr_shells','use_rotations','rot_mat',
'rot_mat_time_inv','n_reps','dim_reps','T','n_orbitals','proj_mat','bz_weights','hopping',
'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr','proj_or_hk']
'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr','proj_or_hk','kpts','kpts_cart']
if self.proj_or_hk == 'hk' or True:
things_to_save.append('proj_mat_csc')
for it in things_to_save: ar[self.dft_subgrp][it] = locals()[it]
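Because 'kpts' and 'kpts_cart' are now in things_to_save, both arrays land in the dft_input group of the hdf5 archive alongside the projectors. Assuming the Python 2 / TRIQS 2.x environment of this commit and the default group name, they can be inspected like this (the archive name is a placeholder):

from pytriqs.archive import HDFArchive

with HDFArchive('vasp_converted.h5', 'r') as ar:      # placeholder file name
    kpts = ar['dft_input']['kpts']                    # fractional k-points, shape (n_k, 3)
    kpts_cart = ar['dft_input']['kpts_cart']          # the same points in Cartesian coordinates
    print kpts.shape, kpts_cart.shape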

View File

@@ -98,7 +98,7 @@ class SumkDFT(object):
things_to_read = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'proj_mat_csc', 'bz_weights', 'hopping',
'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr','proj_or_hk']
'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr','proj_or_hk','kpts_cart']
self.subgroup_present, self.value_read = self.read_input_from_hdf(
subgrp=self.dft_data, things_to_read=things_to_read)
# if self.proj_or_hk == 'hk':
@@ -153,6 +153,13 @@ class SumkDFT(object):
self.chemical_potential = 0.0 # initialise mu
self.init_dc() # initialise the double counting
# charge mixing parameters
self.charge_mixing = False
# defaults from PRB 90 235103 ("... slow but stable convergence ...")
self.charge_mixing_alpha = 0.1
self.charge_mixing_gamma = 1.0
self.deltaNOld = None
# Analyse the block structure and determine the smallest gf_struct
# blocks and maps, if desired
if use_dft_blocks:
@@ -321,7 +328,7 @@ class SumkDFT(object):
dim = self.shells[ish]['dim']
projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb]
elif shells == 'csc':
projmat = self.proj_mat_csc[ik, isp, 0:n_orb, 0:n_orb]
projmat = self.proj_mat_csc[ik, isp, :, 0:n_orb]
gf_downfolded.from_L_G_R(
projmat, gf_to_downfold, projmat.conjugate().transpose())
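The corrected slice lets the 'csc' branch keep all local orbitals in the first index while restricting the band index to n_orb. The downfolding itself is G_loc = P G_latt P^dagger, which from_L_G_R applies at every mesh point; a plain NumPy equivalent for a single frequency, with made-up dimensions, is:

import numpy as np

n_orb, n_loc = 6, 3                                        # made-up band / local-orbital counts
P = np.random.rand(n_loc, n_orb) + 1j * np.random.rand(n_loc, n_orb)        # projector P(k)
G_latt = np.random.rand(n_orb, n_orb) + 1j * np.random.rand(n_orb, n_orb)   # lattice GF at one omega

# Downfold: G_loc = P * G_latt * P^dagger (what from_L_G_R(P, G, P.conj().T) evaluates).
G_loc = P.dot(G_latt).dot(P.conjugate().transpose())
print G_loc.shape                                          # (3, 3)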
@@ -615,6 +622,7 @@ class SumkDFT(object):
for block, inner in self.gf_struct_sumk[icrsh]], make_copies=False)
for icrsh in range(self.n_corr_shells)]
SK_Sigma_imp = self.Sigma_imp_w
else:
raise ValueError, "put_Sigma: This type of Sigma is not handled."
@@ -1773,7 +1781,7 @@ class SumkDFT(object):
"""
F = lambda mu: self.total_density(
mu=mu, iw_or_w=iw_or_w, broadening=broadening)
mu=mu, iw_or_w=iw_or_w, broadening=broadening).real
density = self.density_required - self.charge_below
self.chemical_potential = dichotomy.dichotomy(function=F,
@@ -1869,6 +1877,12 @@ class SumkDFT(object):
nb = self.n_orbitals[ik, ntoi[bname]]
diag_inds = numpy.diag_indices(nb)
deltaN[bname][ik][diag_inds] -= dens_mat_dft[bname][ik][:nb]
if self.charge_mixing and self.deltaNOld is not None:
G2 = numpy.sum(self.kpts_cart[ik,:]**2)
# Kerker mixing
mix_fac = self.charge_mixing_alpha * G2 / (G2 + self.charge_mixing_gamma**2)
deltaN[bname][ik][diag_inds] = (1.0 - mix_fac) * self.deltaNOld[bname][ik][diag_inds] + mix_fac * deltaN[bname][ik][diag_inds]
dens[bname] -= self.bz_weights[ik] * dens_mat_dft[bname][ik].sum().real
isp = ntoi[bname]
b1, b2 = band_window[isp][ik, :2]
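The new block applies Kerker-type mixing to the density-matrix correction: the k-dependent factor is alpha * G^2 / (G^2 + gamma^2) with G^2 = |k_cart|^2, so long-wavelength (small G) components of deltaN are updated only weakly, which is the usual way to damp charge sloshing. A small numerical illustration with the defaults set further up (alpha = 0.1, gamma = 1.0):

import numpy as np

charge_mixing_alpha = 0.1
charge_mixing_gamma = 1.0

def kerker_factor(k_cart):
    "Mixing factor alpha * G^2 / (G^2 + gamma^2) for one Cartesian k-vector."
    G2 = np.sum(np.asarray(k_cart)**2)
    return charge_mixing_alpha * G2 / (G2 + charge_mixing_gamma**2)

print kerker_factor([0.0, 0.0, 0.0])   # 0.0  -> at Gamma the old deltaN is kept entirely
print kerker_factor([1.0, 0.0, 0.0])   # 0.05 -> shorter wavelengths are mixed more strongly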
@@ -1883,7 +1897,11 @@ class SumkDFT(object):
mpi.world, deltaN[bname][ik], lambda x, y: x + y)
dens[bname] = mpi.all_reduce(
mpi.world, dens[bname], lambda x, y: x + y)
self.deltaNOld = copy.copy(deltaN)
mpi.barrier()
band_en_correction = mpi.all_reduce(mpi.world, band_en_correction, lambda x,y : x+y)
# now save to file:

View File

@@ -78,7 +78,6 @@ class SumkDFTTools(SumkDFT):
DOSproj_orb : Dict of numpy arrays
DOS projected to atoms and resolved into orbital contributions.
"""
if (mesh is None) and (not with_Sigma):
raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq."
if mesh is None:
@@ -188,6 +187,129 @@ class SumkDFTTools(SumkDFT):
return DOS, DOSproj, DOSproj_orb
def dos_wannier_basis_all(self, mu=None, broadening=None, mesh=None, with_Sigma=True, with_dc=True, save_to_file=True):
"""
Calculates the density of states in the basis of the Wannier functions.
Parameters
----------
mu : double, optional
Chemical potential, overrides the one stored in the hdf5 archive.
broadening : double, optional
Lorentzian broadening of the spectra. If not given, standard value of lattice_gf is used.
mesh : real frequency MeshType, optional
Omega mesh for the real-frequency Green's function. Given as parameter to lattice_gf.
with_Sigma : boolean, optional
If True, the self energy is used for the calculation. If false, the DOS is calculated without self energy.
with_dc : boolean, optional
If True the double counting correction is used.
save_to_file : boolean, optional
If True, text files with the calculated data will be created.
Returns
-------
DOS : Dict of numpy arrays
Contains the full density of states.
DOSproj : Dict of numpy arrays
DOS projected to atoms.
DOSproj_orb : Dict of numpy arrays
DOS projected to atoms and resolved into orbital contributions.
"""
if (mesh is None) and (not with_Sigma):
raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq."
if mesh is None:
om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh]
om_min = om_mesh[0]
om_max = om_mesh[-1]
n_om = len(om_mesh)
mesh = (om_min, om_max, n_om)
else:
om_min, om_max, n_om = mesh
om_mesh = numpy.linspace(om_min, om_max, n_om)
spn = self.spin_block_names[self.SO]
gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn]
for ish in range(self.n_shells)]
#print(self.proj_mat_csc.shape[2])
#print(spn)
n_local_orbs = self.proj_mat_csc.shape[2]
gf_struct_parproj_all = [[(sp, range(n_local_orbs)) for sp in spn]]
glist_all = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om)
for block, inner in gf_struct_parproj_all[0]]
G_loc_all = BlockGf(name_list=spn, block_list=glist_all, make_copies=False)
DOS = {sp: numpy.zeros([n_om], numpy.float_)
for sp in self.spin_block_names[self.SO]}
DOSproj = {}
DOSproj_orb = {}
for sp in self.spin_block_names[self.SO]:
dim = n_local_orbs
DOSproj[sp] = numpy.zeros([n_om], numpy.float_)
DOSproj_orb[sp] = numpy.zeros(
[n_om, dim, dim], numpy.complex_)
ikarray = numpy.array(range(self.n_k))
for ik in mpi.slice_array(ikarray):
G_latt_w = self.lattice_gf(
ik=ik, mu=mu, iw_or_w="w", broadening=broadening, mesh=mesh, with_Sigma=with_Sigma, with_dc=with_dc)
G_latt_w *= self.bz_weights[ik]
# Non-projected DOS
for iom in range(n_om):
for bname, gf in G_latt_w:
DOS[bname][iom] -= gf.data[iom, :, :].imag.trace() / \
numpy.pi
# Projected DOS:
for bname, gf in G_latt_w:
G_loc_all[bname] << self.downfold(ik, 0, bname, gf, G_loc_all[bname], shells='csc')
# Collect data from mpi:
for bname in DOS:
DOS[bname] = mpi.all_reduce(
mpi.world, DOS[bname], lambda x, y: x + y)
G_loc_all[bname] << mpi.all_reduce(
mpi.world, G_loc_all[bname], lambda x, y: x + y)
mpi.barrier()
# Symmetrize and rotate to local coord. system if needed:
#if self.symm_op != 0:
# G_loc_all = self.symmcorr.symmetrize(G_loc_all)
# G_loc can now also be used to look at orbitally-resolved quantities
for bname, gf in G_loc_all: # loop over spins
for iom in range(n_om):
DOSproj[bname][iom] -= gf.data[iom,:,:].imag.trace() / numpy.pi
DOSproj_orb[bname][:,:,:] += (1.0j*(gf-gf.conjugate().transpose())/2.0/numpy.pi).data[:,:,:]
# Write to files
if save_to_file and mpi.is_master_node():
for sp in self.spin_block_names[self.SO]:
f = open('DOS_wann_%s.dat' % sp, 'w')
for iom in range(n_om):
f.write("%s %s\n" % (om_mesh[iom], DOS[sp][iom]))
f.close()
# Partial
f = open('DOS_wann_all_%s_proj.dat' % (sp), 'w')
for iom in range(n_om):
f.write("%s %s\n" %
(om_mesh[iom], DOSproj[sp][iom]))
f.close()
# Orbitally-resolved
for i in range(n_local_orbs):
for j in range(i, n_local_orbs):
f = open('DOS_wann_all' + sp + '_proj_' + str(i) + '_' + str(j) + '.dat', 'w')
for iom in range(n_om):
f.write("%s %s %s\n" % (
om_mesh[iom], DOSproj_orb[sp][iom, i, j].real,DOSproj_orb[sp][iom, i, j].imag))
f.close()
return DOS, DOSproj, DOSproj_orb
# Uses .data of only GfReFreq objects.
def dos_parproj_basis(self, mu=None, broadening=None, mesh=None, with_Sigma=True, with_dc=True, save_to_file=True):
"""

Binary file not shown.

Binary file not shown.

View File

@@ -78,9 +78,9 @@ class TestParseInput(arraytest.ArrayTestCase):
res = res.replace(" ","") # Remove spaces for comparison
expected = r"""Shells:
[{'ions':{'nion':4,'ion_list':[[4],[5],[6],[7]]},'user_index':1,'lshell':2,'corr':True},{'tmatrix':array([[0.,1.,0.],
[{'ions':{'nion':4,'ion_list':[[4],[5],[6],[7]]},'user_index':1,'lshell':2,'corr':True,'ion_sort':None},{'tmatrix':array([[0.,1.,0.],
[1.,0.,0.],
[0.,0.,1.]]),'ions':{'nion':4,'ion_list':[[0],[1],[2],[3]]},'user_index':2,'lshell':1,'corr':True},{'ions':{'nion':4,'ion_list':[[0],[1],[2],[3]]},'user_index':3,'lshell':3,'corr':True}]
[0.,0.,1.]]),'ions':{'nion':4,'ion_list':[[0],[1],[2],[3]]},'lshell':1,'corr':True,'ion_sort':None,'user_index':2},{'ions':{'nion':4,'ion_list':[[0],[1],[2],[3]]},'user_index':3,'lshell':3,'corr':True,'ion_sort':None}]
Groups:
[{'normalize':True,'index':1,'ewindow':(-7.6,3.0),'shells':[0,1],'complement':False,'normion':True},{'normalize':True,'index':2,'ewindow':(-1.6,2.0),'shells':[2],'complement':False,'normion':True}]"""
@@ -103,7 +103,7 @@
res = res.replace(" ","") # Remove spaces for comparison
expected = r"""Shells:
[{'ions':{'nion':4,'ion_list':[[4],[5],[6],[7]]},'user_index':1,'lshell':2,'corr':True}]
[{'ions':{'nion':4,'ion_list':[[4],[5],[6],[7]]},'lshell':2,'corr':True,'ion_sort':None,'user_index':1}]
Groups:
[{'normalize':True,'index':'1','ewindow':(-7.6,3.0),'normion':True,'complement':False,'shells':[0]}]"""

View File

@@ -59,9 +59,9 @@ class TestParseShells(arraytest.ArrayTestCase):
conf_pars = ConfigParameters(_rpath + 'parse_shells_4.cfg')
conf_pars.parse_shells()
res = conf_pars.shells
expected = [{'user_index': 1, 'lshell': 2, 'ions': {'nion': 4, 'ion_list': [[4],[5],[6],[7]]},'corr': True},
expected = [{'user_index': 1, 'lshell': 2, 'ions': {'nion': 4, 'ion_list': [[4],[5],[6],[7]]},'corr': True,'ion_sort':None},
{'user_index': 2, 'lshell': 1, 'ions': {'nion': 4, 'ion_list': [[0],[1],[2],[3]]},
'tmatrix': np.array([[ 0., 1., 0.], [ 1., 0., 0.], [ 0., 0., 1.]]),'corr': True}]
'tmatrix': np.array([[ 0., 1., 0.], [ 1., 0., 0.], [ 0., 0., 1.]]),'corr': True,'ion_sort':None}]
# ...lousy way to test equality of two dictionaries containing numpy arrays
self.assertEqual(len(res), len(expected))
@@ -84,8 +84,8 @@ class TestParseShells(arraytest.ArrayTestCase):
conf_pars = ConfigParameters(_rpath + 'parse_shells_5.cfg')
conf_pars.parse_shells()
res = conf_pars.shells
expected = [{'user_index': 1, 'lshell': 2, 'ions': {'nion': 4, 'ion_list': [[4],[5],[6],[7]]},'corr': True},
{'user_index': 2, 'lshell': 1, 'ions': {'nion': 4, 'ion_list': [[0],[1],[2],[3]]},'corr': False}]
expected = [{'user_index': 1, 'lshell': 2, 'ions': {'nion': 4, 'ion_list': [[4],[5],[6],[7]]},'corr': True,'ion_sort':None},
{'user_index': 2, 'lshell': 1, 'ions': {'nion': 4, 'ion_list': [[0],[1],[2],[3]]},'corr': False,'ion_sort':None}]
self.assertEqual(len(res), len(expected))
arr = res[0].pop('ions')

View File

@@ -1,4 +1,4 @@
pars: {'ions': {'nion': 1, 'ion_list': [[1]]}, 'user_index': 1, 'lshell': 2, 'corr': True}
pars: {'ions': {'nion': 1, 'ion_list': [[1]]}, 'lshell': 2, 'corr': True, 'ion_sort': None, 'user_index': 1}
10 25
1 0.000000 -0.000000
2 0.000000 0.000000