mirror of https://github.com/triqs/dft_tools synced 2024-10-14 03:51:37 +02:00

[py3] Run 2to3 -w -n **/*.py **/*.py.in

Nils Wentzell 2020-04-08 15:35:59 -04:00
parent 8e8ce2b67b
commit 97d4e0b402
59 changed files with 642 additions and 642 deletions
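The 59 file changes below are mechanical rewrites produced by the 2to3 tool (-w writes the fixes back into the files, -n suppresses the .bak backups). As orientation before the per-file hunks, here is a minimal, self-contained Python sketch of the fixer patterns that recur throughout the commit; the variables and data in it are invented for illustration and are not taken from the repository:

# Recurring 2to3 rewrites, shown on made-up data (illustration only).
d = {'up': [0, 1, 2], 'down': [0, 1, 2]}

# print statement -> print() function
print("blocks:", len(d))                      # was: print "blocks:", len(d)

# dict.iteritems() -> items(); keys() wrapped in list() where a real list is needed
for block, indices in d.items():              # was: d.iteritems()
    pass
block_names = list(d.keys())                  # was: d.keys() (already a list in Python 2)

# xrange -> range; range()/map()/filter() wrapped in list() where a list is required
index_list = list(range(4))                   # was: range(4)
floats = list(map(float, "1.0 2.5".split()))  # was: map(float, ...)

# iterator.next() -> next(iterator)
it = iter(index_list)
first = next(it)                              # was: it.next()

# old raise syntax -> exception instance
try:
    raise IOError("File %s does not exist." % "fort.7")   # was: raise IOError, "..."
except IOError:
    pass

# Not runnable standalone, but also part of the commit: implicit relative imports
# become explicit ("from sumk_dft import ..." -> "from .sumk_dft import ..."),
# and "import ConfigParser" becomes "import configparser".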

View File

@@ -3,6 +3,6 @@ def application_pytriqs_import(name,*args,**kwargs):
 name = name[len('@package_name@')+1:]
 return builtin_import(name,*args,**kwargs)
-import __builtin__
+import builtins
-__builtin__.__import__, builtin_import = application_pytriqs_import, __builtin__.__import__
+builtins.__import__, builtin_import = application_pytriqs_import, builtins.__import__

View File

@@ -22,8 +22,8 @@ extensions = ['sphinx.ext.autodoc',
 source_suffix = '.rst'
-project = u'TRIQS DFTTools'
+project = 'TRIQS DFTTools'
-copyright = u'2011-2019'
+copyright = '2011-2019'
 version = '@DFT_TOOLS_VERSION@'
 mathjax_path = "@TRIQS_MATHJAX_PATH@/MathJax.js?config=default"

View File

@@ -111,7 +111,7 @@ for iteration_number in range(1,Loops+1):
 #Save essential SumkDFT data:
 SK.save(['chemical_potential','dc_imp','dc_energ','correnerg'])
 if (mpi.is_master_node()):
-print 'DC after solver: ',SK.dc_imp[0]
+print('DC after solver: ',SK.dc_imp[0])
 # print out occupancy matrix of Ce 4f
 mpi.report("Orbital densities of impurity Green function:")

View File

@@ -31,7 +31,7 @@ SK.dc_imp = mpi.bcast(SK.dc_imp)
 SK.dc_energ = mpi.bcast(SK.dc_energ)
 if (mpi.is_master_node()):
-print 'DC after reading SK: ',SK.dc_imp[0]
+print('DC after reading SK: ',SK.dc_imp[0])
 N = SK.corr_shells[0]['dim']
 l = SK.corr_shells[0]['l']

View File

@@ -39,7 +39,7 @@ if mpi.is_master_node():
 if not 'Iterations' in ar['DMFT_results']: ar['DMFT_results'].create_group('Iterations')
 if 'iteration_count' in ar['DMFT_results']:
 iteration_offset = ar['DMFT_results']['iteration_count']+1
-print('offset',iteration_offset)
+print(('offset',iteration_offset))
 Sigma_iw = ar['DMFT_results']['Iterations']['Sigma_it'+str(iteration_offset-1)]
 SK.dc_imp = ar['DMFT_results']['Iterations']['dc_imp'+str(iteration_offset-1)]
 SK.dc_energ = ar['DMFT_results']['Iterations']['dc_energ'+str(iteration_offset-1)]
@@ -54,13 +54,13 @@ SK.chemical_potential = mpi.bcast(SK.chemical_potential)
 SK.put_Sigma(Sigma_imp = [Sigma_iw])
-ikarray = numpy.array(range(SK.n_k))
+ikarray = numpy.array(list(range(SK.n_k)))
 # set up the orbitally resolved local lattice greens function:
 n_orbs = SK.proj_mat_csc.shape[2]
 spn = SK.spin_block_names[SK.SO]
 mesh = Sigma_iw.mesh
-block_structure = [range(n_orbs) for sp in spn]
+block_structure = [list(range(n_orbs)) for sp in spn]
 gf_struct = [(spn[isp], block_structure[isp])
 for isp in range(SK.n_spin_blocks[SK.SO])]
 block_ind_list = [block for block, inner in gf_struct]

View File

@@ -76,7 +76,7 @@ spin_names = ["up","down"]
 orb_names = [i for i in range(n_orb)]
 # Use GF structure determined by DFT blocks
-gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()]
+gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].items()]
 # Construct Solver
 S = Solver(beta=beta, gf_struct=gf_struct)
@@ -97,7 +97,7 @@ if previous_present:
 SK.set_dc(dc_imp,dc_energ)
 for iteration_number in range(1,loops+1):
-if mpi.is_master_node(): print "Iteration = ", iteration_number
+if mpi.is_master_node(): print("Iteration = ", iteration_number)
 SK.symm_deg_gf(S.Sigma_iw,orb=0) # symmetrise Sigma
 SK.set_Sigma([ S.Sigma_iw ]) # set Sigma into the SumK class

View File

@@ -12,7 +12,7 @@ if 'iteration_count' in ar['DMFT_results']:
 tm = TauMaxEnt(cost_function='bryan', probability='normal')
-print(G_latt['up'][0,0])
+print((G_latt['up'][0,0]))
 t2g_orbs = [0,1,3]
 eg_orbs = [2,4]
 op_orbs = [5,6,7]
@@ -22,7 +22,7 @@ orbs = [t2g_orbs, eg_orbs, op_orbs]
 for orb in orbs:
-print '\n'+str(orb[0])+'\n'
+print('\n'+str(orb[0])+'\n')
 gf = 0*G_latt['up'][0,0]
 for iO in orb:

View File

@@ -30,7 +30,7 @@ for i_sh in range(len(SK.deg_shells)):
 mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh))
 for iblock in range(num_block_deg_orbs):
 mpi.report('block {0:d} consists of orbitals:'.format(iblock))
-for keys in SK.deg_shells[i_sh][iblock].keys():
+for keys in list(SK.deg_shells[i_sh][iblock].keys()):
 mpi.report(' '+keys)
 # Setup CTQMC Solver

View File

@@ -37,7 +37,7 @@ def dmft_cycle():
 mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh))
 for iblock in range(num_block_deg_orbs):
 mpi.report('block {0:d} consists of orbitals:'.format(iblock))
-for keys in SK.deg_shells[i_sh][iblock].keys():
+for keys in list(SK.deg_shells[i_sh][iblock].keys()):
 mpi.report(' '+keys)
 # Setup CTQMC Solver
@@ -176,15 +176,15 @@ def dmft_cycle():
 if mpi.is_master_node():
-print 'calculating mu...'
+print('calculating mu...')
 SK.chemical_potential = SK.calc_mu( precision = 0.000001 )
 if mpi.is_master_node():
-print 'calculating GAMMA'
+print('calculating GAMMA')
 SK.calc_density_correction(dm_type='vasp')
 if mpi.is_master_node():
-print 'calculating energy corrections'
+print('calculating energy corrections')
 correnerg = 0.5 * (S.G_iw * S.Sigma_iw).total_density()

View File

@@ -20,11 +20,11 @@
 #
 ##########################################################################
-from sumk_dft import SumkDFT
+from .sumk_dft import SumkDFT
-from symmetry import Symmetry
+from .symmetry import Symmetry
-from block_structure import BlockStructure
+from .block_structure import BlockStructure
-from sumk_dft_tools import SumkDFTTools
+from .sumk_dft_tools import SumkDFTTools
-from converters import *
+from .converters import *
 __all__ = ['SumkDFT', 'Symmetry', 'SumkDFTTools',
 'Wien2kConverter', 'HkConverter','BlockStructure']

View File

@@ -145,14 +145,14 @@ class BlockStructure(object):
 # create new solver_to_sumk
 so2su={}
 so2su_block = {}
-for blk,idxs in gf_struct.items():
+for blk,idxs in list(gf_struct.items()):
 for i in range(len(idxs)):
 so2su[(blk,i)]=self.solver_to_sumk[ish][(blk,idxs[i])]
 so2su_block[blk]=so2su[(blk,i)][0]
 self.solver_to_sumk[ish] = so2su
 self.solver_to_sumk_block[ish] = so2su_block
 # create new sumk_to_solver
-for k,v in self.sumk_to_solver[ish].items():
+for k,v in list(self.sumk_to_solver[ish].items()):
 blk,ind=v
 if blk in gf_struct and ind in gf_struct[blk]:
 new_ind = gf_struct[blk].index(ind)
@@ -161,7 +161,7 @@ class BlockStructure(object):
 self.sumk_to_solver[ish][k]=(None,None)
 # reindexing gf_struct so that it starts with 0
 for k in gf_struct:
-gf_struct[k]=range(len(gf_struct[k]))
+gf_struct[k]=list(range(len(gf_struct[k])))
 self.gf_struct_solver[ish]=gf_struct
 def pick_gf_struct_sumk(self,new_gf_struct):
@@ -207,7 +207,7 @@ class BlockStructure(object):
 # mapping
 for ish in range(len(new_gf_struct)):
 gfs.append({})
-for block in new_gf_struct[ish].keys():
+for block in list(new_gf_struct[ish].keys()):
 for ind in new_gf_struct[ish][block]:
 ind_sol = self.sumk_to_solver[ish][(block,ind)]
 if not ind_sol[0] in gfs[ish]:
@@ -232,7 +232,7 @@ class BlockStructure(object):
 so2su = {}
 su2so = {}
 so2su_block = {}
-for frm,to in mapping[ish].iteritems():
+for frm,to in mapping[ish].items():
 if not to[0] in gf_struct:
 gf_struct[to[0]]=[]
 gf_struct[to[0]].append(to[1])
@@ -247,7 +247,7 @@ class BlockStructure(object):
 else:
 so2su_block[to[0]]=\
 self.solver_to_sumk_block[ish][frm[0]]
-for k in self.sumk_to_solver[ish].keys():
+for k in list(self.sumk_to_solver[ish].keys()):
 if not k in su2so:
 su2so[k] = (None,None)
 self.gf_struct_solver[ish]=gf_struct
@@ -273,7 +273,7 @@ class BlockStructure(object):
 blocks
 """
-names = self.gf_struct_solver[ish].keys()
+names = list(self.gf_struct_solver[ish].keys())
 blocks=[]
 for n in names:
 G = gf_function(indices=self.gf_struct_solver[ish][n],**kwargs)
@@ -315,7 +315,7 @@ class BlockStructure(object):
 show_warnings = True
 G_new = self.create_gf(ish=ish,**kwargs)
-for block in G_struct.gf_struct_solver[ish].keys():
+for block in list(G_struct.gf_struct_solver[ish].keys()):
 for i1 in G_struct.gf_struct_solver[ish][block]:
 for i2 in G_struct.gf_struct_solver[ish][block]:
 i1_sumk = G_struct.solver_to_sumk[ish][(block,i1)]
@@ -356,7 +356,7 @@ class BlockStructure(object):
 self.gf_struct_solver.append({})
 self.solver_to_sumk.append({})
 self.solver_to_sumk_block.append({})
-for frm,to in self.sumk_to_solver[ish].iteritems():
+for frm,to in self.sumk_to_solver[ish].items():
 if to[0] is not None:
 self.gf_struct_solver[ish][frm[0]+'_'+str(frm[1])]=[0]
 self.sumk_to_solver[ish][frm]=(frm[0]+'_'+str(frm[1]),0)
@@ -384,7 +384,7 @@ class BlockStructure(object):
 elif isinstance(one,dict):
 if set(one.keys()) != set(two.keys()):
 return False
-for k in set(one.keys()).intersection(two.keys()):
+for k in set(one.keys()).intersection(list(two.keys())):
 if not compare(one[k],two[k]):
 return False
 return True
@@ -413,7 +413,7 @@ class BlockStructure(object):
 d = []
 for ish in range(len(mapping)):
 d.append({})
-for k,v in mapping[ish].iteritems():
+for k,v in mapping[ish].items():
 d[ish][repr(k)] = repr(v)
 return d
@@ -429,7 +429,7 @@ class BlockStructure(object):
 d = []
 for ish in range(len(mapping)):
 d.append({})
-for k,v in mapping[ish].iteritems():
+for k,v in mapping[ish].items():
 # literal_eval is a saje alternative to eval
 d[ish][literal_eval(k)] = literal_eval(v)
 return d
@@ -450,7 +450,7 @@ class BlockStructure(object):
 s+=' shell '+str(ish)+'\n'
 def keyfun(el):
 return '{}_{:05d}'.format(el[0],el[1])
-keys = sorted(element[ish].keys(),key=keyfun)
+keys = sorted(list(element[ish].keys()),key=keyfun)
 for k in keys:
 s+=' '+str(k)+str(element[ish][k])+'\n'
 s += "deg_shells\n"
@@ -459,7 +459,7 @@ class BlockStructure(object):
 for l in range(len(self.deg_shells[ish])):
 s+=' equivalent group '+str(l)+'\n'
 if isinstance(self.deg_shells[ish][l],dict):
-for key, val in self.deg_shells[ish][l].iteritems():
+for key, val in self.deg_shells[ish][l].items():
 s+=' '+key+('*' if val[1] else '')+':\n'
 s+=' '+str(val[0]).replace('\n','\n ')+'\n'
 else:

View File

@@ -3,13 +3,13 @@ import sys
 import subprocess
 if len(sys.argv) < 2:
-print "Usage: python clear_h5_output.py archive"
+print("Usage: python clear_h5_output.py archive")
 sys.exit()
-print """
+print("""
 This script is to remove any SumkDFT generated output from the h5 archive
 and to restore it to the original post-converter state.
-"""
+""")
 filename = sys.argv[1]
 A = h5py.File(filename)
@@ -21,6 +21,6 @@ A.close()
 # Repack to reclaim disk space
 retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"])
 if retcode != 0:
-print "h5repack failed!"
+print("h5repack failed!")
 else:
 subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename])

View File

@@ -20,10 +20,10 @@
 #
 ##########################################################################
-from wien2k_converter import Wien2kConverter
+from .wien2k_converter import Wien2kConverter
-from hk_converter import HkConverter
+from .hk_converter import HkConverter
-from vasp_converter import VaspConverter
+from .vasp_converter import VaspConverter
-from wannier90_converter import Wannier90Converter
+from .wannier90_converter import Wannier90Converter
 __all__ =['Wien2kConverter','HkConverter','Wannier90Converter','VaspConverter']

View File

@@ -46,9 +46,9 @@ class ConverterTools:
 import os.path
 import string
 if not(os.path.exists(filename)):
-raise IOError, "File %s does not exist." % filename
+raise IOError("File %s does not exist." % filename)
 for line in open(filename, 'r'):
-for old, new in to_replace.iteritems():
+for old, new in to_replace.items():
 line = line.replace(old, new)
 for x in line.split():
 yield string.atof(x)

View File

@@ -25,7 +25,7 @@ import numpy
 from pytriqs.archive import *
 import pytriqs.utility.mpi as mpi
 from math import sqrt
-from converter_tools import *
+from .converter_tools import *
 class HkConverter(ConverterTools):
@@ -96,20 +96,20 @@ class HkConverter(ConverterTools):
 # the energy conversion factor is 1.0, we assume eV in files
 energy_unit = 1.0
 # read the number of k points
-n_k = int(R.next())
+n_k = int(next(R))
 k_dep_projection = 0
 SP = 0 # no spin-polarision
 SO = 0 # no spin-orbit
 # total charge below energy window is set to 0
 charge_below = 0.0
 # density required, for setting the chemical potential
-density_required = R.next()
+density_required = next(R)
 symm_op = 0 # No symmetry groups for the k-sum
 # the information on the non-correlated shells is needed for
 # defining dimension of matrices:
 # number of shells considered in the Wanniers
-n_shells = int(R.next())
+n_shells = int(next(R))
 # corresponds to index R in formulas
 # now read the information about the shells (atom, sort, l, dim):
 shell_entries = ['atom', 'sort', 'l', 'dim']
@@ -117,7 +117,7 @@ class HkConverter(ConverterTools):
 shell_entries, R)} for ish in range(n_shells)]
 # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
-n_corr_shells = int(R.next())
+n_corr_shells = int(next(R))
 # corresponds to index R in formulas
 # now read the information about the shells (atom, sort, l, dim, SO
 # flag, irep):
@@ -141,8 +141,8 @@ class HkConverter(ConverterTools):
 T = []
 for ish in range(n_inequiv_shells):
 # number of representatives ("subsets"), e.g. t2g and eg
-n_reps[ish] = int(R.next())
+n_reps[ish] = int(next(R))
-dim_reps[ish] = [int(R.next()) for i in range(
+dim_reps[ish] = [int(next(R)) for i in range(
 n_reps[ish])] # dimensions of the subsets
 # The transformation matrix:
@@ -201,7 +201,7 @@ class HkConverter(ConverterTools):
 if (weights_in_file):
 # weights in the file
 for ik in range(n_k):
-bz_weights[ik] = R.next()
+bz_weights[ik] = next(R)
 # if the sum over spins is in the weights, take it out again!!
 sm = sum(bz_weights)
@@ -222,7 +222,7 @@ class HkConverter(ConverterTools):
 else:
 istart = 0
 for j in range(istart, n_orb):
-hopping[ik, isp, i, j] = R.next()
+hopping[ik, isp, i, j] = next(R)
 for i in range(n_orb):
 if (only_upper_triangle):
@@ -230,7 +230,7 @@ class HkConverter(ConverterTools):
 else:
 istart = 0
 for j in range(istart, n_orb):
-hopping[ik, isp, i, j] += R.next() * 1j
+hopping[ik, isp, i, j] += next(R) * 1j
 if ((only_upper_triangle)and(i != j)):
 hopping[ik, isp, j, i] = hopping[
 ik, isp, i, j].conjugate()
@@ -243,8 +243,8 @@ class HkConverter(ConverterTools):
 else:
 istart = 0
 for j in range(istart, n_orb):
-hopping[ik, isp, i, j] = R.next()
+hopping[ik, isp, i, j] = next(R)
-hopping[ik, isp, i, j] += R.next() * 1j
+hopping[ik, isp, i, j] += next(R) * 1j
 if ((only_upper_triangle)and(i != j)):
 hopping[ik, isp, j, i] = hopping[

View File

@@ -36,10 +36,10 @@ r"""
 Usage: python converter.py <conf-file> [<path-to-vasp-calculation>]
 """
 import sys
-import vaspio
+from . import vaspio
-from inpconf import ConfigParameters
+from .inpconf import ConfigParameters
-from elstruct import ElectronicStructure
+from .elstruct import ElectronicStructure
-from plotools import generate_plo, output_as_text
+from .plotools import generate_plo, output_as_text
 def generate_and_output_as_text(conf_filename, vasp_dir):
 """

View File

@@ -92,7 +92,7 @@ class ElectronicStructure:
 # removed completely.
 # if not vasp_data.eigenval.eigs is None:
 if False:
-print "eigvals from EIGENVAL"
+print("eigvals from EIGENVAL")
 self.eigvals = vasp_data.eigenval.eigs
 self.ferw = vasp_data.eigenval.ferw.transpose((2, 0, 1))
@@ -102,7 +102,7 @@ class ElectronicStructure:
 # Check that the number of band is the same in PROJCAR and EIGENVAL
 assert nb_plo == self.nband, "PLOCAR is inconsistent with EIGENVAL (number of bands)"
 else:
-print "eigvals from LOCPROJ"
+print("eigvals from LOCPROJ")
 self.eigvals = vasp_data.plocar.eigs
 self.ferw = vasp_data.plocar.ferw.transpose((2, 0, 1))
 self.efermi = vasp_data.doscar.efermi
@@ -163,8 +163,8 @@ class ElectronicStructure:
 overlap = np.zeros((ns, nproj, nproj), dtype=np.float64)
 # ov_min = np.ones((ns, nproj, nproj), dtype=np.float64) * 100.0
 # ov_max = np.zeros((ns, nproj, nproj), dtype=np.float64)
-for ispin in xrange(ns):
+for ispin in range(ns):
-for ik in xrange(nk):
+for ik in range(nk):
 kweight = self.kmesh['kweights'][ik]
 occ = self.ferw[ispin, ik, :]
 den_mat[ispin, :, :] += np.dot(plo[:, ispin, ik, :] * occ, plo[:, ispin, ik, :].T.conj()).real * kweight * sp_fac
@@ -174,12 +174,12 @@ class ElectronicStructure:
 # ov_min = np.minimum(ov, ov_min)
 # Output only the site-diagonal parts of the matrices
-print
+print()
-print " Unorthonormalized density matrices and overlaps:"
+print(" Unorthonormalized density matrices and overlaps:")
-for ispin in xrange(ns):
+for ispin in range(ns):
-print " Spin:", ispin + 1
+print(" Spin:", ispin + 1)
 for io, ion in enumerate(ions):
-print " Site:", ion
+print(" Site:", ion)
 iorb_inds = [(ip, param['m']) for ip, param in enumerate(self.proj_params) if param['isite'] == ion]
 norb = len(iorb_inds)
 dm = np.zeros((norb, norb))
@@ -189,9 +189,9 @@ class ElectronicStructure:
 dm[iorb, iorb2] = den_mat[ispin, ind, ind2]
 ov[iorb, iorb2] = overlap[ispin, ind, ind2]
-print " Density matrix" + (12*norb - 12 + 2)*" " + "Overlap"
+print(" Density matrix" + (12*norb - 12 + 2)*" " + "Overlap")
 for drow, dov in zip(dm, ov):
 out = ''.join(map("{0:12.7f}".format, drow))
 out += " "
 out += ''.join(map("{0:12.7f}".format, dov))
-print out
+print(out)

View File

@@ -29,20 +29,20 @@ r"""
 Module for parsing and checking an input config-file.
 """
-import ConfigParser
+import configparser
 import numpy as np
 import re
 import sys
 import itertools as it
-import vaspio
+from . import vaspio
 def issue_warning(message):
 """
 Issues a warning.
 """
-print
+print()
-print " !!! WARNING !!!: " + message
+print(" !!! WARNING !!!: " + message)
-print
+print()
 ################################################################################
 ################################################################################
@@ -73,7 +73,7 @@ class ConfigParameters:
 ################################################################################
 def __init__(self, input_filename, verbosity=1):
 self.verbosity = verbosity
-self.cp = ConfigParser.SafeConfigParser()
+self.cp = configparser.SafeConfigParser()
 self.cp.readfp(open(input_filename, 'r'))
 self.parameters = {}
@@ -89,7 +89,7 @@ class ConfigParameters:
 'corr': ('corr', self.parse_string_logical, True)}
 self.gr_required = {
-'shells': ('shells', lambda s: map(int, s.split())),
+'shells': ('shells', lambda s: list(map(int, s.split()))),
 'ewindow': ('ewindow', self.parse_energy_window)}
 self.gr_optional = {
@@ -142,7 +142,7 @@ class ConfigParameters:
 else:
 # Check if a set of indices is given
 try:
-l_tmp = map(int, par_str.split())
+l_tmp = list(map(int, par_str.split()))
 l_tmp.sort()
 # Subtract 1 so that VASP indices (starting with 1) are converted
 # to Python indices (starting with 0)
@@ -160,7 +160,7 @@ class ConfigParameters:
 ion_list = []
 nion = 0
 for cl in classes:
-ions = map(int, re.findall(patt2, cl))
+ions = list(map(int, re.findall(patt2, cl)))
 ion_list.append([ion - 1 for ion in ions])
 nion += len(ions)
@@ -218,7 +218,7 @@ class ConfigParameters:
 Energy window is given by two floats, with the first one being smaller
 than the second one.
 """
-ftmp = map(float, par_str.split())
+ftmp = list(map(float, par_str.split()))
 assert len(ftmp) == 2, "EWINDOW must be specified by exactly two floats"
 assert ftmp[0] < ftmp[1], "The first float in EWINDOW must be smaller than the second one"
 return tuple(ftmp)
@@ -233,7 +233,7 @@ class ConfigParameters:
 Band window is given by two ints, with the first one being smaller
 than the second one.
 """
-ftmp = map(int, par_str.split())
+ftmp = list(map(int, par_str.split()))
 assert len(ftmp) == 2, "BANDS must be specified by exactly two ints"
 assert ftmp[0] < ftmp[1], "The first int in BANDS must be smaller than the second one"
 return tuple(ftmp)
@@ -250,7 +250,7 @@ class ConfigParameters:
 """
 str_rows = par_str.split('\n')
 try:
-rows = [map(float, s.split()) for s in str_rows]
+rows = [list(map(float, s.split())) for s in str_rows]
 except ValueError:
 err_mess = "Cannot parse a matrix string:\n%s"%(par_str)
 raise ValueError(err_mess)
@@ -339,11 +339,11 @@ class ConfigParameters:
 For required parameters `exception=True` must be set.
 """
 parsed = {}
-for par in param_set.keys():
+for par in list(param_set.keys()):
 key = param_set[par][0]
 try:
 par_str = self.cp.get(section, par)
-except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+except (configparser.NoOptionError, configparser.NoSectionError):
 if exception:
 message = "Required parameter '%s' not found in section [%s]"%(par, section)
 raise Exception(message)
@@ -354,7 +354,7 @@ class ConfigParameters:
 continue
 if self.verbosity > 0:
-print " %s = %s"%(par, par_str)
+print(" %s = %s"%(par, par_str))
 parse_fun = param_set[par][1]
 parsed[key] = parse_fun(par_str)
@@ -376,23 +376,23 @@ class ConfigParameters:
 sections = self.cp.sections()
 sh_patt1 = re.compile('shell +.*', re.IGNORECASE)
-sec_shells = filter(sh_patt1.match, sections)
+sec_shells = list(filter(sh_patt1.match, sections))
 self.nshells = len(sec_shells)
 assert self.nshells > 0, "No projected shells found in the input file"
 if self.verbosity > 0:
-print
+print()
 if self.nshells > 1:
-print " Found %i projected shells"%(self.nshells)
+print(" Found %i projected shells"%(self.nshells))
 else:
-print " Found 1 projected shell"
+print(" Found 1 projected shell")
 # Get shell indices
 sh_patt2 = re.compile('shell +([0-9]*)$', re.IGNORECASE)
 try:
 get_ind = lambda s: int(sh_patt2.match(s).groups()[0])
-sh_inds = map(get_ind, sec_shells)
+sh_inds = list(map(get_ind, sec_shells))
 except (ValueError, AttributeError):
 raise ValueError("Failed to extract shell indices from a list: %s"%(sec_shells))
@@ -405,7 +405,7 @@ class ConfigParameters:
 # Ideally, indices should run from 1 to <nshells>
 # If it's not the case, issue a warning
 sh_inds.sort()
-if sh_inds != range(1, len(sh_inds) + 1):
+if sh_inds != list(range(1, len(sh_inds) + 1)):
 issue_warning("Shell indices are not uniform or not starting from 1. "
 "This might be an indication of a incorrect setup.")
@@ -418,8 +418,8 @@ class ConfigParameters:
 section = self.sh_sections[ind]
 if self.verbosity > 0:
-print
+print()
-print " Shell parameters:"
+print(" Shell parameters:")
 # Shell required parameters
 parsed = self.parse_parameter_set(section, self.sh_required, exception=True)
 shell.update(parsed)
@@ -453,7 +453,7 @@ class ConfigParameters:
 sections = self.cp.sections()
 gr_patt = re.compile('group +(.*)', re.IGNORECASE)
-sec_groups = filter(gr_patt.match, sections)
+sec_groups = list(filter(gr_patt.match, sections))
 self.ngroups = len(sec_groups)
@@ -471,8 +471,8 @@ class ConfigParameters:
 group['index'] = gr_ind
 if self.verbosity > 0:
-print
+print()
-print " Group parameters:"
+print(" Group parameters:")
 # Group required parameters
 parsed = self.parse_parameter_set(section, self.gr_required, exception=True)
 group.update(parsed)
@@ -514,18 +514,18 @@ class ConfigParameters:
 sh_gr_required = dict(self.gr_required)
 sh_gr_required.pop('shells')
 try:
-for par in sh_gr_required.keys():
+for par in list(sh_gr_required.keys()):
 key = sh_gr_required[par][0]
 value = self.shells[0].pop(key)
 self.groups[0][key] = value
 except KeyError:
 message = "One [Shell] section is specified but no explicit [Group] section is provided."
 message += " In this case the [Shell] section must contain all required group information.\n"
-message += " Required parameters are: %s"%(sh_gr_required.keys())
+message += " Required parameters are: %s"%(list(sh_gr_required.keys()))
 raise KeyError(message)
 # Do the same for optional group parameters, but do not raise an exception
-for par in self.gr_optional.keys():
+for par in list(self.gr_optional.keys()):
 try:
 key = self.gr_optional[par][0]
 value = self.shells[0].pop(key)
@@ -562,7 +562,7 @@ class ConfigParameters:
 # remove them and issue a warning.
 #
 # First, required group parameters
-for par in self.gr_required.keys():
+for par in list(self.gr_required.keys()):
 try:
 key = self.gr_required[par][0]
 value = shell.pop(key)
@@ -573,7 +573,7 @@ class ConfigParameters:
 continue
 # Second, optional group parameters
-for par in self.gr_optional.keys():
+for par in list(self.gr_optional.keys()):
 try:
 key = self.gr_optional[par][0]
 value = shell.pop(key)
@@ -591,7 +591,7 @@ class ConfigParameters:
 sh_refs_used.sort()
 # Check that all shells are referenced in the groups
-assert sh_refs_used == range(self.nshells), "Some shells are not inside any of the groups"
+assert sh_refs_used == list(range(self.nshells)), "Some shells are not inside any of the groups"
 ################################################################################
@@ -605,7 +605,7 @@ class ConfigParameters:
 """
 self.general = {}
 sections = self.cp.sections()
-gen_section = filter(lambda s: s.lower() == 'general', sections)
+gen_section = [s for s in sections if s.lower() == 'general']
 # If no [General] section is found parse a dummy section name to the parser
 # to reset parameters to their default values
 if len(gen_section) > 1:

View File

@@ -55,9 +55,9 @@ r"""
 """
 import itertools as it
 import numpy as np
-from proj_group import ProjectorGroup
+from .proj_group import ProjectorGroup
-from proj_shell import ProjectorShell
+from .proj_shell import ProjectorShell
-from proj_shell import ComplementShell
+from .proj_shell import ComplementShell
 np.set_printoptions(suppress=True)
@@ -71,9 +71,9 @@ def issue_warning(message):
 """
 Issues a warning.
 """
-print
+print()
-print " !!! WARNING !!!: " + message
+print(" !!! WARNING !!!: " + message)
-print
+print()
 ################################################################################
 # check_data_consistency()
@@ -129,18 +129,18 @@ def generate_plo(conf_pars, el_struct):
 # check if at least one shell is correlated
 assert np.any([shell['corr'] for shell in conf_pars.shells]), 'at least one shell has be CORR = True'
 nshell = len(conf_pars.shells)
-print
+print()
-print " Generating %i shell%s..."%(nshell, '' if nshell == 1 else 's')
+print(" Generating %i shell%s..."%(nshell, '' if nshell == 1 else 's'))
 pshells = []
 for sh_par in conf_pars.shells:
 pshell = ProjectorShell(sh_par, proj_raw, el_struct.proj_params, el_struct.kmesh, el_struct.structure, el_struct.nc_flag)
-print
+print()
-print " Shell : %s"%(pshell.user_index)
+print(" Shell : %s"%(pshell.user_index))
-print " Orbital l : %i"%(pshell.lorb)
+print(" Orbital l : %i"%(pshell.lorb))
-print " Number of ions: %i"%(pshell.nion)
+print(" Number of ions: %i"%(pshell.nion))
-print " Dimension : %i"%(pshell.ndim)
+print(" Dimension : %i"%(pshell.ndim))
-print " Correlated : %r"%(pshell.corr)
+print(" Correlated : %r"%(pshell.corr))
-print " Ion sort : %r"%(pshell.ion_sort)
+print(" Ion sort : %r"%(pshell.ion_sort))
 pshells.append(pshell)
@@ -157,45 +157,45 @@ def generate_plo(conf_pars, el_struct):
 #with HDFArchive(testout, 'w') as h5test:
 # h5test['hk'] = pgroup.hk
 # DEBUG output
-print "Density matrix:"
+print("Density matrix:")
 nimp = 0.0
 ov_all = []
 for ish in pgroup.ishells:
 if not isinstance(pshells[pgroup.ishells[ish]],ComplementShell):
-print " Shell %i"%(ish + 1)
+print(" Shell %i"%(ish + 1))
 dm_all, ov_all_ = pshells[ish].density_matrix(el_struct)
 ov_all.append(ov_all_[0])
 spin_fac = 2 if dm_all.shape[0] == 1 else 1
-for io in xrange(dm_all.shape[1]):
+for io in range(dm_all.shape[1]):
-print " Site %i"%(io + 1)
+print(" Site %i"%(io + 1))
 dm = spin_fac * dm_all[:, io, : ,:].sum(0)
 for row in dm:
-print ''.join(map("{0:14.7f}".format, row))
+print(''.join(map("{0:14.7f}".format, row)))
 ndm = dm.trace()
 if pshells[ish].corr:
 nimp += ndm
-print " trace: ", ndm
+print(" trace: ", ndm)
-print
+print()
-print " Impurity density:", nimp
+print(" Impurity density:", nimp)
-print
+print()
-print "Overlap:"
+print("Overlap:")
 for io, ov in enumerate(ov_all):
-print " Site %i"%(io + 1)
+print(" Site %i"%(io + 1))
-print ov[0,...]
+print(ov[0,...])
-print
+print()
-print "Local Hamiltonian:"
+print("Local Hamiltonian:")
 for ish in pgroup.ishells:
 if not isinstance(pshells[pgroup.ishells[ish]],ComplementShell):
-print " Shell %i"%(ish + 1)
+print(" Shell %i"%(ish + 1))
 loc_ham = pshells[pgroup.ishells[ish]].local_hamiltonian(el_struct)
-for io in xrange(loc_ham.shape[1]):
+for io in range(loc_ham.shape[1]):
-print " Site %i"%(io + 1)
+print(" Site %i"%(io + 1))
 for row in loc_ham[:, io, :, :].sum(0):
-print ''.join(map("{0:14.7f}".format, row))
+print(''.join(map("{0:14.7f}".format, row)))
 # END DEBUG output
 if 'dosmesh' in conf_pars.general:
-print
+print()
-print "Evaluating DOS..."
+print("Evaluating DOS...")
 mesh_pars = conf_pars.general['dosmesh']
 if np.isnan(mesh_pars['emin']):
 dos_emin = pgroup.emin
@@ -208,12 +208,12 @@ def generate_plo(conf_pars, el_struct):
 emesh = np.linspace(dos_emin, dos_emax, n_points)
 for ish in pgroup.ishells:
 if not isinstance(pshells[pgroup.ishells[ish]],ComplementShell) or True:
-print " Shell %i"%(ish + 1)
+print(" Shell %i"%(ish + 1))
 dos = pshells[pgroup.ishells[ish]].density_of_states(el_struct, emesh)
 de = emesh[1] - emesh[0]
 ntot = (dos[1:,...] + dos[:-1,...]).sum(0) / 2 * de
-print " Total number of states:", ntot
+print(" Total number of states:", ntot)
-for io in xrange(dos.shape[2]):
+for io in range(dos.shape[2]):
 np.savetxt('pdos_%i_%i.dat'%(ish,io), np.vstack((emesh.T, dos[:, 0, io, :].T)).T)
 pgroups.append(pgroup)
@@ -254,7 +254,7 @@ def kpoints_output(basename, el_struct):
 f.write("%i\n"%(nktot))
 # TODO: add the output of reciprocal lattice vectors
 f.write("# List of k-points with weights\n")
-for ik in xrange(nktot):
+for ik in range(nktot):
 kx, ky, kz = kmesh['kpoints'][ik, :]
 kwght = kmesh['kweights'][ik]
 f.write("%15.10f%15.10f%15.10f%20.10f\n"%(kx, ky, kz, kwght))
@@ -266,7 +266,7 @@ def kpoints_output(basename, el_struct):
 f.write("\n# Number of tetrahedra and volume: ntet, volt\n")
 f.write("%i %s\n"%(ntet, volt))
 f.write("# List of tetrahedra: imult, ik1, ..., ik4\n")
-for it in xrange(ntet):
+for it in range(ntet):
 f.write(' '.join(map("{0:d}".format, *kmesh['itet'][it, :])) + '\n')
 except KeyError:
 pass
@@ -315,14 +315,14 @@ def ctrl_output(conf_pars, el_struct, ng):
 header = json.dumps(head_dict, indent=4, separators=(',', ': '))
-print " Storing ctrl-file..."
+print(" Storing ctrl-file...")
 with open(ctrl_fname, 'wt') as f:
 f.write(header + "\n")
 f.write("#END OF HEADER\n")
 f.write("# k-points and weights\n")
 labels = ['kx', 'ky', 'kz', 'kweight']
-out = "".join(map(lambda s: s.center(15), labels))
+out = "".join([s.center(15) for s in labels])
 f.write("#" + out + "\n")
 for ik, kp in enumerate(el_struct.kmesh['kpoints']):
 tmp1 = "".join(map("{0:15.10f}".format, kp))
@@ -330,7 +330,7 @@ def ctrl_output(conf_pars, el_struct, ng):
 f.write(out + "\n")
 f.write("# k-points and weights cartesian\n")
 labels = ['kx', 'ky', 'kz']
-out = "".join(map(lambda s: s.center(15), labels))
+out = "".join([s.center(15) for s in labels])
 f.write("#" + out + "\n")
 for ik, kp in enumerate(el_struct.kmesh['kpoints_cart']):
 out = "".join(map("{0:15.10f}".format, kp))
@@ -381,7 +381,7 @@ def plo_output(conf_pars, el_struct, pshells, pgroups):
 """
 for ig, pgroup in enumerate(pgroups):
 plo_fname = conf_pars.general['basename'] + '.pg%i'%(ig + 1)
-print " Storing PLO-group file '%s'..."%(plo_fname)
+print(" Storing PLO-group file '%s'..."%(plo_fname))
 head_dict = {}
@@ -394,7 +394,7 @@ def plo_output(conf_pars, el_struct, pshells, pgroups):
 # Number of electrons within the window
 head_dict['nelect'] = pgroup.nelect_window(el_struct)
-print " Density within window:", head_dict['nelect']
+print(" Density within window:", head_dict['nelect'])
 head_shells = []
 for ish in pgroup.ishells:
@@ -430,13 +430,13 @@ def plo_output(conf_pars, el_struct, pshells, pgroups):
 f.write("# Eigenvalues within the energy window: %s, %s\n"%(pgroup.emin, pgroup.emax))
 nk, nband, ns_band = el_struct.eigvals.shape
-for isp in xrange(ns_band):
+for isp in range(ns_band):
 f.write("# is = %i\n"%(isp + 1))
-for ik in xrange(nk):
+for ik in range(nk):
 ib1, ib2 = pgroup.ib_win[ik, isp, 0], pgroup.ib_win[ik, isp, 1]
 # Output band indices in Fortran convention!
 f.write(" %i %i\n"%(ib1 + 1, ib2 + 1))
-for ib in xrange(ib1, ib2 + 1):
+for ib in range(ib1, ib2 + 1):
 eigv_ef = el_struct.eigvals[ik, ib, isp] - el_struct.efermi
 f_weight = el_struct.ferw[isp, ik, ib]
 f.write("%13.8f %12.7f\n"%(eigv_ef, f_weight))
@@ -449,15 +449,15 @@ def plo_output(conf_pars, el_struct, pshells, pgroups):
 f.write("# Shell %i\n"%(ish))
 nion, ns, nk, nlm, nb = shell.proj_win.shape
-for isp in xrange(ns):
+for isp in range(ns):
 f.write("# is = %i\n"%(isp + 1))
-for ik in xrange(nk):
+for ik in range(nk):
 f.write("# ik = %i\n"%(ik + 1))
-for ion in xrange(nion):
+for ion in range(nion):
-for ilm in xrange(nlm):
+for ilm in range(nlm):
 ib1, ib2 = pgroup.ib_win[ik, isp, 0], pgroup.ib_win[ik, isp, 1]
 ib_win = ib2 - ib1 + 1
-for ib in xrange(ib_win):
+for ib in range(ib_win):
 p = shell.proj_win[ion, isp, ik, ilm, ib]
 f.write("{0:16.10f}{1:16.10f}\n".format(p.real, p.imag))
 f.write("\n")
@@ -494,7 +494,7 @@ def hk_output(conf_pars, el_struct, pgroups):
 for ig, pgroup in enumerate(pgroups):
 hk_fname = conf_pars.general['basename'] + '.hk%i'%(ig + 1)
-print " Storing HK-group file '%s'..."%(hk_fname)
+print(" Storing HK-group file '%s'..."%(hk_fname))
 head_shells = []
 for ish in pgroup.ishells:
@@ -528,13 +528,13 @@ def hk_output(conf_pars, el_struct, pgroups):
 f.write('%i %i %i %i # atom sort l dim\n'%(head['ion_list'][0],head['ion_sort'][0],head['lorb'],head['ndim']))
 norbs = pgroup.hk.shape[2]
-for isp in xrange(ns_band):
+for isp in range(ns_band):
-for ik in xrange(nk):
+for ik in range(nk):
-for io in xrange(norbs):
+for io in range(norbs):
-for iop in xrange(norbs):
+for iop in range(norbs):
 f.write(" {0:14.10f}".format(pgroup.hk[isp,ik,io,iop].real))
 f.write("\n")
-for io in xrange(norbs):
+for io in range(norbs):
-for iop in xrange(norbs):
+for iop in range(norbs):
 f.write(" {0:14.10f}".format(pgroup.hk[isp,ik,io,iop].imag))
 f.write("\n")

View File

@@ -30,7 +30,7 @@ r"""
 Storage and manipulation of projector groups.
 """
 import numpy as np
-from proj_shell import ComplementShell
+from .proj_shell import ComplementShell
 np.set_printoptions(suppress=True)
 ################################################################################
@@ -89,8 +89,8 @@ class ProjectorGroup:
 assert np.all( n_bands == n_bands[0,0] ), "At each band the same number of bands has to be selected for calculating the complement (to end up with an equal number of orbitals at each k-point)."
 if n_orbs == n_bands[0,0]:
 self.complement = False
-print "\nWARNING: The total number of orbitals in this group is "
+print("\nWARNING: The total number of orbitals in this group is ")
-print "equal to the number of bands. Setting COMPLEMENT to FALSE!\n"
+print("equal to the number of bands. Setting COMPLEMENT to FALSE!\n")
 # Select projectors within the energy window
@@ -112,8 +112,8 @@ class ProjectorGroup:
 self.nelect = 0
 nk, ns_band, _ = self.ib_win.shape
 rspin = 2.0 if ns_band == 1 else 1.0
-for isp in xrange(ns_band):
+for isp in range(ns_band):
-for ik in xrange(nk):
+for ik in range(nk):
 ib1 = self.ib_win[ik, isp, 0]
 ib2 = self.ib_win[ik, isp, 1]+1
 occ = el_struct.ferw[isp, ik, ib1:ib2]
@@ -154,8 +154,8 @@ class ProjectorGroup:
 _, ns, nk, _, _ = self.shells[0].proj_win.shape
 p_mat = np.zeros((ndim, self.nb_max), dtype=np.complex128)
 # Note that 'ns' and 'nk' are the same for all shells
-for isp in xrange(ns):
+for isp in range(ns):
-for ik in xrange(nk):
+for ik in range(nk):
 nb = self.ib_win[ik, isp, 1] - self.ib_win[ik, isp, 0] + 1
 # Combine all projectors of the group to one block projector
 for bl_map in block_maps:
@@ -203,8 +203,8 @@ class ProjectorGroup:
 self.hk = np.zeros((ns,nk,ndim,ndim), dtype=np.complex128)
 # Note that 'ns' and 'nk' are the same for all shells
-for isp in xrange(ns):
+for isp in range(ns):
-for ik in xrange(nk):
+for ik in range(nk):
 bmin = self.ib_win[ik, isp, 0]
 bmax = self.ib_win[ik, isp, 1]+1
@@ -247,7 +247,7 @@ class ProjectorGroup:
 """
-print '\nCalculating complement\n'
+print('\nCalculating complement\n')
 block_maps, ndim = self.get_block_matrix_map()
 _, ns, nk, _, _ = self.shells[0].proj_win.shape
@@ -257,8 +257,8 @@ class ProjectorGroup:
 # Note that 'ns' and 'nk' are the same for all shells
-for isp in xrange(ns):
+for isp in range(ns):
-for ik in xrange(nk):
+for ik in range(nk):
 bmin = self.ib_win[ik, isp, 0]
 bmax = self.ib_win[ik, isp, 1]+1
@@ -362,7 +362,7 @@ class ProjectorGroup:
 _shell = self.shells[ish]
 nion, ns, nk, nlm, nb_max = _shell.proj_win.shape
 ndim = max(ndim, nlm)
-for ion in xrange(nion):
+for ion in range(nion):
 i1_bl = 0
 i2_bl = nlm
 block = {'bmat_range': (i1_bl, i2_bl)}
@@ -378,7 +378,7 @@ class ProjectorGroup:
 for ish in self.ishells:
 _shell = self.shells[ish]
 nion, ns, nk, nlm, nb_max = _shell.proj_win.shape
-for ion in xrange(nion):
+for ion in range(nion):
 i2_bl = i1_bl + nlm
 block = {'bmat_range': (i1_bl, i2_bl)}
 block['shell_ion'] = (ish, ion)
@@ -456,14 +456,14 @@ class ProjectorGroup:
 ib_min = 10000000
 ib_max = 0
-for isp in xrange(ns_band):
+for isp in range(ns_band):
-for ik in xrange(nk):
+for ik in range(nk):
-for ib in xrange(nband):
+for ib in range(nband):
 en = eigvals[ik, ib, isp]
 if en >= self.emin:
 break
 ib1 = ib
-for ib in xrange(ib1, nband):
+for ib in range(ib1, nband):
 en = eigvals[ik, ib, isp]
 if en > self.emax:
 break

View File

@@ -33,9 +33,9 @@ def issue_warning(message):
 """
 Issues a warning.
 """
-print
+print()
-print " !!! WARNING !!!: " + message
+print(" !!! WARNING !!!: " + message)
-print
+print()
 import itertools as it
 import numpy as np
@@ -165,7 +165,7 @@ class ProjectorShell:
 if is_complex:
 raw_matrices = raw_matrices[:, ::2] + raw_matrices[:, 1::2] * 1j
-for io in xrange(nion):
+for io in range(nion):
 i1 = io * nr
 i2 = (io + 1) * nr
 self.tmatrices[io, :, :] = raw_matrices[i1:i2, :]
@@ -193,7 +193,7 @@ class ProjectorShell:
 ndim = nrow
 self.tmatrices = np.zeros((nion, nrow, nm), dtype=np.complex128)
-for io in xrange(nion):
+for io in range(nion):
 self.tmatrices[io, :, :] = raw_matrix
 return ndim
@@ -206,7 +206,7 @@ class ProjectorShell:
 # We still need the matrices for the output
 self.tmatrices = np.zeros((nion, ndim, ndim), dtype=np.complex128)
-for io in xrange(nion):
+for io in range(nion):
 self.tmatrices[io, :, :] = np.identity(ndim, dtype=np.complex128)
 return ndim
@ -236,20 +236,20 @@ class ProjectorShell:
# for a non-collinear case 'ndim' is 'ns * nm' # for a non-collinear case 'ndim' is 'ns * nm'
ndim = self.tmatrices.shape[1] ndim = self.tmatrices.shape[1]
self.proj_arr = np.zeros((nion, ns, nk, ndim, nb), dtype=np.complex128) self.proj_arr = np.zeros((nion, ns, nk, ndim, nb), dtype=np.complex128)
for ik in xrange(nk): for ik in range(nk):
kp = kmesh['kpoints'][ik] kp = kmesh['kpoints'][ik]
for io, ion in enumerate(self.ion_list): for io, ion in enumerate(self.ion_list):
proj_k = np.zeros((ns, nlm, nb), dtype=np.complex128) proj_k = np.zeros((ns, nlm, nb), dtype=np.complex128)
qcoord = structure['qcoords'][ion] qcoord = structure['qcoords'][ion]
# kphase = np.exp(-2.0j * np.pi * np.dot(kp, qcoord)) # kphase = np.exp(-2.0j * np.pi * np.dot(kp, qcoord))
# kphase = 1.0 # kphase = 1.0
for m in xrange(nlm): for m in range(nlm):
# Here we search for the index of the projector with the given isite/l/m indices # Here we search for the index of the projector with the given isite/l/m indices
for ip, par in enumerate(proj_params): for ip, par in enumerate(proj_params):
if par['isite'] - 1 == ion and par['l'] == self.lorb and par['m'] == m: if par['isite'] - 1 == ion and par['l'] == self.lorb and par['m'] == m:
proj_k[:, m, :] = proj_raw[ip, :, ik, :] #* kphase proj_k[:, m, :] = proj_raw[ip, :, ik, :] #* kphase
break break
for isp in xrange(ns): for isp in range(ns):
self.proj_arr[io, isp, ik, :, :] = np.dot(self.tmatrices[io, :, :], proj_k[isp, :, :]) self.proj_arr[io, isp, ik, :, :] = np.dot(self.tmatrices[io, :, :], proj_k[isp, :, :])
else: else:
@ -257,7 +257,7 @@ class ProjectorShell:
self.proj_arr = np.zeros((nion, ns, nk, nlm, nb), dtype=np.complex128) self.proj_arr = np.zeros((nion, ns, nk, nlm, nb), dtype=np.complex128)
for io, ion in enumerate(self.ion_list): for io, ion in enumerate(self.ion_list):
qcoord = structure['qcoords'][ion] qcoord = structure['qcoords'][ion]
for m in xrange(nlm): for m in range(nlm):
# Here we search for the index of the projector with the given isite/l/m indices # Here we search for the index of the projector with the given isite/l/m indices
for ip, par in enumerate(proj_params): for ip, par in enumerate(proj_params):
if par['isite'] - 1 == ion and par['l'] == self.lorb and par['m'] == m: if par['isite'] - 1 == ion and par['l'] == self.lorb and par['m'] == m:
@ -291,8 +291,8 @@ class ProjectorShell:
# Select projectors for a given energy window # Select projectors for a given energy window
ns_band = self.ib_win.shape[1] ns_band = self.ib_win.shape[1]
for isp in xrange(ns): for isp in range(ns):
for ik in xrange(nk): for ik in range(nk):
# TODO: for non-collinear case something else should be done here # TODO: for non-collinear case something else should be done here
is_b = min(isp, ns_band) is_b = min(isp, ns_band)
ib1 = self.ib_win[ik, is_b, 0] ib1 = self.ib_win[ik, is_b, 0]
@ -328,9 +328,9 @@ class ProjectorShell:
ib1 = self.ib_min ib1 = self.ib_min
ib2 = self.ib_max + 1 ib2 = self.ib_max + 1
if site_diag: if site_diag:
for isp in xrange(ns): for isp in range(ns):
for ik, weight, occ in it.izip(it.count(), kweights, occnums[isp, :, :]): for ik, weight, occ in zip(it.count(), kweights, occnums[isp, :, :]):
for io in xrange(nion): for io in range(nion):
proj_k = self.proj_win[io, isp, ik, ...] proj_k = self.proj_win[io, isp, ik, ...]
occ_mats[isp, io, :, :] += np.dot(proj_k * occ[ib1:ib2], occ_mats[isp, io, :, :] += np.dot(proj_k * occ[ib1:ib2],
proj_k.conj().T).real * weight proj_k.conj().T).real * weight
@ -338,9 +338,9 @@ class ProjectorShell:
proj_k.conj().T).real * weight proj_k.conj().T).real * weight
else: else:
proj_k = np.zeros((ndim, nbtot), dtype=np.complex128) proj_k = np.zeros((ndim, nbtot), dtype=np.complex128)
for isp in xrange(ns): for isp in range(ns):
for ik, weight, occ in it.izip(it.count(), kweights, occnums[isp, :, :]): for ik, weight, occ in zip(it.count(), kweights, occnums[isp, :, :]):
for io in xrange(nion): for io in range(nion):
i1 = io * nlm i1 = io * nlm
i2 = (io + 1) * nlm i2 = (io + 1) * nlm
proj_k[i1:i2, :] = self.proj_win[io, isp, ik, ...] proj_k[i1:i2, :] = self.proj_win[io, isp, ik, ...]
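The occupation-matrix hunks above accumulate the k-summed density matrix from the window projectors, n = sum_k w_k P(k) f(k) P(k)^+. A minimal sketch of that accumulation for a single ion and spin channel, with hypothetical shapes and a plain zip in place of itertools.izip:

import numpy as np

def occupation_matrix(proj_win, occnums, kweights):
    # proj_win: (nk, nlm, nb) window projectors, occnums: (nk, nb) band occupations,
    # kweights: (nk,) k-point weights (hypothetical shapes mirroring the loop above)
    nlm = proj_win.shape[1]
    occ_mat = np.zeros((nlm, nlm))
    for proj_k, occ, weight in zip(proj_win, occnums, kweights):
        # P(k) f(k) P(k)^+ accumulated with the k-point weight
        occ_mat += np.dot(proj_k * occ, proj_k.conj().T).real * weight
    return occ_mat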
@ -375,10 +375,10 @@ class ProjectorShell:
occnums = el_struct.ferw occnums = el_struct.ferw
ib1 = self.ib_min ib1 = self.ib_min
ib2 = self.ib_max + 1 ib2 = self.ib_max + 1
for isp in xrange(ns): for isp in range(ns):
for ik, weight, occ, eigk in it.izip(it.count(), kweights, occnums[isp, :, :], for ik, weight, occ, eigk in zip(it.count(), kweights, occnums[isp, :, :],
el_struct.eigvals[:, ib1:ib2, isp]): el_struct.eigvals[:, ib1:ib2, isp]):
for io in xrange(nion): for io in range(nion):
proj_k = self.proj_win[io, isp, ik, ...] proj_k = self.proj_win[io, isp, ik, ...]
loc_ham[isp, io, :, :] += np.dot(proj_k * (eigk - el_struct.efermi), loc_ham[isp, io, :, :] += np.dot(proj_k * (eigk - el_struct.efermi),
proj_k.conj().T).real * weight proj_k.conj().T).real * weight
@ -410,13 +410,13 @@ class ProjectorShell:
ne = len(emesh) ne = len(emesh)
dos = np.zeros((ne, ns, nion, nlm)) dos = np.zeros((ne, ns, nion, nlm))
w_k = np.zeros((nk, nb_max, ns, nion, nlm), dtype=np.complex128) w_k = np.zeros((nk, nb_max, ns, nion, nlm), dtype=np.complex128)
for isp in xrange(ns): for isp in range(ns):
for ik in xrange(nk): for ik in range(nk):
is_b = min(isp, ns_band) is_b = min(isp, ns_band)
ib1 = self.ib_win[ik, is_b, 0] ib1 = self.ib_win[ik, is_b, 0]
ib2 = self.ib_win[ik, is_b, 1] + 1 ib2 = self.ib_win[ik, is_b, 1] + 1
for ib_g in xrange(ib1, ib2): for ib_g in range(ib1, ib2):
for io in xrange(nion): for io in range(nion):
# Note the difference between 'ib' and 'ibn': # Note the difference between 'ib' and 'ibn':
# 'ib' counts from 0 to 'nb_k - 1' # 'ib' counts from 0 to 'nb_k - 1'
# 'ibn' counts from 'ib1 - ib_min' to 'ib2 - ib_min' # 'ibn' counts from 'ib1 - ib_min' to 'ib2 - ib_min'
@ -429,13 +429,13 @@ class ProjectorShell:
itt = el_struct.kmesh['itet'].T itt = el_struct.kmesh['itet'].T
# k-indices are starting from 0 in Python # k-indices are starting from 0 in Python
itt[1:, :] -= 1 itt[1:, :] -= 1
for isp in xrange(ns): for isp in range(ns):
for ib, eigk in enumerate(el_struct.eigvals[:, self.ib_min:self.ib_max+1, isp].T): for ib, eigk in enumerate(el_struct.eigvals[:, self.ib_min:self.ib_max+1, isp].T):
for ie, e in enumerate(emesh): for ie, e in enumerate(emesh):
eigk_ef = eigk - el_struct.efermi eigk_ef = eigk - el_struct.efermi
cti = atm.dos_tetra_weights_3d(eigk_ef, e, itt) cti = atm.dos_tetra_weights_3d(eigk_ef, e, itt)
for im in xrange(nlm): for im in range(nlm):
for io in xrange(nion): for io in range(nion):
dos[ie, isp, io, im] += np.sum((cti * w_k[itt[1:, :], ib, isp, io, im].real).sum(0) * itt[0, :]) dos[ie, isp, io, im] += np.sum((cti * w_k[itt[1:, :], ib, isp, io, im].real).sum(0) * itt[0, :])
dos *= 2 * el_struct.kmesh['volt'] dos *= 2 * el_struct.kmesh['volt']
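The DOS hunk above distributes the projector weights w_k over the energy mesh with tetrahedron weights from atm.dos_tetra_weights_3d. As a rough cross-check, the same projected DOS can be approximated with simple Gaussian broadening instead of tetrahedra; a hedged sketch with hypothetical inputs:

import numpy as np

def dos_gaussian(eigvals_ef, w_k_orb, kweights, emesh, delta=0.05):
    # eigvals_ef: (nk, nb) eigenvalues relative to E_F, w_k_orb: (nk, nb) orbital weights,
    # kweights: (nk,) k-point weights, delta: broadening in the same energy units
    dos = np.zeros(len(emesh))
    norm = delta * np.sqrt(np.pi)
    for ie, e in enumerate(emesh):
        g = np.exp(-((e - eigvals_ef) / delta) ** 2) / norm
        dos[ie] = np.sum(kweights[:, None] * w_k_orb.real * g)
    return dos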


@ -31,7 +31,7 @@ import time
import signal import signal
import sys import sys
import pytriqs.utility.mpi as mpi import pytriqs.utility.mpi as mpi
import converter from . import converter
from shutil import copyfile from shutil import copyfile
xch = sys.excepthook xch = sys.excepthook
@ -63,7 +63,7 @@ def is_vasp_running(vasp_pid):
if mpi.is_master_node(): if mpi.is_master_node():
try: try:
os.kill(vasp_pid, 0) os.kill(vasp_pid, 0)
except OSError, e: except OSError as e:
pid_exists = e.errno == errno.EPERM pid_exists = e.errno == errno.EPERM
else: else:
pid_exists = True pid_exists = True
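The hunk above keeps the usual POSIX trick of probing a pid with signal 0 to decide whether VASP is still running; a standalone sketch of the same check:

import errno
import os

def pid_exists(pid):
    # Sending signal 0 performs error checking only; no signal is delivered.
    try:
        os.kill(pid, 0)
    except OSError as e:
        # EPERM: the process exists but we are not allowed to signal it.
        return e.errno == errno.EPERM
    return True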
@ -85,7 +85,7 @@ def get_dft_energy():
try: try:
dft_energy = float(line.split()[2]) dft_energy = float(line.split()[2])
except ValueError: except ValueError:
print "Cannot read energy from OSZICAR, setting it to zero" print("Cannot read energy from OSZICAR, setting it to zero")
dft_energy = 0.0 dft_energy = 0.0
return dft_energy return dft_energy
@ -111,7 +111,7 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
iter = 0 iter = 0
while vasp_running: while vasp_running:
if debug: print bcolors.RED + "rank %s"%(mpi.rank) + bcolors.ENDC if debug: print(bcolors.RED + "rank %s"%(mpi.rank) + bcolors.ENDC)
mpi.report(" Waiting for VASP lock to disappear...") mpi.report(" Waiting for VASP lock to disappear...")
mpi.barrier() mpi.barrier()
while is_vasp_lock_present(): while is_vasp_lock_present():
@ -125,30 +125,30 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
# Tell VASP to stop if the maximum number of iterations is reached # Tell VASP to stop if the maximum number of iterations is reached
if debug: print bcolors.MAGENTA + "rank %s"%(mpi.rank) + bcolors.ENDC if debug: print(bcolors.MAGENTA + "rank %s"%(mpi.rank) + bcolors.ENDC)
err = 0 err = 0
exc = None exc = None
if debug: print bcolors.BLUE + "plovasp: rank %s"%(mpi.rank) + bcolors.ENDC if debug: print(bcolors.BLUE + "plovasp: rank %s"%(mpi.rank) + bcolors.ENDC)
if mpi.is_master_node(): if mpi.is_master_node():
converter.generate_and_output_as_text(cfg_file, vasp_dir='./') converter.generate_and_output_as_text(cfg_file, vasp_dir='./')
# Read energy from OSZICAR # Read energy from OSZICAR
dft_energy = get_dft_energy() dft_energy = get_dft_energy()
mpi.barrier() mpi.barrier()
if debug: print bcolors.GREEN + "rank %s"%(mpi.rank) + bcolors.ENDC if debug: print(bcolors.GREEN + "rank %s"%(mpi.rank) + bcolors.ENDC)
corr_energy, dft_dc = dmft_cycle() corr_energy, dft_dc = dmft_cycle()
mpi.barrier() mpi.barrier()
if mpi.is_master_node(): if mpi.is_master_node():
total_energy = dft_energy + corr_energy - dft_dc total_energy = dft_energy + corr_energy - dft_dc
print print()
print "="*80 print("="*80)
print " Total energy: ", total_energy print(" Total energy: ", total_energy)
print " DFT energy: ", dft_energy print(" DFT energy: ", dft_energy)
print " Corr. energy: ", corr_energy print(" Corr. energy: ", corr_energy)
print " DFT DC: ", dft_dc print(" DFT DC: ", dft_dc)
print "="*80 print("="*80)
print print()
# check if we should do additional VASP calculations # check if we should do additional VASP calculations
# in the standard VASP version, VASP writes out GAMMA itself # in the standard VASP version, VASP writes out GAMMA itself
@ -176,8 +176,8 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
copyfile(src='GAMMA_recent',dst='GAMMA') copyfile(src='GAMMA_recent',dst='GAMMA')
iter += 1 iter += 1
if iter == n_iter: if iter == n_iter:
print "\n Maximum number of iterations reached." print("\n Maximum number of iterations reached.")
print " Aborting VASP iterations...\n" print(" Aborting VASP iterations...\n")
f_stop = open('STOPCAR', 'wt') f_stop = open('STOPCAR', 'wt')
f_stop.write("LABORT = .TRUE.\n") f_stop.write("LABORT = .TRUE.\n")
f_stop.close() f_stop.close()
@ -200,28 +200,28 @@ def main():
vasp_pid = int(sys.argv[1]) vasp_pid = int(sys.argv[1])
except (ValueError, KeyError): except (ValueError, KeyError):
if mpi.is_master_node(): if mpi.is_master_node():
print "ERROR: VASP process pid must be provided as the first argument" print("ERROR: VASP process pid must be provided as the first argument")
raise raise
try: try:
n_iter = int(sys.argv[2]) n_iter = int(sys.argv[2])
except (ValueError, KeyError): except (ValueError, KeyError):
if mpi.is_master_node(): if mpi.is_master_node():
print "ERROR: Number of iterations must be provided as the second argument" print("ERROR: Number of iterations must be provided as the second argument")
raise raise
try: try:
n_iter_dft = int(sys.argv[3]) n_iter_dft = int(sys.argv[3])
except (ValueError, KeyError): except (ValueError, KeyError):
if mpi.is_master_node(): if mpi.is_master_node():
print "ERROR: Number of VASP iterations with fixed charge density must be provided as the third argument" print("ERROR: Number of VASP iterations with fixed charge density must be provided as the third argument")
raise raise
try: try:
dmft_script = re.sub("\.py$", "", sys.argv[4]) dmft_script = re.sub("\.py$", "", sys.argv[4])
except: except:
if mpi.is_master_node(): if mpi.is_master_node():
print "ERROR: User-defined DMFT script must be provided as the fourth argument" print("ERROR: User-defined DMFT script must be provided as the fourth argument")
raise raise
# Optional parameter: config-file name # Optional parameter: config-file name


@ -83,12 +83,12 @@ class VaspData:
except (IOError, StopIteration): except (IOError, StopIteration):
self.eigenval.eigs = None self.eigenval.eigs = None
self.eigenval.ferw = None self.eigenval.ferw = None
print "!!! WARNING !!!: Error reading from EIGENVAL, trying LOCPROJ" print("!!! WARNING !!!: Error reading from EIGENVAL, trying LOCPROJ")
try: try:
self.doscar.from_file(vasp_dir) self.doscar.from_file(vasp_dir)
except (IOError, StopIteration): except (IOError, StopIteration):
if efermi_required: if efermi_required:
print "!!! WARNING !!!: Error reading from Efermi from DOSCAR, trying LOCPROJ" print("!!! WARNING !!!: Error reading from Efermi from DOSCAR, trying LOCPROJ")
try: try:
self.plocar.efermi self.plocar.efermi
self.doscar.efermi = self.plocar.efermi self.doscar.efermi = self.plocar.efermi
@ -96,7 +96,7 @@ class VaspData:
raise Exception("Efermi cannot be read from DOSCAR or LOCPROJ") raise Exception("Efermi cannot be read from DOSCAR or LOCPROJ")
else: else:
# TODO: This a hack. Find out a way to determine ncdij without DOSCAR # TODO: This a hack. Find out a way to determine ncdij without DOSCAR
print "!!! WARNING !!!: Error reading from DOSCAR, taking Efermi from config" print("!!! WARNING !!!: Error reading from DOSCAR, taking Efermi from config")
self.doscar.ncdij = self.plocar.nspin self.doscar.ncdij = self.plocar.nspin
################################################################################ ################################################################################
@ -161,10 +161,10 @@ class Plocar:
# Read the first line of LOCPROJ to get the dimensions # Read the first line of LOCPROJ to get the dimensions
with open(locproj_filename, 'rt') as f: with open(locproj_filename, 'rt') as f:
line = f.readline() line = f.readline()
nproj, nspin, nk, nband = map(int, line.split()) nproj, nspin, nk, nband = list(map(int, line.split()))
plo = np.zeros((nproj, nspin, nk, nband), dtype=np.complex128) plo = np.zeros((nproj, nspin, nk, nband), dtype=np.complex128)
proj_params = [{} for i in xrange(nproj)] proj_params = [{} for i in range(nproj)]
iproj_site = 0 iproj_site = 0
is_first_read = True is_first_read = True
@ -173,7 +173,7 @@ class Plocar:
while line: while line:
isite = int(line.split()[1]) isite = int(line.split()[1])
if not is_first_read: if not is_first_read:
for il in xrange(norb): for il in range(norb):
ip_new = iproj_site * norb + il ip_new = iproj_site * norb + il
ip_prev = (iproj_site - 1) * norb + il ip_prev = (iproj_site - 1) * norb + il
proj_params[ip_new]['label'] = proj_params[ip_prev]['label'] proj_params[ip_new]['label'] = proj_params[ip_prev]['label']
@ -181,8 +181,8 @@ class Plocar:
proj_params[ip_new]['l'] = proj_params[ip_prev]['l'] proj_params[ip_new]['l'] = proj_params[ip_prev]['l']
proj_params[ip_new]['m'] = proj_params[ip_prev]['m'] proj_params[ip_new]['m'] = proj_params[ip_prev]['m']
for ispin in xrange(nspin): for ispin in range(nspin):
for ik in xrange(nk): for ik in range(nk):
# Parse the orbital labels and convert them to l,m-indices # Parse the orbital labels and convert them to l,m-indices
line = self.search_for(f, "^ *band") line = self.search_for(f, "^ *band")
if is_first_read: if is_first_read:
@ -202,10 +202,10 @@ class Plocar:
is_first_read = False is_first_read = False
# Read the block of nk * ns * nband complex numbers # Read the block of nk * ns * nband complex numbers
for ib in xrange(nband): for ib in range(nband):
line = f.readline() line = f.readline()
rtmp = map(float, line.split()[1:]) rtmp = list(map(float, line.split()[1:]))
for il in xrange(norb): for il in range(norb):
ctmp = complex(rtmp[2 * il], rtmp[2 * il + 1]) ctmp = complex(rtmp[2 * il], rtmp[2 * il + 1])
plo[iproj_site * norb + il, ispin, ik, ib] = ctmp plo[iproj_site * norb + il, ispin, ik, ib] = ctmp
@ -213,9 +213,9 @@ class Plocar:
iproj_site += 1 iproj_site += 1
line = self.search_for(f, "^ *ISITE") line = self.search_for(f, "^ *ISITE")
print "Read parameters:" print("Read parameters:")
for il, par in enumerate(proj_params): for il, par in enumerate(proj_params):
print il, " -> ", par print(il, " -> ", par)
return proj_params, plo return proj_params, plo
@ -242,17 +242,17 @@ class Plocar:
line = f.readline() line = f.readline()
line = line.split("#")[0] line = line.split("#")[0]
sline = line.split() sline = line.split()
self.ncdij, nk, self.nband, nproj = map(int, sline[:4]) self.ncdij, nk, self.nband, nproj = list(map(int, sline[:4]))
self.nspin = 1 if self.ncdij == 1 else 2 self.nspin = 1 if self.ncdij == 1 else 2
self.nspin_band = 2 if self.ncdij == 2 else 1 self.nspin_band = 2 if self.ncdij == 2 else 1
try: try:
self.efermi = float(sline[4]) self.efermi = float(sline[4])
except: except:
print "!!! WARNING !!!: Error reading E-Fermi from LOCPROJ, trying DOSCAR" print("!!! WARNING !!!: Error reading E-Fermi from LOCPROJ, trying DOSCAR")
plo = np.zeros((nproj, self.nspin, nk, self.nband), dtype=np.complex128) plo = np.zeros((nproj, self.nspin, nk, self.nband), dtype=np.complex128)
proj_params = [{} for i in xrange(nproj)] proj_params = [{} for i in range(nproj)]
iproj_site = 0 iproj_site = 0
is_first_read = True is_first_read = True
@ -284,26 +284,26 @@ class Plocar:
patt = re.compile("^orbital") patt = re.compile("^orbital")
# FIXME: fix spin indices for NCDIJ = 4 (non-collinear) # FIXME: fix spin indices for NCDIJ = 4 (non-collinear)
assert self.ncdij < 4, "Non-collinear case is not implemented" assert self.ncdij < 4, "Non-collinear case is not implemented"
for ispin in xrange(self.nspin): for ispin in range(self.nspin):
for ik in xrange(nk): for ik in range(nk):
for ib in xrange(self.nband): for ib in range(self.nband):
line = "" line = ""
while not line: while not line:
line = f.readline().strip() line = f.readline().strip()
sline = line.split() sline = line.split()
isp_, ik_, ib_ = map(int, sline[1:4]) isp_, ik_, ib_ = list(map(int, sline[1:4]))
assert isp_ == ispin + 1 and ik_ == ik + 1 and ib_ == ib + 1, "Inconsistency in reading LOCPROJ" assert isp_ == ispin + 1 and ik_ == ik + 1 and ib_ == ib + 1, "Inconsistency in reading LOCPROJ"
self.eigs[ik, ib, ispin] = float(sline[4]) self.eigs[ik, ib, ispin] = float(sline[4])
self.ferw[ik, ib, ispin] = float(sline[5]) self.ferw[ik, ib, ispin] = float(sline[5])
for ip in xrange(nproj): for ip in range(nproj):
line = f.readline() line = f.readline()
sline = line.split() sline = line.split()
ctmp = complex(float(sline[1]), float(sline[2])) ctmp = complex(float(sline[1]), float(sline[2]))
plo[ip, ispin, ik, ib] = ctmp plo[ip, ispin, ik, ib] = ctmp
print "Read parameters:" print("Read parameters:")
for il, par in enumerate(proj_params): for il, par in enumerate(proj_params):
print il, " -> ", par print(il, " -> ", par)
return proj_params, plo return proj_params, plo
@ -366,16 +366,16 @@ class Poscar:
f = read_lines(vasp_dir + poscar_filename) f = read_lines(vasp_dir + poscar_filename)
# Comment line # Comment line
comment = f.next().rstrip() comment = next(f).rstrip()
print " Found POSCAR, title line: %s"%(comment) print(" Found POSCAR, title line: %s"%(comment))
# Read scale # Read scale
sline = readline_remove_comments() sline = readline_remove_comments()
ascale = float(sline) ascale = float(sline)
# Read lattice vectors # Read lattice vectors
self.a_brav = np.zeros((3, 3)) self.a_brav = np.zeros((3, 3))
for ia in xrange(3): for ia in range(3):
sline = readline_remove_comments() sline = readline_remove_comments()
self.a_brav[ia, :] = map(float, sline.split()) self.a_brav[ia, :] = list(map(float, sline.split()))
# Negative scale means that it is a volume scale # Negative scale means that it is a volume scale
if ascale < 0: if ascale < 0:
vscale = -ascale vscale = -ascale
@ -389,13 +389,13 @@ class Poscar:
sline = readline_remove_comments() sline = readline_remove_comments()
try: try:
# Old v4.6 format: no element names # Old v4.6 format: no element names
self.nions = map(int, sline.split()) self.nions = list(map(int, sline.split()))
self.el_names = ['El%i'%(i) for i in xrange(len(self.nions))] self.el_names = ['El%i'%(i) for i in range(len(self.nions))]
except ValueError: except ValueError:
# New v5.x format: read element names first # New v5.x format: read element names first
self.el_names = sline.split() self.el_names = sline.split()
sline = readline_remove_comments() sline = readline_remove_comments()
self.nions = map(int, sline.split()) self.nions = list(map(int, sline.split()))
# Set the number of atom sorts (types) and the total # Set the number of atom sorts (types) and the total
# number of atoms in the unit cell # number of atoms in the unit cell
@ -415,23 +415,23 @@ class Poscar:
# Read atomic positions # Read atomic positions
self.q_types = [] self.q_types = []
self.type_of_ion = [] self.type_of_ion = []
for it in xrange(self.ntypes): for it in range(self.ntypes):
# Array mapping ion index to type # Array mapping ion index to type
self.type_of_ion += self.nions[it] * [it] self.type_of_ion += self.nions[it] * [it]
q_at_it = np.zeros((self.nions[it], 3)) q_at_it = np.zeros((self.nions[it], 3))
for iq in xrange(self.nions[it]): for iq in range(self.nions[it]):
sline = readline_remove_comments() sline = readline_remove_comments()
qcoord = map(float, sline.split()[:3]) qcoord = list(map(float, sline.split()[:3]))
if cartesian: if cartesian:
qcoord = np.dot(brec, qcoord) qcoord = np.dot(brec, qcoord)
q_at_it[iq, :] = qcoord q_at_it[iq, :] = qcoord
self.q_types.append(q_at_it) self.q_types.append(q_at_it)
print " Total number of ions:", self.nq print(" Total number of ions:", self.nq)
print " Number of types:", self.ntypes print(" Number of types:", self.ntypes)
print " Number of ions for each type:", self.nions print(" Number of ions for each type:", self.nions)
# print # print
# print " Coords:" # print " Coords:"
@ -485,23 +485,23 @@ class Kpoints:
ibz_file = read_lines(vasp_dir + ibz_filename) ibz_file = read_lines(vasp_dir + ibz_filename)
# Skip comment line # Skip comment line
line = ibz_file.next() line = next(ibz_file)
# Number of k-points # Number of k-points
line = ibz_file.next() line = next(ibz_file)
self.nktot = int(line.strip().split()[0]) self.nktot = int(line.strip().split()[0])
print print()
print " {0:>26} {1:d}".format("Total number of k-points:", self.nktot) print(" {0:>26} {1:d}".format("Total number of k-points:", self.nktot))
self.kpts = np.zeros((self.nktot, 3)) self.kpts = np.zeros((self.nktot, 3))
self.kwghts = np.zeros((self.nktot)) self.kwghts = np.zeros((self.nktot))
# Skip comment line # Skip comment line
line = ibz_file.next() line = next(ibz_file)
for ik in xrange(self.nktot): for ik in range(self.nktot):
line = ibz_file.next() line = next(ibz_file)
sline = line.strip().split() sline = line.strip().split()
self.kpts[ik, :] = map(float, sline[:3]) self.kpts[ik, :] = list(map(float, sline[:3]))
self.kwghts[ik] = float(sline[3]) self.kwghts[ik] = float(sline[3])
self.kwghts /= self.nktot self.kwghts /= self.nktot
@ -509,23 +509,23 @@ class Kpoints:
# Attempt to read tetrahedra # Attempt to read tetrahedra
# Skip comment line ("Tetrahedra") # Skip comment line ("Tetrahedra")
try: try:
line = ibz_file.next() line = next(ibz_file)
# Number of tetrahedra and volume = 1/(6*nkx*nky*nkz) # Number of tetrahedra and volume = 1/(6*nkx*nky*nkz)
line = ibz_file.next() line = next(ibz_file)
sline = line.split() sline = line.split()
self.ntet = int(sline[0]) self.ntet = int(sline[0])
self.volt = float(sline[1]) self.volt = float(sline[1])
print " {0:>26} {1:d}".format("Total number of tetrahedra:", self.ntet) print(" {0:>26} {1:d}".format("Total number of tetrahedra:", self.ntet))
# Traditionally, itet[it, 0] contains multiplicity # Traditionally, itet[it, 0] contains multiplicity
self.itet = np.zeros((self.ntet, 5), dtype=int) self.itet = np.zeros((self.ntet, 5), dtype=int)
for it in xrange(self.ntet): for it in range(self.ntet):
line = ibz_file.next() line = next(ibz_file)
self.itet[it, :] = map(int, line.split()[:5]) self.itet[it, :] = list(map(int, line.split()[:5]))
except StopIteration, ValueError: except (StopIteration, ValueError):
print " No tetrahedron data found in %s. Skipping..."%(ibz_filename) print(" No tetrahedron data found in %s. Skipping..."%(ibz_filename))
self.ntet = 0 self.ntet = 0
# data = { 'nktot': nktot, # data = { 'nktot': nktot,
@ -572,14 +572,14 @@ class Eigenval:
self.ispin = int(sline[3]) self.ispin = int(sline[3])
# Second line: cell volume and lengths of lattice vectors (skip) # Second line: cell volume and lengths of lattice vectors (skip)
sline = f.next() sline = next(f)
# Third line: temperature (skip) # Third line: temperature (skip)
sline = f.next() sline = next(f)
# Fourth and fifth line: useless # Fourth and fifth line: useless
sline = f.next() sline = next(f)
sline = f.next() sline = next(f)
# Sixth line: NELECT, NKTOT, NBTOT # Sixth line: NELECT, NKTOT, NBTOT
sline = f.next().split() sline = next(f).split()
@ -593,16 +593,16 @@ class Eigenval:
self.eigs = np.zeros((self.nktot, self.nband, self.ispin)) self.eigs = np.zeros((self.nktot, self.nband, self.ispin))
self.ferw = np.zeros((self.nktot, self.nband, self.ispin)) self.ferw = np.zeros((self.nktot, self.nband, self.ispin))
for ik in xrange(self.nktot): for ik in range(self.nktot):
sline = f.next() # Empty line sline = next(f) # Empty line
sline = f.next() # k-point info sline = next(f) # k-point info
tmp = map(float, sline.split()) tmp = list(map(float, sline.split()))
self.kpts[ik, :] = tmp[:3] self.kpts[ik, :] = tmp[:3]
self.kwghts[ik] = tmp[3] self.kwghts[ik] = tmp[3]
for ib in xrange(self.nband): for ib in range(self.nband):
sline = f.next().split() sline = next(f).split()
tmp = map(float, sline) tmp = list(map(float, sline))
assert len(tmp) == 2 * self.ispin + 1, "EIGENVAL file is incorrect (probably from old versions of VASP)" assert len(tmp) == 2 * self.ispin + 1, "EIGENVAL file is incorrect (probably from old versions of VASP)"
self.eigs[ik, ib, :] = tmp[1:self.ispin+1] self.eigs[ik, ib, :] = tmp[1:self.ispin+1]
self.ferw[ik, ib, :] = tmp[self.ispin+1:] self.ferw[ik, ib, :] = tmp[self.ispin+1:]
@ -639,8 +639,8 @@ class Doscar:
self.ncdij = int(sline[3]) self.ncdij = int(sline[3])
# Skip next 4 lines # Skip next 4 lines
for _ in xrange(4): for _ in range(4):
sline = f.next() sline = next(f)
# Sixth line: EMAX, EMIN, NEDOS, EFERMI, 1.0 # Sixth line: EMAX, EMIN, NEDOS, EFERMI, 1.0
sline = f.next().split() sline = next(f).split()
@ -666,54 +666,54 @@ def read_symmcar(vasp_dir, symm_filename='SYMMCAR'):
symmcar_exist = False symmcar_exist = False
sym_file = read_lines(vasp_dir + symm_filename) sym_file = read_lines(vasp_dir + symm_filename)
line = sym_file.next() line = next(sym_file)
nrot = extract_int_par('NROT') nrot = extract_int_par('NROT')
line = sym_file.next() line = next(sym_file)
ntrans = extract_int_par('NPCELL') ntrans = extract_int_par('NPCELL')
# Lmax # Lmax
line = sym_file.next() line = next(sym_file)
lmax = extract_int_par('LMAX') lmax = extract_int_par('LMAX')
mmax = 2 * lmax + 1 mmax = 2 * lmax + 1
# Nion # Nion
line = sym_file.next() line = next(sym_file)
nion = extract_int_par('NION') nion = extract_int_par('NION')
print " {0:>26} {1:d}".format("Number of rotations:", nrot) print(" {0:>26} {1:d}".format("Number of rotations:", nrot))
print " {0:>26} {1:d}".format("Number of translations:", ntrans) print(" {0:>26} {1:d}".format("Number of translations:", ntrans))
print " {0:>26} {1:d}".format("Number of ions:", nion) print(" {0:>26} {1:d}".format("Number of ions:", nion))
print " {0:>26} {1:d}".format("L_max:", lmax) print(" {0:>26} {1:d}".format("L_max:", lmax))
rot_mats = np.zeros((nrot, lmax+1, mmax, mmax)) rot_mats = np.zeros((nrot, lmax+1, mmax, mmax))
rot_map = np.zeros((nrot, ntrans, nion), dtype=np.int32) rot_map = np.zeros((nrot, ntrans, nion), dtype=np.int32)
for irot in xrange(nrot): for irot in range(nrot):
# Empty line # Empty line
line = sym_file.next() line = next(sym_file)
# IROT index (skip it) # IROT index (skip it)
line = sym_file.next() line = next(sym_file)
# ISYMOP matrix (can be also skipped) # ISYMOP matrix (can be also skipped)
line = sym_file.next() line = next(sym_file)
line = sym_file.next() line = next(sym_file)
line = sym_file.next() line = next(sym_file)
# Skip comment " Permutation map..." # Skip comment " Permutation map..."
line = sym_file.next() line = next(sym_file)
# Permutations (in chunks of 20 indices per line) # Permutations (in chunks of 20 indices per line)
for it in xrange(ntrans): for it in range(ntrans):
for ibl in xrange((nion - 1) / 20 + 1): for ibl in range((nion - 1) // 20 + 1):
i1 = ibl * 20 i1 = ibl * 20
i2 = (ibl + 1) * 20 i2 = (ibl + 1) * 20
line = sym_file.next() line = next(sym_file)
rot_map[irot, it, i1:i2] = map(int, line.split()) rot_map[irot, it, i1:i2] = list(map(int, line.split()))
for l in xrange(lmax + 1): for l in range(lmax + 1):
mmax = 2 * l + 1 mmax = 2 * l + 1
# Comment: "L = ..." # Comment: "L = ..."
line = sym_file.next() line = next(sym_file)
for m in xrange(mmax): for m in range(mmax):
line = sym_file.next() line = next(sym_file)
rot_mats[irot, l, m, :mmax] = map(float, line.split()[:mmax]) rot_mats[irot, l, m, :mmax] = list(map(float, line.split()[:mmax]))
data.update({ 'nrot': nrot, 'ntrans': ntrans, data.update({ 'nrot': nrot, 'ntrans': ntrans,
'lmax': lmax, 'nion': nion, 'lmax': lmax, 'nion': nion,


@ -27,7 +27,7 @@
from types import * from types import *
import numpy import numpy
from pytriqs.archive import * from pytriqs.archive import *
from converter_tools import * from .converter_tools import *
import os.path import os.path
try: try:
import simplejson as json import simplejson as json
@ -150,7 +150,7 @@ class VaspConverter(ConverterTools):
# R is a generator : each R.Next() will return the next number in the file # R is a generator : each R.Next() will return the next number in the file
jheader, rf = self.read_header_and_data(self.ctrl_file) jheader, rf = self.read_header_and_data(self.ctrl_file)
print jheader print(jheader)
ctrl_head = json.loads(jheader) ctrl_head = json.loads(jheader)
ng = ctrl_head['ngroups'] ng = ctrl_head['ngroups']
@ -163,12 +163,12 @@ class VaspConverter(ConverterTools):
kpts_cart = numpy.zeros((n_k, 3)) kpts_cart = numpy.zeros((n_k, 3))
bz_weights = numpy.zeros(n_k) bz_weights = numpy.zeros(n_k)
try: try:
for ik in xrange(n_k): for ik in range(n_k):
kx, ky, kz = rf.next(), rf.next(), rf.next() kx, ky, kz = next(rf), next(rf), next(rf)
kpts[ik, :] = kx, ky, kz kpts[ik, :] = kx, ky, kz
bz_weights[ik] = rf.next() bz_weights[ik] = next(rf)
for ik in xrange(n_k): for ik in range(n_k):
kx, ky, kz = rf.next(), rf.next(), rf.next() kx, ky, kz = next(rf), next(rf), next(rf)
kpts_cart[ik, :] = kx, ky, kz kpts_cart[ik, :] = kx, ky, kz
except StopIteration: except StopIteration:
raise "VaspConverter: error reading %s"%self.ctrl_file raise "VaspConverter: error reading %s"%self.ctrl_file
@ -186,7 +186,7 @@ class VaspConverter(ConverterTools):
assert ng == 1, "Only one group is allowed at the moment" assert ng == 1, "Only one group is allowed at the moment"
try: try:
for ig in xrange(ng): for ig in range(ng):
gr_file = self.basename + '.pg%i'%(ig + 1) gr_file = self.basename + '.pg%i'%(ig + 1)
jheader, rf = self.read_header_and_data(gr_file) jheader, rf = self.read_header_and_data(gr_file)
gr_head = json.loads(jheader) gr_head = json.loads(jheader)
@ -203,9 +203,9 @@ class VaspConverter(ConverterTools):
shells = [] shells = []
corr_shells = [] corr_shells = []
shion_to_shell = [[] for ish in xrange(len(p_shells))] shion_to_shell = [[] for ish in range(len(p_shells))]
cr_shion_to_shell = [[] for ish in xrange(len(p_shells))] cr_shion_to_shell = [[] for ish in range(len(p_shells))]
shorbs_to_globalorbs = [[] for ish in xrange(len(p_shells))] shorbs_to_globalorbs = [[] for ish in range(len(p_shells))]
last_dimension = 0 last_dimension = 0
crshorbs_to_globalorbs = [] crshorbs_to_globalorbs = []
icsh = 0 icsh = 0
@ -243,7 +243,7 @@ class VaspConverter(ConverterTools):
n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(self, corr_shells) n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(self, corr_shells)
if mpi.is_master_node(): if mpi.is_master_node():
print " No. of inequivalent shells:", n_inequiv_shells print(" No. of inequivalent shells:", n_inequiv_shells)
# NB!: these rotation matrices are specific to Wien2K! Set to identity in VASP # NB!: these rotation matrices are specific to Wien2K! Set to identity in VASP
use_rotations = 1 use_rotations = 1
@ -272,19 +272,19 @@ class VaspConverter(ConverterTools):
# else: # else:
hopping = numpy.zeros([n_k, n_spin_blocs, nb_max, nb_max], numpy.complex_) hopping = numpy.zeros([n_k, n_spin_blocs, nb_max, nb_max], numpy.complex_)
f_weights = numpy.zeros([n_k, n_spin_blocs, nb_max], numpy.complex_) f_weights = numpy.zeros([n_k, n_spin_blocs, nb_max], numpy.complex_)
band_window = [numpy.zeros((n_k, 2), dtype=int) for isp in xrange(n_spin_blocs)] band_window = [numpy.zeros((n_k, 2), dtype=int) for isp in range(n_spin_blocs)]
n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int) n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int)
for isp in xrange(n_spin_blocs): for isp in range(n_spin_blocs):
for ik in xrange(n_k): for ik in range(n_k):
ib1, ib2 = int(rf.next()), int(rf.next()) ib1, ib2 = int(next(rf)), int(next(rf))
band_window[isp][ik, :2] = ib1, ib2 band_window[isp][ik, :2] = ib1, ib2
nb = ib2 - ib1 + 1 nb = ib2 - ib1 + 1
n_orbitals[ik, isp] = nb n_orbitals[ik, isp] = nb
for ib in xrange(nb): for ib in range(nb):
hopping[ik, isp, ib, ib] = rf.next() hopping[ik, isp, ib, ib] = next(rf)
f_weights[ik, isp, ib] = rf.next() f_weights[ik, isp, ib] = next(rf)
if self.proj_or_hk == 'hk': if self.proj_or_hk == 'hk':
hopping = numpy.zeros([n_k, n_spin_blocs, n_orbs, n_orbs], numpy.complex_) hopping = numpy.zeros([n_k, n_spin_blocs, n_orbs, n_orbs], numpy.complex_)
@ -298,15 +298,15 @@ class VaspConverter(ConverterTools):
f_hk.readline() f_hk.readline()
count += 1 count += 1
rf_hk = self.read_data(f_hk) rf_hk = self.read_data(f_hk)
for isp in xrange(n_spin_blocs): for isp in range(n_spin_blocs):
for ik in xrange(n_k): for ik in range(n_k):
n_orbitals[ik, isp] = n_orbs n_orbitals[ik, isp] = n_orbs
for ib in xrange(n_orbs): for ib in range(n_orbs):
for jb in xrange(n_orbs): for jb in range(n_orbs):
hopping[ik, isp, ib, jb] = rf_hk.next() hopping[ik, isp, ib, jb] = next(rf_hk)
for ib in xrange(n_orbs): for ib in range(n_orbs):
for jb in xrange(n_orbs): for jb in range(n_orbs):
hopping[ik, isp, ib, jb] += 1j*rf_hk.next() hopping[ik, isp, ib, jb] += 1j*next(rf_hk)
rf_hk.close() rf_hk.close()
# Projectors # Projectors
@ -328,14 +328,14 @@ class VaspConverter(ConverterTools):
# use cases and decide which solution is to be made permanent. # use cases and decide which solution is to be made permanent.
# #
for ish, sh in enumerate(p_shells): for ish, sh in enumerate(p_shells):
for isp in xrange(n_spin_blocs): for isp in range(n_spin_blocs):
for ik in xrange(n_k): for ik in range(n_k):
for ion in xrange(len(sh['ion_list'])): for ion in range(len(sh['ion_list'])):
for ilm in xrange(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1]): for ilm in range(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1]):
for ib in xrange(n_orbitals[ik, isp]): for ib in range(n_orbitals[ik, isp]):
# This is to avoid confusion with the order of arguments # This is to avoid confusion with the order of arguments
pr = rf.next() pr = next(rf)
pi = rf.next() pi = next(rf)
proj_mat_csc[ik, isp, ilm, ib] = complex(pr, pi) proj_mat_csc[ik, isp, ilm, ib] = complex(pr, pi)
# now save only projectors with flag 'corr' to proj_mat # now save only projectors with flag 'corr' to proj_mat
@ -343,22 +343,22 @@ class VaspConverter(ConverterTools):
if self.proj_or_hk == 'proj': if self.proj_or_hk == 'proj':
for ish, sh in enumerate(p_shells): for ish, sh in enumerate(p_shells):
if sh['corr']: if sh['corr']:
for isp in xrange(n_spin_blocs): for isp in range(n_spin_blocs):
for ik in xrange(n_k): for ik in range(n_k):
for ion in xrange(len(sh['ion_list'])): for ion in range(len(sh['ion_list'])):
icsh = shion_to_shell[ish][ion] icsh = shion_to_shell[ish][ion]
for iclm,ilm in enumerate(xrange(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])): for iclm,ilm in enumerate(range(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])):
for ib in xrange(n_orbitals[ik, isp]): for ib in range(n_orbitals[ik, isp]):
proj_mat[ik,isp,icsh,iclm,ib] = proj_mat_csc[ik,isp,ilm,ib] proj_mat[ik,isp,icsh,iclm,ib] = proj_mat_csc[ik,isp,ilm,ib]
elif self.proj_or_hk == 'hk': elif self.proj_or_hk == 'hk':
for ish, sh in enumerate(p_shells): for ish, sh in enumerate(p_shells):
if sh['corr']: if sh['corr']:
for ion in xrange(len(sh['ion_list'])): for ion in range(len(sh['ion_list'])):
icsh = shion_to_shell[ish][ion] icsh = shion_to_shell[ish][ion]
for isp in xrange(n_spin_blocs): for isp in range(n_spin_blocs):
for ik in xrange(n_k): for ik in range(n_k):
for iclm,ilm in enumerate(xrange(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])): for iclm,ilm in enumerate(range(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])):
proj_mat[ik,isp,icsh,iclm,ilm] = 1.0 proj_mat[ik,isp,icsh,iclm,ilm] = 1.0
#corr_shell.pop('ion_list') #corr_shell.pop('ion_list')
@ -445,13 +445,13 @@ class VaspConverter(ConverterTools):
if os.path.exists(f): if os.path.exists(f):
mpi.report("Reading input from %s..."%f) mpi.report("Reading input from %s..."%f)
R = ConverterTools.read_fortran_file(self, f, self.fortran_to_replace) R = ConverterTools.read_fortran_file(self, f, self.fortran_to_replace)
assert int(R.next()) == n_k, "convert_misc_input: Number of k-points is inconsistent in oubwin file!" assert int(next(R)) == n_k, "convert_misc_input: Number of k-points is inconsistent in oubwin file!"
assert int(R.next()) == SO, "convert_misc_input: SO is inconsistent in oubwin file!" assert int(next(R)) == SO, "convert_misc_input: SO is inconsistent in oubwin file!"
for ik in xrange(n_k): for ik in range(n_k):
R.next() next(R)
band_window[isp][ik,0] = R.next() # lowest band band_window[isp][ik,0] = next(R) # lowest band
band_window[isp][ik,1] = R.next() # highest band band_window[isp][ik,1] = next(R) # highest band
R.next() next(R)
things_to_save.append('band_window') things_to_save.append('band_window')
R.close() # Reading done! R.close() # Reading done!


@ -48,7 +48,7 @@ from types import *
import numpy import numpy
import math import math
from pytriqs.archive import * from pytriqs.archive import *
from converter_tools import * from .converter_tools import *
from itertools import product from itertools import product
import os.path import os.path
@ -125,19 +125,19 @@ class Wannier90Converter(ConverterTools):
# conversion # conversion
try: try:
# read k - point mesh generation option # read k - point mesh generation option
kmesh_mode = int(R.next()) kmesh_mode = int(next(R))
if kmesh_mode >= 0: if kmesh_mode >= 0:
# read k-point mesh size from input # read k-point mesh size from input
nki = [int(R.next()) for idir in range(3)] nki = [int(next(R)) for idir in range(3)]
else: else:
# some default grid, if everything else fails... # some default grid, if everything else fails...
nki = [8, 8, 8] nki = [8, 8, 8]
# read the total number of electrons per cell # read the total number of electrons per cell
density_required = float(R.next()) density_required = float(next(R))
# we do not read shells, because we have no additional shells beyond correlated ones, # we do not read shells, because we have no additional shells beyond correlated ones,
# and the data will be copied from corr_shells into shells (see below) # and the data will be copied from corr_shells into shells (see below)
# number of corr. shells (e.g. Fe d, Ce f) in the unit cell, # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
n_corr_shells = int(R.next()) n_corr_shells = int(next(R))
# now read the information about the correlated shells (atom, sort, # now read the information about the correlated shells (atom, sort,
# l, dim, SO flag, irep): # l, dim, SO flag, irep):
corr_shells = [{name: int(val) for name, val in zip( corr_shells = [{name: int(val) for name, val in zip(
@ -423,7 +423,7 @@ class Wannier90Converter(ConverterTools):
ir += 1 ir += 1
# for each direct lattice vector R read the block of the # for each direct lattice vector R read the block of the
# Hamiltonian H(R) # Hamiltonian H(R)
for ir, jj, ii in product(range(nrpt), range(num_wf), range(num_wf)): for ir, jj, ii in product(list(range(nrpt)), list(range(num_wf)), list(range(num_wf))):
# advance one line, split the line into tokens # advance one line, split the line into tokens
currpos += 1 currpos += 1
cline = hr_data[currpos].split() cline = hr_data[currpos].split()
@ -569,7 +569,7 @@ class Wannier90Converter(ConverterTools):
nkpt = msize[0] * msize[1] * msize[2] nkpt = msize[0] * msize[1] * msize[2]
kmesh = numpy.zeros((nkpt, 3), dtype=float) kmesh = numpy.zeros((nkpt, 3), dtype=float)
ii = 0 ii = 0
for ix, iy, iz in product(range(msize[0]), range(msize[1]), range(msize[2])): for ix, iy, iz in product(list(range(msize[0])), list(range(msize[1])), list(range(msize[2]))):
kmesh[ii, :] = [float(ix) / msize[0], float(iy) / kmesh[ii, :] = [float(ix) / msize[0], float(iy) /
msize[1], float(iz) / msize[2]] msize[1], float(iz) / msize[2]]
ii += 1 ii += 1
@ -601,8 +601,8 @@ class Wannier90Converter(ConverterTools):
twopi = 2 * numpy.pi twopi = 2 * numpy.pi
h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_) h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_)
for ik in range(self.n_k)] for ik in range(self.n_k)]
ridx = numpy.array(range(self.nrpt)) ridx = numpy.array(list(range(self.nrpt)))
for ik, ir in product(range(self.n_k), ridx): for ik, ir in product(list(range(self.n_k)), ridx):
rdotk = twopi * numpy.dot(self.k_mesh[ik], self.rvec[ir]) rdotk = twopi * numpy.dot(self.k_mesh[ik], self.rvec[ir])
factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / \ factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / \
float(self.rdeg[ir]) float(self.rdeg[ir])
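The Fourier-sum hunk above assembles H(k) from the real-space Hamiltonian blocks of the Wannier90 *_hr.dat file, H(k) = sum_R exp(2*pi*i k.R) H(R) / deg(R). A compact NumPy sketch of the same sum, with hypothetical array names:

import numpy as np

def hamiltonian_of_k(h_of_r, rvec, rdeg, kmesh):
    # h_of_r: list of (norb, norb) blocks H(R), rvec: (nrpt, 3) lattice vectors,
    # rdeg: (nrpt,) degeneracies, kmesh: (n_k, 3) fractional k-points
    norb = h_of_r[0].shape[0]
    h_of_k = [np.zeros((norb, norb), dtype=complex) for _ in range(kmesh.shape[0])]
    for ik, kpt in enumerate(kmesh):
        for ir, rv in enumerate(rvec):
            factor = np.exp(2j * np.pi * np.dot(kpt, rv)) / float(rdeg[ir])
            h_of_k[ik] += factor * h_of_r[ir]
    return h_of_k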


@ -23,7 +23,7 @@
from types import * from types import *
import numpy import numpy
from pytriqs.archive import * from pytriqs.archive import *
from converter_tools import * from .converter_tools import *
import os.path import os.path
@ -114,23 +114,23 @@ class Wien2kConverter(ConverterTools):
R = ConverterTools.read_fortran_file( R = ConverterTools.read_fortran_file(
self, self.dft_file, self.fortran_to_replace) self, self.dft_file, self.fortran_to_replace)
try: try:
energy_unit = R.next() # read the energy convertion factor energy_unit = next(R) # read the energy convertion factor
# read the number of k points # read the number of k points
n_k = int(R.next()) n_k = int(next(R))
k_dep_projection = 1 k_dep_projection = 1
# flag for spin-polarised calculation # flag for spin-polarised calculation
SP = int(R.next()) SP = int(next(R))
# flag for spin-orbit calculation # flag for spin-orbit calculation
SO = int(R.next()) SO = int(next(R))
charge_below = R.next() # total charge below energy window charge_below = next(R) # total charge below energy window
# total density required, for setting the chemical potential # total density required, for setting the chemical potential
density_required = R.next() density_required = next(R)
symm_op = 1 # Use symmetry groups for the k-sum symm_op = 1 # Use symmetry groups for the k-sum
# the information on the non-correlated shells is not important # the information on the non-correlated shells is not important
# here, maybe skip: # here, maybe skip:
# number of shells (e.g. Fe d, As p, O p) in the unit cell, # number of shells (e.g. Fe d, As p, O p) in the unit cell,
n_shells = int(R.next()) n_shells = int(next(R))
# corresponds to index R in formulas # corresponds to index R in formulas
# now read the information about the shells (atom, sort, l, dim): # now read the information about the shells (atom, sort, l, dim):
shell_entries = ['atom', 'sort', 'l', 'dim'] shell_entries = ['atom', 'sort', 'l', 'dim']
@ -138,7 +138,7 @@ class Wien2kConverter(ConverterTools):
shell_entries, R)} for ish in range(n_shells)] shell_entries, R)} for ish in range(n_shells)]
# number of corr. shells (e.g. Fe d, Ce f) in the unit cell, # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
n_corr_shells = int(R.next()) n_corr_shells = int(next(R))
# corresponds to index R in formulas # corresponds to index R in formulas
# now read the information about the shells (atom, sort, l, dim, SO # now read the information about the shells (atom, sort, l, dim, SO
# flag, irep): # flag, irep):
@ -161,14 +161,14 @@ class Wien2kConverter(ConverterTools):
for icrsh in range(n_corr_shells): for icrsh in range(n_corr_shells):
for i in range(corr_shells[icrsh]['dim']): # read real part: for i in range(corr_shells[icrsh]['dim']): # read real part:
for j in range(corr_shells[icrsh]['dim']): for j in range(corr_shells[icrsh]['dim']):
rot_mat[icrsh][i, j] = R.next() rot_mat[icrsh][i, j] = next(R)
# read imaginary part: # read imaginary part:
for i in range(corr_shells[icrsh]['dim']): for i in range(corr_shells[icrsh]['dim']):
for j in range(corr_shells[icrsh]['dim']): for j in range(corr_shells[icrsh]['dim']):
rot_mat[icrsh][i, j] += 1j * R.next() rot_mat[icrsh][i, j] += 1j * next(R)
if (SP == 1): # read time inversion flag: if (SP == 1): # read time inversion flag:
rot_mat_time_inv[icrsh] = int(R.next()) rot_mat_time_inv[icrsh] = int(next(R))
# Read here the info for the transformation of the basis: # Read here the info for the transformation of the basis:
n_reps = [1 for i in range(n_inequiv_shells)] n_reps = [1 for i in range(n_inequiv_shells)]
@ -176,8 +176,8 @@ class Wien2kConverter(ConverterTools):
T = [] T = []
for ish in range(n_inequiv_shells): for ish in range(n_inequiv_shells):
# number of representatives ("subsets"), e.g. t2g and eg # number of representatives ("subsets"), e.g. t2g and eg
n_reps[ish] = int(R.next()) n_reps[ish] = int(next(R))
dim_reps[ish] = [int(R.next()) for i in range( dim_reps[ish] = [int(next(R)) for i in range(
n_reps[ish])] # dimensions of the subsets n_reps[ish])] # dimensions of the subsets
# The transformation matrix: # The transformation matrix:
@ -189,10 +189,10 @@ class Wien2kConverter(ConverterTools):
# now read it from file: # now read it from file:
for i in range(lmax): for i in range(lmax):
for j in range(lmax): for j in range(lmax):
T[ish][i, j] = R.next() T[ish][i, j] = next(R)
for i in range(lmax): for i in range(lmax):
for j in range(lmax): for j in range(lmax):
T[ish][i, j] += 1j * R.next() T[ish][i, j] += 1j * next(R)
# Spin blocks to be read: # Spin blocks to be read:
n_spin_blocs = SP + 1 - SO n_spin_blocs = SP + 1 - SO
@ -201,7 +201,7 @@ class Wien2kConverter(ConverterTools):
n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int) n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int)
for isp in range(n_spin_blocs): for isp in range(n_spin_blocs):
for ik in range(n_k): for ik in range(n_k):
n_orbitals[ik, isp] = int(R.next()) n_orbitals[ik, isp] = int(next(R))
# Initialise the projectors: # Initialise the projectors:
proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max( proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max(
@ -216,12 +216,12 @@ class Wien2kConverter(ConverterTools):
for isp in range(n_spin_blocs): for isp in range(n_spin_blocs):
for i in range(n_orb): for i in range(n_orb):
for j in range(n_orbitals[ik][isp]): for j in range(n_orbitals[ik][isp]):
proj_mat[ik, isp, icrsh, i, j] = R.next() proj_mat[ik, isp, icrsh, i, j] = next(R)
# now Imag part: # now Imag part:
for isp in range(n_spin_blocs): for isp in range(n_spin_blocs):
for i in range(n_orb): for i in range(n_orb):
for j in range(n_orbitals[ik][isp]): for j in range(n_orbitals[ik][isp]):
proj_mat[ik, isp, icrsh, i, j] += 1j * R.next() proj_mat[ik, isp, icrsh, i, j] += 1j * next(R)
# now define the arrays for weights and hopping ... # now define the arrays for weights and hopping ...
# w(k_index), default normalisation # w(k_index), default normalisation
@ -231,7 +231,7 @@ class Wien2kConverter(ConverterTools):
# weights in the file # weights in the file
for ik in range(n_k): for ik in range(n_k):
bz_weights[ik] = R.next() bz_weights[ik] = next(R)
# if the sum over spins is in the weights, take it out again!! # if the sum over spins is in the weights, take it out again!!
sm = sum(bz_weights) sm = sum(bz_weights)
@ -244,7 +244,7 @@ class Wien2kConverter(ConverterTools):
for ik in range(n_k): for ik in range(n_k):
n_orb = n_orbitals[ik, isp] n_orb = n_orbitals[ik, isp]
for i in range(n_orb): for i in range(n_orb):
hopping[ik, isp, i, i] = R.next() * energy_unit hopping[ik, isp, i, i] = next(R) * energy_unit
# keep some things that we need for reading parproj: # keep some things that we need for reading parproj:
things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells', things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells',
@ -252,7 +252,7 @@ class Wien2kConverter(ConverterTools):
for it in things_to_set: for it in things_to_set:
setattr(self, it, locals()[it]) setattr(self, it, locals()[it])
except StopIteration: # a more explicit error if the file is corrupted. except StopIteration: # a more explicit error if the file is corrupted.
raise IOError, "Wien2k_converter : reading file %s failed!" % self.dft_file raise IOError("Wien2k_converter : reading file %s failed!" % self.dft_file)
R.close() R.close()
# Reading done! # Reading done!
@ -308,7 +308,7 @@ class Wien2kConverter(ConverterTools):
R = ConverterTools.read_fortran_file( R = ConverterTools.read_fortran_file(
self, self.parproj_file, self.fortran_to_replace) self, self.parproj_file, self.fortran_to_replace)
n_parproj = [int(R.next()) for i in range(self.n_shells)] n_parproj = [int(next(R)) for i in range(self.n_shells)]
n_parproj = numpy.array(n_parproj) n_parproj = numpy.array(n_parproj)
# Initialise P, here a double list of matrices: # Initialise P, here a double list of matrices:
@ -328,39 +328,39 @@ class Wien2kConverter(ConverterTools):
# read real part: # read real part:
for i in range(self.shells[ish]['dim']): for i in range(self.shells[ish]['dim']):
for j in range(self.n_orbitals[ik][isp]): for j in range(self.n_orbitals[ik][isp]):
proj_mat_all[ik, isp, ish, ir, i, j] = R.next() proj_mat_all[ik, isp, ish, ir, i, j] = next(R)
for isp in range(self.n_spin_blocs): for isp in range(self.n_spin_blocs):
# read imaginary part: # read imaginary part:
for i in range(self.shells[ish]['dim']): for i in range(self.shells[ish]['dim']):
for j in range(self.n_orbitals[ik][isp]): for j in range(self.n_orbitals[ik][isp]):
proj_mat_all[ik, isp, ish, proj_mat_all[ik, isp, ish,
ir, i, j] += 1j * R.next() ir, i, j] += 1j * next(R)
# now read the Density Matrix for this orbital below the energy # now read the Density Matrix for this orbital below the energy
# window: # window:
for isp in range(self.n_spin_blocs): for isp in range(self.n_spin_blocs):
for i in range(self.shells[ish]['dim']): # read real part: for i in range(self.shells[ish]['dim']): # read real part:
for j in range(self.shells[ish]['dim']): for j in range(self.shells[ish]['dim']):
dens_mat_below[isp][ish][i, j] = R.next() dens_mat_below[isp][ish][i, j] = next(R)
for isp in range(self.n_spin_blocs): for isp in range(self.n_spin_blocs):
# read imaginary part: # read imaginary part:
for i in range(self.shells[ish]['dim']): for i in range(self.shells[ish]['dim']):
for j in range(self.shells[ish]['dim']): for j in range(self.shells[ish]['dim']):
dens_mat_below[isp][ish][i, j] += 1j * R.next() dens_mat_below[isp][ish][i, j] += 1j * next(R)
if (self.SP == 0): if (self.SP == 0):
dens_mat_below[isp][ish] /= 2.0 dens_mat_below[isp][ish] /= 2.0
# Global -> local rotation matrix for this shell: # Global -> local rotation matrix for this shell:
for i in range(self.shells[ish]['dim']): # read real part: for i in range(self.shells[ish]['dim']): # read real part:
for j in range(self.shells[ish]['dim']): for j in range(self.shells[ish]['dim']):
rot_mat_all[ish][i, j] = R.next() rot_mat_all[ish][i, j] = next(R)
for i in range(self.shells[ish]['dim']): # read imaginary part: for i in range(self.shells[ish]['dim']): # read imaginary part:
for j in range(self.shells[ish]['dim']): for j in range(self.shells[ish]['dim']):
rot_mat_all[ish][i, j] += 1j * R.next() rot_mat_all[ish][i, j] += 1j * next(R)
if (self.SP): if (self.SP):
rot_mat_all_time_inv[ish] = int(R.next()) rot_mat_all_time_inv[ish] = int(next(R))
R.close() R.close()
# Reading done! # Reading done!
@ -404,13 +404,13 @@ class Wien2kConverter(ConverterTools):
mpi.report("Reading input from %s..." % self.band_file) mpi.report("Reading input from %s..." % self.band_file)
R = ConverterTools.read_fortran_file( R = ConverterTools.read_fortran_file(
self, self.band_file, self.fortran_to_replace) self, self.band_file, self.fortran_to_replace)
n_k = int(R.next()) n_k = int(next(R))
# read the list of n_orbitals for all k points # read the list of n_orbitals for all k points
n_orbitals = numpy.zeros([n_k, self.n_spin_blocs], numpy.int) n_orbitals = numpy.zeros([n_k, self.n_spin_blocs], numpy.int)
for isp in range(self.n_spin_blocs): for isp in range(self.n_spin_blocs):
for ik in range(n_k): for ik in range(n_k):
n_orbitals[ik, isp] = int(R.next()) n_orbitals[ik, isp] = int(next(R))
# Initialise the projectors: # Initialise the projectors:
proj_mat = numpy.zeros([n_k, self.n_spin_blocs, self.n_corr_shells, max( proj_mat = numpy.zeros([n_k, self.n_spin_blocs, self.n_corr_shells, max(
@ -425,12 +425,12 @@ class Wien2kConverter(ConverterTools):
for isp in range(self.n_spin_blocs): for isp in range(self.n_spin_blocs):
for i in range(n_orb): for i in range(n_orb):
for j in range(n_orbitals[ik, isp]): for j in range(n_orbitals[ik, isp]):
proj_mat[ik, isp, icrsh, i, j] = R.next() proj_mat[ik, isp, icrsh, i, j] = next(R)
# now Imag part: # now Imag part:
for isp in range(self.n_spin_blocs): for isp in range(self.n_spin_blocs):
for i in range(n_orb): for i in range(n_orb):
for j in range(n_orbitals[ik, isp]): for j in range(n_orbitals[ik, isp]):
proj_mat[ik, isp, icrsh, i, j] += 1j * R.next() proj_mat[ik, isp, icrsh, i, j] += 1j * next(R)
hopping = numpy.zeros([n_k, self.n_spin_blocs, numpy.max( hopping = numpy.zeros([n_k, self.n_spin_blocs, numpy.max(
n_orbitals), numpy.max(n_orbitals)], numpy.complex_) n_orbitals), numpy.max(n_orbitals)], numpy.complex_)
@ -441,10 +441,10 @@ class Wien2kConverter(ConverterTools):
for ik in range(n_k): for ik in range(n_k):
n_orb = n_orbitals[ik, isp] n_orb = n_orbitals[ik, isp]
for i in range(n_orb): for i in range(n_orb):
hopping[ik, isp, i, i] = R.next() * self.energy_unit hopping[ik, isp, i, i] = next(R) * self.energy_unit
# now read the partial projectors: # now read the partial projectors:
n_parproj = [int(R.next()) for i in range(self.n_shells)] n_parproj = [int(next(R)) for i in range(self.n_shells)]
n_parproj = numpy.array(n_parproj) n_parproj = numpy.array(n_parproj)
# Initialise P, here a double list of matrices: # Initialise P, here a double list of matrices:
@ -460,20 +460,20 @@ class Wien2kConverter(ConverterTools):
for i in range(self.shells[ish]['dim']): for i in range(self.shells[ish]['dim']):
for j in range(n_orbitals[ik, isp]): for j in range(n_orbitals[ik, isp]):
proj_mat_all[ik, isp, ish, proj_mat_all[ik, isp, ish,
ir, i, j] = R.next() ir, i, j] = next(R)
# read imaginary part: # read imaginary part:
for i in range(self.shells[ish]['dim']): for i in range(self.shells[ish]['dim']):
for j in range(n_orbitals[ik, isp]): for j in range(n_orbitals[ik, isp]):
proj_mat_all[ik, isp, ish, proj_mat_all[ik, isp, ish,
ir, i, j] += 1j * R.next() ir, i, j] += 1j * next(R)
R.close() R.close()
except KeyError: except KeyError:
raise IOError, "convert_bands_input : Needed data not found in hdf file. Consider calling convert_dft_input first!" raise IOError("convert_bands_input : Needed data not found in hdf file. Consider calling convert_dft_input first!")
except StopIteration: # a more explicit error if the file is corrupted. except StopIteration: # a more explicit error if the file is corrupted.
raise IOError, "Wien2k_converter : reading file %s failed!" % self.band_file raise IOError("Wien2k_converter : reading file %s failed!" % self.band_file)
# Reading done! # Reading done!
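The recurring change in the hunks above is the iterator protocol rename: Python 3 drops the .next() method in favour of the next() builtin, which 2to3 applies to the read_fortran_file generator. A minimal self-contained sketch of the pattern (a toy token generator, not the actual converter code):

    import io

    def read_tokens(stream):
        # stand-in for read_fortran_file: yield whitespace-separated tokens
        for line in stream:
            for token in line.split():
                yield token

    R = read_tokens(io.StringIO("3 1.5 2.5\n4.0\n"))
    n = int(next(R))                       # Python 2 spelled this int(R.next())
    values = [float(next(R)) for _ in range(n)]
    print(n, values)                       # -> 3 [1.5, 2.5, 4.0]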
@ -507,7 +507,7 @@ class Wien2kConverter(ConverterTools):
# Check if SP, SO and n_k are already in h5 # Check if SP, SO and n_k are already in h5
with HDFArchive(self.hdf_file, 'r') as ar: with HDFArchive(self.hdf_file, 'r') as ar:
if not (self.dft_subgrp in ar): if not (self.dft_subgrp in ar):
raise IOError, "convert_misc_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp raise IOError("convert_misc_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp)
SP = ar[self.dft_subgrp]['SP'] SP = ar[self.dft_subgrp]['SP']
SO = ar[self.dft_subgrp]['SO'] SO = ar[self.dft_subgrp]['SO']
n_k = ar[self.dft_subgrp]['n_k'] n_k = ar[self.dft_subgrp]['n_k']
@ -539,19 +539,19 @@ class Wien2kConverter(ConverterTools):
mpi.report("Reading input from %s..." % f) mpi.report("Reading input from %s..." % f)
R = ConverterTools.read_fortran_file( R = ConverterTools.read_fortran_file(
self, f, self.fortran_to_replace) self, f, self.fortran_to_replace)
n_k_oubwin = int(R.next()) n_k_oubwin = int(next(R))
if (n_k_oubwin != n_k): if (n_k_oubwin != n_k):
mpi.report( mpi.report(
"convert_misc_input : WARNING : n_k in case.oubwin is different from n_k in case.klist") "convert_misc_input : WARNING : n_k in case.oubwin is different from n_k in case.klist")
assert int( assert int(
R.next()) == SO, "convert_misc_input: SO is inconsistent in oubwin file!" next(R)) == SO, "convert_misc_input: SO is inconsistent in oubwin file!"
band_window[isp] = numpy.zeros((n_k_oubwin, 2), dtype=int) band_window[isp] = numpy.zeros((n_k_oubwin, 2), dtype=int)
for ik in xrange(n_k_oubwin): for ik in range(n_k_oubwin):
R.next() next(R)
band_window[isp][ik, 0] = R.next() # lowest band band_window[isp][ik, 0] = next(R) # lowest band
band_window[isp][ik, 1] = R.next() # highest band band_window[isp][ik, 1] = next(R) # highest band
R.next() next(R)
things_to_save.append('band_window') things_to_save.append('band_window')
R.close() # Reading done! R.close() # Reading done!
@ -578,7 +578,7 @@ class Wien2kConverter(ConverterTools):
things_to_save.extend( things_to_save.extend(
['lattice_type', 'lattice_constants', 'lattice_angles']) ['lattice_type', 'lattice_constants', 'lattice_angles'])
except IOError: except IOError:
raise IOError, "convert_misc_input: reading file %s failed" % self.struct_file raise IOError("convert_misc_input: reading file %s failed" % self.struct_file)
# Read relevant data from .outputs file # Read relevant data from .outputs file
####################################### #######################################
@ -610,7 +610,7 @@ class Wien2kConverter(ConverterTools):
things_to_save.extend(['n_symmetries', 'rot_symmetries']) things_to_save.extend(['n_symmetries', 'rot_symmetries'])
things_to_save.append('rot_symmetries') things_to_save.append('rot_symmetries')
except IOError: except IOError:
raise IOError, "convert_misc_input: reading file %s failed" % self.outputs_file raise IOError("convert_misc_input: reading file %s failed" % self.outputs_file)
# Save it to the HDF: # Save it to the HDF:
with HDFArchive(self.hdf_file, 'a') as ar: with HDFArchive(self.hdf_file, 'a') as ar:
@ -635,7 +635,7 @@ class Wien2kConverter(ConverterTools):
# Check if SP, SO and n_k are already in h5 # Check if SP, SO and n_k are already in h5
with HDFArchive(self.hdf_file, 'r') as ar: with HDFArchive(self.hdf_file, 'r') as ar:
if not (self.dft_subgrp in ar): if not (self.dft_subgrp in ar):
raise IOError, "convert_transport_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp raise IOError("convert_transport_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp)
SP = ar[self.dft_subgrp]['SP'] SP = ar[self.dft_subgrp]['SP']
SO = ar[self.dft_subgrp]['SO'] SO = ar[self.dft_subgrp]['SO']
n_k = ar[self.dft_subgrp]['n_k'] n_k = ar[self.dft_subgrp]['n_k']
@ -665,20 +665,20 @@ class Wien2kConverter(ConverterTools):
band_window_optics = [] band_window_optics = []
for isp, f in enumerate(files): for isp, f in enumerate(files):
if not os.path.exists(f): if not os.path.exists(f):
raise IOError, "convert_transport_input: File %s does not exist" % f raise IOError("convert_transport_input: File %s does not exist" % f)
mpi.report("Reading input from %s..." % f) mpi.report("Reading input from %s..." % f)
R = ConverterTools.read_fortran_file( R = ConverterTools.read_fortran_file(
self, f, {'D': 'E', '(': '', ')': '', ',': ' '}) self, f, {'D': 'E', '(': '', ')': '', ',': ' '})
band_window_optics_isp = [] band_window_optics_isp = []
for ik in xrange(n_k): for ik in range(n_k):
R.next() next(R)
nu1 = int(R.next()) nu1 = int(next(R))
nu2 = int(R.next()) nu2 = int(next(R))
band_window_optics_isp.append((nu1, nu2)) band_window_optics_isp.append((nu1, nu2))
n_bands = nu2 - nu1 + 1 n_bands = nu2 - nu1 + 1
for _ in range(4): for _ in range(4):
R.next() next(R)
if n_bands <= 0: if n_bands <= 0:
velocity_xyz = numpy.zeros((1, 1, 3), dtype=complex) velocity_xyz = numpy.zeros((1, 1, 3), dtype=complex)
else: else:
@ -688,7 +688,7 @@ class Wien2kConverter(ConverterTools):
for nu_j in range(nu_i, n_bands): for nu_j in range(nu_i, n_bands):
for i in range(3): for i in range(3):
velocity_xyz[nu_i][nu_j][ velocity_xyz[nu_i][nu_j][
i] = R.next() + R.next() * 1j i] = next(R) + next(R) * 1j
if (nu_i != nu_j): if (nu_i != nu_j):
velocity_xyz[nu_j][nu_i][i] = velocity_xyz[ velocity_xyz[nu_j][nu_i][i] = velocity_xyz[
nu_i][nu_j][i].conjugate() nu_i][nu_j][i].conjugate()
@ -737,13 +737,13 @@ class Wien2kConverter(ConverterTools):
self, symm_file, self.fortran_to_replace) self, symm_file, self.fortran_to_replace)
try: try:
n_symm = int(R.next()) # Number of symmetry operations n_symm = int(next(R)) # Number of symmetry operations
n_atoms = int(R.next()) # number of atoms involved n_atoms = int(next(R)) # number of atoms involved
perm = [[int(R.next()) for i in range(n_atoms)] perm = [[int(next(R)) for i in range(n_atoms)]
for j in range(n_symm)] # list of permutations of the atoms for j in range(n_symm)] # list of permutations of the atoms
if SP: if SP:
# time inversion for SO coupling # time inversion for SO coupling
time_inv = [int(R.next()) for j in range(n_symm)] time_inv = [int(next(R)) for j in range(n_symm)]
else: else:
time_inv = [0 for j in range(n_symm)] time_inv = [0 for j in range(n_symm)]
@ -757,11 +757,11 @@ class Wien2kConverter(ConverterTools):
for i in range(orbits[orb]['dim']): for i in range(orbits[orb]['dim']):
for j in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']):
# real part # real part
mat[i_symm][orb][i, j] = R.next() mat[i_symm][orb][i, j] = next(R)
for i in range(orbits[orb]['dim']): for i in range(orbits[orb]['dim']):
for j in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']):
mat[i_symm][orb][i, j] += 1j * \ mat[i_symm][orb][i, j] += 1j * \
R.next() # imaginary part next(R) # imaginary part
mat_tinv = [numpy.identity(orbits[orb]['dim'], numpy.complex_) mat_tinv = [numpy.identity(orbits[orb]['dim'], numpy.complex_)
for orb in range(n_orbits)] for orb in range(n_orbits)]
@ -773,14 +773,14 @@ class Wien2kConverter(ConverterTools):
for i in range(orbits[orb]['dim']): for i in range(orbits[orb]['dim']):
for j in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']):
# real part # real part
mat_tinv[orb][i, j] = R.next() mat_tinv[orb][i, j] = next(R)
for i in range(orbits[orb]['dim']): for i in range(orbits[orb]['dim']):
for j in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']):
mat_tinv[orb][i, j] += 1j * \ mat_tinv[orb][i, j] += 1j * \
R.next() # imaginary part next(R) # imaginary part
except StopIteration: # a more explicit error if the file is corrupted. except StopIteration: # a more explicit error if the file is corrupted.
raise IOError, "Wien2k_converter : reading file %s failed!" %symm_file raise IOError("Wien2k_converter : reading file %s failed!" %symm_file)
R.close() R.close()
# Reading done! # Reading done!
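The other systematic rewrite in this file is the raise statement: the Python 2 form `raise IOError, msg` is a syntax error in Python 3, so 2to3 converts it to the call form `raise IOError(msg)`, which both versions accept. A hypothetical sketch (filename and message are illustrative only, not taken from the converter):

    import os

    symm_file = "case.symqmc"   # assumed example filename

    def read_symmetry_file(path):
        if not os.path.exists(path):
            # Python 2:  raise IOError, "reading file %s failed!" % path
            raise IOError("reading file %s failed!" % path)
        with open(path) as f:
            return f.read()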


@ -27,8 +27,8 @@ from pytriqs.gf import *
import pytriqs.utility.mpi as mpi import pytriqs.utility.mpi as mpi
from pytriqs.utility.comparison_tests import assert_arrays_are_close from pytriqs.utility.comparison_tests import assert_arrays_are_close
from pytriqs.archive import * from pytriqs.archive import *
from symmetry import * from .symmetry import *
from block_structure import BlockStructure from .block_structure import BlockStructure
from sets import Set from sets import Set
from itertools import product from itertools import product
from warnings import warn from warnings import warn
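Python 3 removes implicit relative imports, so intra-package imports of sibling modules such as symmetry and block_structure gain an explicit leading dot. Note that the untouched `from sets import Set` line has no Python 3 counterpart, since the `sets` module was removed in favour of the builtin `set`; presumably that import is cleaned up separately. A short sketch, assuming a hypothetical layout with mypkg/sumk.py next to mypkg/symmetry.py:

    # Inside mypkg/sumk.py (hypothetical layout):
    #   Python 2, implicit relative import:   from symmetry import *
    #   Python 3, explicit relative import:   from .symmetry import *
    #   (an absolute form also works:         from mypkg.symmetry import *)

    # The builtin set replaces Python 2's sets.Set:
    spins = set(['up', 'down'])            # Python 2: sets.Set(['up', 'down'])
    print(sorted(spins))                   # -> ['down', 'up']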
@ -127,10 +127,10 @@ class SumkDFT(object):
# GF structure used for the local things in the k sums # GF structure used for the local things in the k sums
# Most general form allowing for all hybridisation, i.e. largest # Most general form allowing for all hybridisation, i.e. largest
# blocks possible # blocks possible
self.gf_struct_sumk = [[(sp, range(self.corr_shells[icrsh]['dim'])) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]] self.gf_struct_sumk = [[(sp, list(range(self.corr_shells[icrsh]['dim']))) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]]
for icrsh in range(self.n_corr_shells)] for icrsh in range(self.n_corr_shells)]
# First set a standard gf_struct solver: # First set a standard gf_struct solver:
self.gf_struct_solver = [dict([(sp, range(self.corr_shells[self.inequiv_to_corr[ish]]['dim'])) self.gf_struct_solver = [dict([(sp, list(range(self.corr_shells[self.inequiv_to_corr[ish]]['dim'])))
for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]]) for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]])
for ish in range(self.n_inequiv_shells)] for ish in range(self.n_inequiv_shells)]
# Set standard (identity) maps from gf_struct_sumk <-> # Set standard (identity) maps from gf_struct_sumk <->
@ -273,7 +273,7 @@ class SumkDFT(object):
try: try:
list_to_return.append(ar[subgrp][it]) list_to_return.append(ar[subgrp][it])
except: except:
raise ValueError, "load: %s not found, and so not loaded." % it raise ValueError("load: %s not found, and so not loaded." % it)
return list_to_return return list_to_return
################ ################
@ -324,7 +324,7 @@ class SumkDFT(object):
projmat = self.proj_mat[ik, isp, ish, 0:dim, 0:n_orb] projmat = self.proj_mat[ik, isp, ish, 0:dim, 0:n_orb]
elif shells == 'all': elif shells == 'all':
if ir is None: if ir is None:
raise ValueError, "downfold: provide ir if treating all shells." raise ValueError("downfold: provide ir if treating all shells.")
dim = self.shells[ish]['dim'] dim = self.shells[ish]['dim']
projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb] projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb]
elif shells == 'csc': elif shells == 'csc':
@ -379,7 +379,7 @@ class SumkDFT(object):
projmat = self.proj_mat[ik, isp, ish, 0:dim, 0:n_orb] projmat = self.proj_mat[ik, isp, ish, 0:dim, 0:n_orb]
elif shells == 'all': elif shells == 'all':
if ir is None: if ir is None:
raise ValueError, "upfold: provide ir if treating all shells." raise ValueError("upfold: provide ir if treating all shells.")
dim = self.shells[ish]['dim'] dim = self.shells[ish]['dim']
projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb] projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb]
elif shells == 'csc': elif shells == 'csc':
@ -495,7 +495,7 @@ class SumkDFT(object):
ntoi = self.spin_names_to_ind[self.SO] ntoi = self.spin_names_to_ind[self.SO]
spn = self.spin_block_names[self.SO] spn = self.spin_block_names[self.SO]
if (iw_or_w != "iw") and (iw_or_w != "w"): if (iw_or_w != "iw") and (iw_or_w != "w"):
raise ValueError, "lattice_gf: Implemented only for Re/Im frequency functions." raise ValueError("lattice_gf: Implemented only for Re/Im frequency functions.")
if not hasattr(self, "Sigma_imp_" + iw_or_w): if not hasattr(self, "Sigma_imp_" + iw_or_w):
with_Sigma = False with_Sigma = False
if broadening is None: if broadening is None:
@ -521,12 +521,12 @@ class SumkDFT(object):
else: else:
if iw_or_w == "iw": if iw_or_w == "iw":
if beta is None: if beta is None:
raise ValueError, "lattice_gf: Give the beta for the lattice GfReFreq." raise ValueError("lattice_gf: Give the beta for the lattice GfReFreq.")
# Default number of Matsubara frequencies # Default number of Matsubara frequencies
mesh = MeshImFreq(beta=beta, S='Fermion', n_max=1025) mesh = MeshImFreq(beta=beta, S='Fermion', n_max=1025)
elif iw_or_w == "w": elif iw_or_w == "w":
if mesh is None: if mesh is None:
raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.")
mesh = MeshReFreq(mesh[0], mesh[1], mesh[2]) mesh = MeshReFreq(mesh[0], mesh[1], mesh[2])
# Check if G_latt is present # Check if G_latt is present
@ -547,7 +547,7 @@ class SumkDFT(object):
# Set up G_latt # Set up G_latt
if set_up_G_latt: if set_up_G_latt:
block_structure = [ block_structure = [
range(self.n_orbitals[ik, ntoi[sp]]) for sp in spn] list(range(self.n_orbitals[ik, ntoi[sp]])) for sp in spn]
gf_struct = [(spn[isp], block_structure[isp]) gf_struct = [(spn[isp], block_structure[isp])
for isp in range(self.n_spin_blocks[self.SO])] for isp in range(self.n_spin_blocks[self.SO])]
block_ind_list = [block for block, inner in gf_struct] block_ind_list = [block for block, inner in gf_struct]
@ -624,13 +624,13 @@ class SumkDFT(object):
SK_Sigma_imp = self.Sigma_imp_w SK_Sigma_imp = self.Sigma_imp_w
else: else:
raise ValueError, "put_Sigma: This type of Sigma is not handled." raise ValueError("put_Sigma: This type of Sigma is not handled.")
# transform the CTQMC blocks to the full matrix: # transform the CTQMC blocks to the full matrix:
for icrsh in range(self.n_corr_shells): for icrsh in range(self.n_corr_shells):
# ish is the index of the inequivalent shell corresponding to icrsh # ish is the index of the inequivalent shell corresponding to icrsh
ish = self.corr_to_inequiv[icrsh] ish = self.corr_to_inequiv[icrsh]
for block, inner in self.gf_struct_solver[ish].iteritems(): for block, inner in self.gf_struct_solver[ish].items():
for ind1 in inner: for ind1 in inner:
for ind2 in inner: for ind2 in inner:
block_sumk, ind1_sumk = self.solver_to_sumk[ block_sumk, ind1_sumk = self.solver_to_sumk[
@ -678,19 +678,19 @@ class SumkDFT(object):
G_loc = [self.Sigma_imp_iw[icrsh].copy() for icrsh in range( G_loc = [self.Sigma_imp_iw[icrsh].copy() for icrsh in range(
self.n_corr_shells)] # this list will be returned self.n_corr_shells)] # this list will be returned
beta = G_loc[0].mesh.beta beta = G_loc[0].mesh.beta
G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=G_loc[0].mesh)) for block, inner in self.gf_struct_solver[ish].iteritems()], G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=G_loc[0].mesh)) for block, inner in self.gf_struct_solver[ish].items()],
make_copies=False) for ish in range(self.n_inequiv_shells)] make_copies=False) for ish in range(self.n_inequiv_shells)]
elif iw_or_w == "w": elif iw_or_w == "w":
G_loc = [self.Sigma_imp_w[icrsh].copy() for icrsh in range( G_loc = [self.Sigma_imp_w[icrsh].copy() for icrsh in range(
self.n_corr_shells)] # this list will be returned self.n_corr_shells)] # this list will be returned
mesh = G_loc[0].mesh mesh = G_loc[0].mesh
G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=mesh)) for block, inner in self.gf_struct_solver[ish].iteritems()], G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=mesh)) for block, inner in self.gf_struct_solver[ish].items()],
make_copies=False) for ish in range(self.n_inequiv_shells)] make_copies=False) for ish in range(self.n_inequiv_shells)]
for icrsh in range(self.n_corr_shells): for icrsh in range(self.n_corr_shells):
G_loc[icrsh].zero() # initialize to zero G_loc[icrsh].zero() # initialize to zero
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
if iw_or_w == 'iw': if iw_or_w == 'iw':
G_latt = self.lattice_gf( G_latt = self.lattice_gf(
@ -729,7 +729,7 @@ class SumkDFT(object):
# transform to CTQMC blocks: # transform to CTQMC blocks:
for ish in range(self.n_inequiv_shells): for ish in range(self.n_inequiv_shells):
for block, inner in self.gf_struct_solver[ish].iteritems(): for block, inner in self.gf_struct_solver[ish].items():
for ind1 in inner: for ind1 in inner:
for ind2 in inner: for ind2 in inner:
block_sumk, ind1_sumk = self.solver_to_sumk[ block_sumk, ind1_sumk = self.solver_to_sumk[
@ -782,7 +782,7 @@ class SumkDFT(object):
for ish in range(self.n_corr_shells)] for ish in range(self.n_corr_shells)]
if include_shells is None: if include_shells is None:
include_shells = range(self.n_inequiv_shells) include_shells = list(range(self.n_inequiv_shells))
for ish in include_shells: for ish in include_shells:
for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]: for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
@ -815,7 +815,7 @@ class SumkDFT(object):
for i in range(num_blocs): for i in range(num_blocs):
blocs[i].sort() blocs[i].sort()
self.gf_struct_solver[ish].update( self.gf_struct_solver[ish].update(
[('%s_%s' % (sp, i), range(len(blocs[i])))]) [('%s_%s' % (sp, i), list(range(len(blocs[i]))))])
# Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner) # Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner)
# and solver_to_sumk taking (solver_block, solver_inner) --> # and solver_to_sumk taking (solver_block, solver_inner) -->
@ -834,7 +834,7 @@ class SumkDFT(object):
# Now calculate degeneracies of orbitals # Now calculate degeneracies of orbitals
dm = {} dm = {}
for block, inner in self.gf_struct_solver[ish].iteritems(): for block, inner in self.gf_struct_solver[ish].items():
# get dm for the blocks: # get dm for the blocks:
dm[block] = numpy.zeros( dm[block] = numpy.zeros(
[len(inner), len(inner)], numpy.complex_) [len(inner), len(inner)], numpy.complex_)
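The dict-iteration methods iteritems(), iterkeys() and itervalues() do not exist in Python 3; items(), keys() and values() now return lightweight views, so 2to3 maps the iter* calls onto them directly. A self-contained sketch of the loop pattern used for gf_struct_solver (toy data, not the real block structure):

    gf_struct_solver = {'up_0': [0, 1], 'down_0': [0, 1]}     # toy block structure

    # Python 2:  for block, inner in gf_struct_solver.iteritems():
    for block, inner in gf_struct_solver.items():             # Python 3 view
        for ind1 in inner:
            for ind2 in inner:
                pass   # e.g. map (block, ind1) to the sumk block/index here

    n_states = sum(len(inner) for inner in gf_struct_solver.values())
    print(n_states)                                            # -> 4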
@ -847,8 +847,8 @@ class SumkDFT(object):
dm[block][ind1, ind2] = dens_mat[ish][ dm[block][ind1, ind2] = dens_mat[ish][
block_sumk][ind1_sumk, ind2_sumk] block_sumk][ind1_sumk, ind2_sumk]
for block1 in self.gf_struct_solver[ish].iterkeys(): for block1 in self.gf_struct_solver[ish].keys():
for block2 in self.gf_struct_solver[ish].iterkeys(): for block2 in self.gf_struct_solver[ish].keys():
if dm[block1].shape == dm[block2].shape: if dm[block1].shape == dm[block2].shape:
if ((abs(dm[block1] - dm[block2]) < threshold).all()) and (block1 != block2): if ((abs(dm[block1] - dm[block2]) < threshold).all()) and (block1 != block2):
ind1 = -1 ind1 = -1
@ -969,7 +969,7 @@ class SumkDFT(object):
if include_shells is None: if include_shells is None:
# include all shells # include all shells
include_shells = range(self.n_inequiv_shells) include_shells = list(range(self.n_inequiv_shells))
for ish in include_shells: for ish in include_shells:
for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]: for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
@ -1002,7 +1002,7 @@ class SumkDFT(object):
for i in range(num_blocs): for i in range(num_blocs):
blocs[i].sort() blocs[i].sort()
self.gf_struct_solver[ish].update( self.gf_struct_solver[ish].update(
[('%s_%s' % (sp, i), range(len(blocs[i])))]) [('%s_%s' % (sp, i), list(range(len(blocs[i]))))])
# Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner) # Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner)
# and solver_to_sumk taking (solver_block, solver_inner) --> # and solver_to_sumk taking (solver_block, solver_inner) -->
@ -1021,7 +1021,7 @@ class SumkDFT(object):
# transform G to the new structure # transform G to the new structure
full_structure = BlockStructure.full_structure( full_structure = BlockStructure.full_structure(
[{sp:range(self.corr_shells[self.inequiv_to_corr[ish]]['dim']) [{sp:list(range(self.corr_shells[self.inequiv_to_corr[ish]]['dim']))
for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]} for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]}
for ish in range(self.n_inequiv_shells)],None) for ish in range(self.n_inequiv_shells)],None)
G_transformed = [ G_transformed = [
@ -1073,7 +1073,7 @@ class SumkDFT(object):
if include_shells is None: if include_shells is None:
# include all shells # include all shells
include_shells = range(self.n_inequiv_shells) include_shells = list(range(self.n_inequiv_shells))
# We consider two blocks equal, if their Green's functions obey # We consider two blocks equal, if their Green's functions obey
# maybe_conjugate1( v1^dagger G1 v1 ) = maybe_conjugate2( v2^dagger G2 v2 ) # maybe_conjugate1( v1^dagger G1 v1 ) = maybe_conjugate2( v2^dagger G2 v2 )
@ -1086,8 +1086,8 @@ class SumkDFT(object):
# where our goal is to find T # where our goal is to find T
# we just try whether there is such a T with and without conjugation # we just try whether there is such a T with and without conjugation
for ish in include_shells: for ish in include_shells:
for block1 in self.gf_struct_solver[ish].iterkeys(): for block1 in self.gf_struct_solver[ish].keys():
for block2 in self.gf_struct_solver[ish].iterkeys(): for block2 in self.gf_struct_solver[ish].keys():
if block1==block2: continue if block1==block2: continue
# check if the blocks are already present in the deg_shells # check if the blocks are already present in the deg_shells
@ -1298,7 +1298,7 @@ class SumkDFT(object):
dens_mat[icrsh][sp] = numpy.zeros( dens_mat[icrsh][sp] = numpy.zeros(
[self.corr_shells[icrsh]['dim'], self.corr_shells[icrsh]['dim']], numpy.complex_) [self.corr_shells[icrsh]['dim'], self.corr_shells[icrsh]['dim']], numpy.complex_)
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
if method == "using_gf": if method == "using_gf":
@ -1327,7 +1327,7 @@ class SumkDFT(object):
MMat[isp][inu, inu] = 0.0 MMat[isp][inu, inu] = 0.0
else: else:
raise ValueError, "density_matrix: the method '%s' is not supported." % method raise ValueError("density_matrix: the method '%s' is not supported." % method)
for icrsh in range(self.n_corr_shells): for icrsh in range(self.n_corr_shells):
for isp, sp in enumerate(self.spin_block_names[self.corr_shells[icrsh]['SO']]): for isp, sp in enumerate(self.spin_block_names[self.corr_shells[icrsh]['SO']]):
@ -1527,10 +1527,10 @@ class SumkDFT(object):
spn = self.spin_block_names[self.corr_shells[icrsh]['SO']] spn = self.spin_block_names[self.corr_shells[icrsh]['SO']]
Ncr = {sp: 0.0 for sp in spn} Ncr = {sp: 0.0 for sp in spn}
for block, inner in self.gf_struct_solver[ish].iteritems(): for block, inner in self.gf_struct_solver[ish].items():
bl = self.solver_to_sumk_block[ish][block] bl = self.solver_to_sumk_block[ish][block]
Ncr[bl] += dens_mat[block].real.trace() Ncr[bl] += dens_mat[block].real.trace()
Ncrtot = sum(Ncr.itervalues()) Ncrtot = sum(Ncr.values())
for sp in spn: for sp in spn:
self.dc_imp[icrsh][sp] = numpy.identity(dim, numpy.float_) self.dc_imp[icrsh][sp] = numpy.identity(dim, numpy.float_)
if self.SP == 0: # average the densities if there is no SP: if self.SP == 0: # average the densities if there is no SP:
@ -1543,7 +1543,7 @@ class SumkDFT(object):
if use_dc_value is None: if use_dc_value is None:
if U_interact is None and J_hund is None: if U_interact is None and J_hund is None:
raise ValueError, "set_dc: either provide U_interact and J_hund or set use_dc_value to dc value." raise ValueError("set_dc: either provide U_interact and J_hund or set use_dc_value to dc value.")
if use_dc_formula == 0: # FLL if use_dc_formula == 0: # FLL
@ -1733,7 +1733,7 @@ class SumkDFT(object):
if mu is None: if mu is None:
mu = self.chemical_potential mu = self.chemical_potential
dens = 0.0 dens = 0.0
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt = self.lattice_gf( G_latt = self.lattice_gf(
ik=ik, mu=mu, iw_or_w=iw_or_w, with_Sigma=with_Sigma, with_dc=with_dc, broadening=broadening) ik=ik, mu=mu, iw_or_w=iw_or_w, with_Sigma=with_Sigma, with_dc=with_dc, broadening=broadening)
@ -1848,7 +1848,7 @@ class SumkDFT(object):
# Convert Fermi weights to a density matrix # Convert Fermi weights to a density matrix
dens_mat_dft = {} dens_mat_dft = {}
for sp in spn: for sp in spn:
dens_mat_dft[sp] = [fermi_weights[ik, ntoi[sp], :].astype(numpy.complex_) for ik in xrange(self.n_k)] dens_mat_dft[sp] = [fermi_weights[ik, ntoi[sp], :].astype(numpy.complex_) for ik in range(self.n_k)]
# Set up deltaN: # Set up deltaN:
@ -1857,7 +1857,7 @@ class SumkDFT(object):
deltaN[sp] = [numpy.zeros([self.n_orbitals[ik, ntoi[sp]], self.n_orbitals[ deltaN[sp] = [numpy.zeros([self.n_orbitals[ik, ntoi[sp]], self.n_orbitals[
ik, ntoi[sp]]], numpy.complex_) for ik in range(self.n_k)] ik, ntoi[sp]]], numpy.complex_) for ik in range(self.n_k)]
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt_iw = self.lattice_gf( G_latt_iw = self.lattice_gf(
ik=ik, mu=self.chemical_potential, iw_or_w="iw") ik=ik, mu=self.chemical_potential, iw_or_w="iw")
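Because range() returns a lazy sequence in Python 3, 2to3 conservatively wraps it in list() wherever the result might be used as a list, as in the ikarray lines above. For building a NumPy index array the wrapping is not strictly required; a sketch of the equivalent spellings (the commit keeps the conservative form):

    import numpy

    n_k = 8
    ikarray = numpy.array(list(range(n_k)))    # 2to3's rewrite, as in this diff
    alt_a   = numpy.array(range(n_k))          # numpy also accepts the range object
    alt_b   = numpy.arange(n_k)                # idiomatic NumPy spelling
    print(numpy.array_equal(ikarray, alt_a) and numpy.array_equal(ikarray, alt_b))  # -> True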
@ -1946,7 +1946,7 @@ class SumkDFT(object):
to_write = {f: (0, 'up'), f1: (1, 'down')} to_write = {f: (0, 'up'), f1: (1, 'down')}
if self.SO == 1: if self.SO == 1:
to_write = {f: (0, 'ud'), f1: (0, 'ud')} to_write = {f: (0, 'ud'), f1: (0, 'ud')}
for fout in to_write.iterkeys(): for fout in to_write.keys():
isp, sp = to_write[fout] isp, sp = to_write[fout]
for ik in range(self.n_k): for ik in range(self.n_k):
fout.write("%s\n" % self.n_orbitals[ik, isp]) fout.write("%s\n" % self.n_orbitals[ik, isp])
@ -1963,12 +1963,12 @@ class SumkDFT(object):
if mpi.is_master_node(): if mpi.is_master_node():
with open(filename, 'w') as f: with open(filename, 'w') as f:
f.write(" %i -1 ! Number of k-points, default number of bands\n"%(self.n_k)) f.write(" %i -1 ! Number of k-points, default number of bands\n"%(self.n_k))
for ik in xrange(self.n_k): for ik in range(self.n_k):
ib1 = band_window[0][ik, 0] ib1 = band_window[0][ik, 0]
ib2 = band_window[0][ik, 1] ib2 = band_window[0][ik, 1]
f.write(" %i %i %i\n"%(ik + 1, ib1, ib2)) f.write(" %i %i %i\n"%(ik + 1, ib1, ib2))
for inu in xrange(self.n_orbitals[ik, 0]): for inu in range(self.n_orbitals[ik, 0]):
for imu in xrange(self.n_orbitals[ik, 0]): for imu in range(self.n_orbitals[ik, 0]):
valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0 valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0
valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0 valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0
f.write(" %.14f %.14f"%(valre, valim)) f.write(" %.14f %.14f"%(valre, valim))



@ -23,8 +23,8 @@ from types import *
import numpy import numpy
from pytriqs.gf import * from pytriqs.gf import *
import pytriqs.utility.mpi as mpi import pytriqs.utility.mpi as mpi
from symmetry import * from .symmetry import *
from sumk_dft import SumkDFT from .sumk_dft import SumkDFT
from scipy.integrate import * from scipy.integrate import *
from scipy.interpolate import * from scipy.interpolate import *
@ -79,7 +79,7 @@ class SumkDFTTools(SumkDFT):
DOS projected to atoms and resolved into orbital contributions. DOS projected to atoms and resolved into orbital contributions.
""" """
if (mesh is None) and (not with_Sigma): if (mesh is None) and (not with_Sigma):
raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.")
if mesh is None: if mesh is None:
om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh] om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh]
om_min = om_mesh[0] om_min = om_mesh[0]
@ -111,7 +111,7 @@ class SumkDFTTools(SumkDFT):
DOSproj_orb[ish][sp] = numpy.zeros( DOSproj_orb[ish][sp] = numpy.zeros(
[n_om, dim, dim], numpy.complex_) [n_om, dim, dim], numpy.complex_)
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt_w = self.lattice_gf( G_latt_w = self.lattice_gf(
@ -217,7 +217,7 @@ class SumkDFTTools(SumkDFT):
DOS projected to atoms and resolved into orbital contributions. DOS projected to atoms and resolved into orbital contributions.
""" """
if (mesh is None) and (not with_Sigma): if (mesh is None) and (not with_Sigma):
raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.")
if mesh is None: if mesh is None:
om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh] om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh]
om_min = om_mesh[0] om_min = om_mesh[0]
@ -229,12 +229,12 @@ class SumkDFTTools(SumkDFT):
om_mesh = numpy.linspace(om_min, om_max, n_om) om_mesh = numpy.linspace(om_min, om_max, n_om)
spn = self.spin_block_names[self.SO] spn = self.spin_block_names[self.SO]
gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn] gf_struct_parproj = [[(sp, list(range(self.shells[ish]['dim']))) for sp in spn]
for ish in range(self.n_shells)] for ish in range(self.n_shells)]
#print(self.proj_mat_csc.shape[2]) #print(self.proj_mat_csc.shape[2])
#print(spn) #print(spn)
n_local_orbs = self.proj_mat_csc.shape[2] n_local_orbs = self.proj_mat_csc.shape[2]
gf_struct_parproj_all = [[(sp, range(n_local_orbs)) for sp in spn]] gf_struct_parproj_all = [[(sp, list(range(n_local_orbs))) for sp in spn]]
glist_all = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om) glist_all = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om)
for block, inner in gf_struct_parproj_all[0]] for block, inner in gf_struct_parproj_all[0]]
@ -251,7 +251,7 @@ class SumkDFTTools(SumkDFT):
DOSproj_orb[sp] = numpy.zeros( DOSproj_orb[sp] = numpy.zeros(
[n_om, dim, dim], numpy.complex_) [n_om, dim, dim], numpy.complex_)
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt_w = self.lattice_gf( G_latt_w = self.lattice_gf(
@ -352,7 +352,7 @@ class SumkDFTTools(SumkDFT):
self.symmpar = Symmetry(self.hdf_file, subgroup=self.symmpar_data) self.symmpar = Symmetry(self.hdf_file, subgroup=self.symmpar_data)
if (mesh is None) and (not with_Sigma): if (mesh is None) and (not with_Sigma):
raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.")
if mesh is None: if mesh is None:
om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh] om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh]
om_min = om_mesh[0] om_min = om_mesh[0]
@ -365,7 +365,7 @@ class SumkDFTTools(SumkDFT):
G_loc = [] G_loc = []
spn = self.spin_block_names[self.SO] spn = self.spin_block_names[self.SO]
gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn] gf_struct_parproj = [[(sp, list(range(self.shells[ish]['dim']))) for sp in spn]
for ish in range(self.n_shells)] for ish in range(self.n_shells)]
for ish in range(self.n_shells): for ish in range(self.n_shells):
glist = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om) glist = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om)
@ -386,7 +386,7 @@ class SumkDFTTools(SumkDFT):
DOSproj_orb[ish][sp] = numpy.zeros( DOSproj_orb[ish][sp] = numpy.zeros(
[n_om, dim, dim], numpy.complex_) [n_om, dim, dim], numpy.complex_)
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt_w = self.lattice_gf( G_latt_w = self.lattice_gf(
@ -526,12 +526,12 @@ class SumkDFTTools(SumkDFT):
if not ishell is None: if not ishell is None:
gf_struct_parproj = [ gf_struct_parproj = [
(sp, range(self.shells[ishell]['dim'])) for sp in spn] (sp, list(range(self.shells[ishell]['dim']))) for sp in spn]
G_loc = BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=self.Sigma_imp_w[0].mesh)) G_loc = BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=self.Sigma_imp_w[0].mesh))
for block, inner in gf_struct_parproj], make_copies=False) for block, inner in gf_struct_parproj], make_copies=False)
G_loc.zero() G_loc.zero()
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt_w = self.lattice_gf( G_latt_w = self.lattice_gf(
@ -653,7 +653,7 @@ class SumkDFTTools(SumkDFT):
for ish in range(self.n_shells)] for ish in range(self.n_shells)]
for isp in range(len(spn))] for isp in range(len(spn))]
# Set up G_loc # Set up G_loc
gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn] gf_struct_parproj = [[(sp, list(range(self.shells[ish]['dim']))) for sp in spn]
for ish in range(self.n_shells)] for ish in range(self.n_shells)]
if with_Sigma: if with_Sigma:
G_loc = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=self.Sigma_imp_iw[0].mesh)) G_loc = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=self.Sigma_imp_iw[0].mesh))
@ -667,7 +667,7 @@ class SumkDFTTools(SumkDFT):
for ish in range(self.n_shells): for ish in range(self.n_shells):
G_loc[ish].zero() G_loc[ish].zero()
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
G_latt_iw = self.lattice_gf( G_latt_iw = self.lattice_gf(
@ -828,10 +828,10 @@ class SumkDFTTools(SumkDFT):
if mpi.is_master_node(): if mpi.is_master_node():
ar = HDFArchive(self.hdf_file, 'r') ar = HDFArchive(self.hdf_file, 'r')
if not (self.transp_data in ar): if not (self.transp_data in ar):
raise IOError, "transport_distribution: No %s subgroup in hdf file found! Call convert_transp_input first." % self.transp_data raise IOError("transport_distribution: No %s subgroup in hdf file found! Call convert_transp_input first." % self.transp_data)
# check if outputs file was converted # check if outputs file was converted
if not ('n_symmetries' in ar['dft_misc_input']): if not ('n_symmetries' in ar['dft_misc_input']):
raise IOError, "transport_distribution: n_symmetries missing. Check if case.outputs file is present and call convert_misc_input() or convert_dft_input()." raise IOError("transport_distribution: n_symmetries missing. Check if case.outputs file is present and call convert_misc_input() or convert_dft_input().")
self.read_transport_input_from_hdf() self.read_transport_input_from_hdf()
@ -894,7 +894,7 @@ class SumkDFTTools(SumkDFT):
for i, g in self.Sigma_imp_w[icrsh]: for i, g in self.Sigma_imp_w[icrsh]:
for iL in g.indices[0]: for iL in g.indices[0]:
for iR in g.indices[0]: for iR in g.indices[0]:
for iom in xrange(n_om): for iom in range(n_om):
g.data[iom, int(iL), int(iR)] = Sigma_save[ g.data[iom, int(iL), int(iR)] = Sigma_save[
i].data[ioffset + iom, int(iL), int(iR)] i].data[ioffset + iom, int(iL), int(iR)]
else: else:
@ -913,18 +913,18 @@ class SumkDFTTools(SumkDFT):
self.Om_mesh = iOm_mesh * d_omega self.Om_mesh = iOm_mesh * d_omega
if mpi.is_master_node(): if mpi.is_master_node():
print "Chemical potential: ", mu print("Chemical potential: ", mu)
print "Using n_om = %s points in the energy_window [%s,%s]" % (n_om, self.omega[0], self.omega[-1]), print("Using n_om = %s points in the energy_window [%s,%s]" % (n_om, self.omega[0], self.omega[-1]), end=' ')
print "where the omega vector is:" print("where the omega vector is:")
print self.omega print(self.omega)
print "Calculation requested for Omega mesh: ", numpy.array(Om_mesh) print("Calculation requested for Omega mesh: ", numpy.array(Om_mesh))
print "Omega mesh automatically repined to: ", self.Om_mesh print("Omega mesh automatically repined to: ", self.Om_mesh)
self.Gamma_w = {direction: numpy.zeros( self.Gamma_w = {direction: numpy.zeros(
(len(self.Om_mesh), n_om), dtype=numpy.float_) for direction in self.directions} (len(self.Om_mesh), n_om), dtype=numpy.float_) for direction in self.directions}
# Sum over all k-points # Sum over all k-points
ikarray = numpy.array(range(self.n_k)) ikarray = numpy.array(list(range(self.n_k)))
for ik in mpi.slice_array(ikarray): for ik in mpi.slice_array(ikarray):
# Calculate G_w for ik and initialize A_kw # Calculate G_w for ik and initialize A_kw
G_w = self.lattice_gf(ik, mu, iw_or_w="w", beta=beta, G_w = self.lattice_gf(ik, mu, iw_or_w="w", beta=beta,
@ -938,7 +938,7 @@ class SumkDFTTools(SumkDFT):
A_kw[isp] = copy.deepcopy(G_w[self.spin_block_names[self.SO][ A_kw[isp] = copy.deepcopy(G_w[self.spin_block_names[self.SO][
isp]].data.swapaxes(0, 1).swapaxes(1, 2)) isp]].data.swapaxes(0, 1).swapaxes(1, 2))
# calculate A(k,w) for each frequency # calculate A(k,w) for each frequency
for iw in xrange(n_om): for iw in range(n_om):
A_kw[isp][:, :, iw] = -1.0 / (2.0 * numpy.pi * 1j) * ( A_kw[isp][:, :, iw] = -1.0 / (2.0 * numpy.pi * 1j) * (
A_kw[isp][:, :, iw] - numpy.conjugate(numpy.transpose(A_kw[isp][:, :, iw]))) A_kw[isp][:, :, iw] - numpy.conjugate(numpy.transpose(A_kw[isp][:, :, iw])))
@ -963,7 +963,7 @@ class SumkDFTTools(SumkDFT):
# calculate Gamma_w for each direction from the velocities # calculate Gamma_w for each direction from the velocities
# vel_R and the spectral function A_kw # vel_R and the spectral function A_kw
for direction in self.directions: for direction in self.directions:
for iw in xrange(n_om): for iw in range(n_om):
for iq in range(len(self.Om_mesh)): for iq in range(len(self.Om_mesh)):
if(iw + iOm_mesh[iq] >= n_om or self.omega[iw] < -self.Om_mesh[iq] + energy_window[0] or self.omega[iw] > self.Om_mesh[iq] + energy_window[1]): if(iw + iOm_mesh[iq] >= n_om or self.omega[iw] < -self.Om_mesh[iq] + energy_window[0] or self.omega[iw] > self.Om_mesh[iq] + energy_window[1]):
continue continue
@ -1033,7 +1033,7 @@ class SumkDFTTools(SumkDFT):
else: else:
# rectangular integration for w-grid (orignal implementation) # rectangular integration for w-grid (orignal implementation)
d_w = self.omega[1] - self.omega[0] d_w = self.omega[1] - self.omega[0]
for iw in xrange(self.Gamma_w[direction].shape[1]): for iw in range(self.Gamma_w[direction].shape[1]):
A += A_int[iw] * d_w A += A_int[iw] * d_w
A = A * numpy.pi * (2.0 - self.SP) A = A * numpy.pi * (2.0 - self.SP)
else: else:
@ -1083,16 +1083,16 @@ class SumkDFTTools(SumkDFT):
(n_q,), numpy.nan) for direction in self.directions} (n_q,), numpy.nan) for direction in self.directions}
for direction in self.directions: for direction in self.directions:
for iq in xrange(n_q): for iq in range(n_q):
A0[direction][iq] = self.transport_coefficient( A0[direction][iq] = self.transport_coefficient(
direction, iq=iq, n=0, beta=beta, method=method) direction, iq=iq, n=0, beta=beta, method=method)
A1[direction][iq] = self.transport_coefficient( A1[direction][iq] = self.transport_coefficient(
direction, iq=iq, n=1, beta=beta, method=method) direction, iq=iq, n=1, beta=beta, method=method)
A2[direction][iq] = self.transport_coefficient( A2[direction][iq] = self.transport_coefficient(
direction, iq=iq, n=2, beta=beta, method=method) direction, iq=iq, n=2, beta=beta, method=method)
print "A_0 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A0[direction][iq]) print("A_0 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A0[direction][iq]))
print "A_1 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A1[direction][iq]) print("A_1 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A1[direction][iq]))
print "A_2 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A2[direction][iq]) print("A_2 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A2[direction][iq]))
if ~numpy.isnan(A1[direction][iq]): if ~numpy.isnan(A1[direction][iq]):
# Seebeck and kappa are overwritten if there is more than one Omega = # Seebeck and kappa are overwritten if there is more than one Omega =
# 0 in Om_mesh # 0 in Om_mesh
@ -1102,11 +1102,11 @@ class SumkDFTTools(SumkDFT):
self.kappa[direction] *= 293178.0 self.kappa[direction] *= 293178.0
self.optic_cond[direction] = beta * \ self.optic_cond[direction] = beta * \
A0[direction] * 10700.0 / numpy.pi A0[direction] * 10700.0 / numpy.pi
for iq in xrange(n_q): for iq in range(n_q):
print "Conductivity in direction %s for Omega = %.2f %f x 10^4 Ohm^-1 cm^-1" % (direction, self.Om_mesh[iq], self.optic_cond[direction][iq]) print("Conductivity in direction %s for Omega = %.2f %f x 10^4 Ohm^-1 cm^-1" % (direction, self.Om_mesh[iq], self.optic_cond[direction][iq]))
if not (numpy.isnan(A1[direction][iq])): if not (numpy.isnan(A1[direction][iq])):
print "Seebeck in direction %s for Omega = 0.00 %f x 10^(-6) V/K" % (direction, self.seebeck[direction]) print("Seebeck in direction %s for Omega = 0.00 %f x 10^(-6) V/K" % (direction, self.seebeck[direction]))
print "kappa in direction %s for Omega = 0.00 %f W/(m * K)" % (direction, self.kappa[direction]) print("kappa in direction %s for Omega = 0.00 %f W/(m * K)" % (direction, self.kappa[direction]))
return self.optic_cond, self.seebeck, self.kappa return self.optic_cond, self.seebeck, self.kappa
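The print statements in this file become calls to the print() function; a Python 2 trailing comma (which suppressed the newline) maps onto the end=' ' keyword, as in the energy-window line above. A minimal sketch with placeholder values:

    direction, Om, A0 = 'xx', 0.0, 1.234e-03       # placeholder values

    # Python 2:  print "A_0 in direction %s for Omega = %.2f %e a.u." % (direction, Om, A0)
    print("A_0 in direction %s for Omega = %.2f %e a.u." % (direction, Om, A0))

    # Python 2:  print "Using n_om = 1001 points",     (trailing comma, no newline)
    print("Using n_om = 1001 points", end=' ')
    print("where the omega vector is:")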


@ -113,7 +113,7 @@ class TransBasis:
# transform the CTQMC blocks to the full matrix: # transform the CTQMC blocks to the full matrix:
# ish is the index of the inequivalent shell corresponding to icrsh # ish is the index of the inequivalent shell corresponding to icrsh
ish = self.SK.corr_to_inequiv[0] ish = self.SK.corr_to_inequiv[0]
for block, inner in self.gf_struct_solver[ish].iteritems(): for block, inner in self.gf_struct_solver[ish].items():
for ind1 in inner: for ind1 in inner:
for ind2 in inner: for ind2 in inner:
gfrotated[self.SK.solver_to_sumk_block[ish][block]][ gfrotated[self.SK.solver_to_sumk_block[ish][block]][
@ -126,7 +126,7 @@ class TransBasis:
gfreturn = gf_to_rot.copy() gfreturn = gf_to_rot.copy()
# Put back into CTQMC basis: # Put back into CTQMC basis:
for block, inner in self.gf_struct_solver[ish].iteritems(): for block, inner in self.gf_struct_solver[ish].items():
for ind1 in inner: for ind1 in inner:
for ind2 in inner: for ind2 in inner:
gfreturn[block][ind1, ind2] << gfrotated[ gfreturn[block][ind1, ind2] << gfrotated[


@ -5,15 +5,15 @@ import numpy
import subprocess import subprocess
if len(sys.argv) < 2: if len(sys.argv) < 2:
print "Usage: python update_archive.py old_archive [v1.0|v1.2]" print("Usage: python update_archive.py old_archive [v1.0|v1.2]")
sys.exit() sys.exit()
print """ print("""
This script is an attempt to update your archive to TRIQS 1.2. This script is an attempt to update your archive to TRIQS 1.2.
Please keep a copy of your old archive as this script is Please keep a copy of your old archive as this script is
** not guaranteed ** to work for your archive. ** not guaranteed ** to work for your archive.
If you encounter any problem please report it on github! If you encounter any problem please report it on github!
""" """)
def convert_shells(shells): def convert_shells(shells):
@ -63,26 +63,26 @@ A = h5py.File(filename)
old_to_new = {'SumK_LDA': 'dft_input', 'SumK_LDA_ParProj': 'dft_parproj_input', old_to_new = {'SumK_LDA': 'dft_input', 'SumK_LDA_ParProj': 'dft_parproj_input',
'SymmCorr': 'dft_symmcorr_input', 'SymmPar': 'dft_symmpar_input', 'SumK_LDA_Bands': 'dft_bands_input'} 'SymmCorr': 'dft_symmcorr_input', 'SymmPar': 'dft_symmpar_input', 'SumK_LDA_Bands': 'dft_bands_input'}
for old, new in old_to_new.iteritems(): for old, new in old_to_new.items():
if old not in A.keys(): if old not in list(A.keys()):
continue continue
print "Changing %s to %s ..." % (old, new) print("Changing %s to %s ..." % (old, new))
A.copy(old, new) A.copy(old, new)
del(A[old]) del(A[old])
# Move output items from dft_input to user_data # Move output items from dft_input to user_data
move_to_output = ['chemical_potential', 'dc_imp', 'dc_energ'] move_to_output = ['chemical_potential', 'dc_imp', 'dc_energ']
for obj in move_to_output: for obj in move_to_output:
if obj in A['dft_input'].keys(): if obj in list(A['dft_input'].keys()):
if 'user_data' not in A: if 'user_data' not in A:
A.create_group('user_data') A.create_group('user_data')
print "Moving %s to user_data ..." % obj print("Moving %s to user_data ..." % obj)
A.copy('dft_input/' + obj, 'user_data/' + obj) A.copy('dft_input/' + obj, 'user_data/' + obj)
del(A['dft_input'][obj]) del(A['dft_input'][obj])
# Delete obsolete quantities # Delete obsolete quantities
to_delete = ['gf_struct_solver', 'map_inv', 'map', 'deg_shells', 'h_field'] to_delete = ['gf_struct_solver', 'map_inv', 'map', 'deg_shells', 'h_field']
for obj in to_delete: for obj in to_delete:
if obj in A['dft_input'].keys(): if obj in list(A['dft_input'].keys()):
del(A['dft_input'][obj]) del(A['dft_input'][obj])
if from_v == 'v1.0': if from_v == 'v1.0':
@ -109,11 +109,11 @@ if 'n_inequiv_shells' not in A['dft_input']:
# Rename variables # Rename variables
groups = ['dft_symmcorr_input', 'dft_symmpar_input'] groups = ['dft_symmcorr_input', 'dft_symmpar_input']
for group in groups: for group in groups:
if group not in A.keys(): if group not in list(A.keys()):
continue continue
if 'n_s' not in A[group]: if 'n_s' not in A[group]:
continue continue
print "Changing n_s to n_symm ..." print("Changing n_s to n_symm ...")
A[group].move('n_s', 'n_symm') A[group].move('n_s', 'n_symm')
# Convert orbits to list of dicts # Convert orbits to list of dicts
orbits_old = HDFArchive(filename, 'r')[group]['orbits'] orbits_old = HDFArchive(filename, 'r')[group]['orbits']
@ -125,11 +125,11 @@ for group in groups:
groups = ['dft_parproj_input'] groups = ['dft_parproj_input']
for group in groups: for group in groups:
if group not in A.keys(): if group not in list(A.keys()):
continue continue
if 'proj_mat_pc' not in A[group]: if 'proj_mat_pc' not in A[group]:
continue continue
print "Changing proj_mat_pc to proj_mat_all ..." print("Changing proj_mat_pc to proj_mat_all ...")
A[group].move('proj_mat_pc', 'proj_mat_all') A[group].move('proj_mat_pc', 'proj_mat_all')
A.close() A.close()
@ -137,6 +137,6 @@ A.close()
# Repack to reclaim disk space # Repack to reclaim disk space
retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"]) retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"])
if retcode != 0: if retcode != 0:
print "h5repack failed!" print("h5repack failed!")
else: else:
subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename]) subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename])
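In this script 2to3 also wraps .keys() in list() before membership tests, because keys() is a view in Python 3. The wrapping is harmless but unnecessary: views (and the mapping itself) support the in operator directly. A sketch with a plain dict standing in for the h5py.File object:

    A = {'SumK_LDA': 1, 'dft_input': 2}      # stand-in for the opened HDF5 file

    old = 'SumK_LDA'
    if old not in list(A.keys()):            # 2to3's rewrite, as in this diff
        pass
    if old not in A.keys():                  # equivalent, no intermediate list
        pass
    if old not in A:                         # simplest equivalent form
        pass
    print(old in A)                          # -> True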


@ -24,7 +24,7 @@ triqs_hash = "@TRIQS_GIT_HASH@"
dft_tools_hash = "@DFT_TOOLS_GIT_HASH@" dft_tools_hash = "@DFT_TOOLS_GIT_HASH@"
def show_version(): def show_version():
print "\nYou are using the dft_tools version %s\n"%version print("\nYou are using the dft_tools version %s\n"%version)
def show_git_hash(): def show_git_hash():
print "\nYou are using the dft_tools git hash %s based on triqs git hash %s\n"%(dft_tools_hash, triqs_hash) print("\nYou are using the dft_tools git hash %s based on triqs git hash %s\n"%(dft_tools_hash, triqs_hash))


@ -213,7 +213,7 @@ for conjugate in conjugate_values:
# first, construct the old format of the deg shells # first, construct the old format of the deg shells
for ish in range(len(SK.deg_shells)): for ish in range(len(SK.deg_shells)):
for gr in range(len(SK.deg_shells[ish])): for gr in range(len(SK.deg_shells[ish])):
SK.deg_shells[ish][gr] = SK.deg_shells[ish][gr].keys() SK.deg_shells[ish][gr] = list(SK.deg_shells[ish][gr].keys())
# symmetrizing the GF as is has to leave it unchanged # symmetrizing the GF as is has to leave it unchanged
G_new_symm << G_pre_transform G_new_symm << G_pre_transform


@ -35,13 +35,13 @@ Hloc[8:,8:] = Hloc1
V = get_random_hermitian(2) # the hopping elements from impurity to bath V = get_random_hermitian(2) # the hopping elements from impurity to bath
b1 = np.random.rand() # the bath energy of the first bath level b1 = np.random.rand() # the bath energy of the first bath level
b2 = np.random.rand() # the bath energy of the second bath level b2 = np.random.rand() # the bath energy of the second bath level
delta = GfReFreq(window=(-10,10), indices=range(2), n_points=1001) delta = GfReFreq(window=(-10,10), indices=list(range(2)), n_points=1001)
delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
# construct G # construct G
G = BlockGf(name_block_generator=[('ud',GfReFreq(window=(-10,10), indices=range(10), n_points=1001))], make_copies=False) G = BlockGf(name_block_generator=[('ud',GfReFreq(window=(-10,10), indices=list(range(10)), n_points=1001))], make_copies=False)
for i in range(0,10,2): for i in range(0,10,2):
G['ud'][i:i+2,i:i+2] << inverse(Omega-delta+0.02j) G['ud'][i:i+2,i:i+2] << inverse(Omega-delta+0.02j)
G['ud'] << inverse(inverse(G['ud']) - Hloc) G['ud'] << inverse(inverse(G['ud']) - Hloc)
@ -58,7 +58,7 @@ assert SK.gf_struct_sumk == [[('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], [('ud', [
"wrong gf_struct_sumk" "wrong gf_struct_sumk"
for i in range(5): for i in range(5):
assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block" assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block"
assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == range(2), "wrong block size" assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == list(range(2)), "wrong block size"
for i in range(10): for i in range(10):
assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping" assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping"
@ -101,7 +101,7 @@ assert SK.gf_struct_sumk == [[('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], [('ud', [
"wrong gf_struct_sumk" "wrong gf_struct_sumk"
for i in range(5): for i in range(5):
assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block" assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block"
assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == range(2), "wrong block size" assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == list(range(2)), "wrong block size"
for i in range(10): for i in range(10):
assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping" assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping"
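One change 2to3 cannot make automatically is integer division: in Python 3 the / operator is true division, so an expression like 'ud_{}'.format(i/2) now produces labels such as 'ud_1.5' instead of 'ud_1'. If integer block labels are intended, the Python 3 equivalent of the old behaviour is floor division with //; worth double-checking in assertions like the ones above. A short illustration:

    i = 3
    print('ud_{}'.format(i / 2))    # Python 3: 'ud_1.5'   (Python 2 printed 'ud_1')
    print('ud_{}'.format(i // 2))   # 'ud_1' on both versions
    print((i // 2, i % 2))          # -> (1, 1)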


@ -44,8 +44,8 @@ class MyTestCase(unittest.TestCase):
# Make a diff # Make a diff
# #
# Remove empty lines # Remove empty lines
lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
# diff # diff
delta = difflib.unified_diff(lstr1, lstr2) delta = difflib.unified_diff(lstr1, lstr2)
# combine delta's to a string # combine delta's to a string


@ -45,8 +45,8 @@ class MyTestCase(unittest.TestCase):
# Make a diff # Make a diff
# #
# Remove empty lines # Remove empty lines
lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
# diff # diff
delta = difflib.unified_diff(lstr1, lstr2) delta = difflib.unified_diff(lstr1, lstr2)
# combine delta's to a string # combine delta's to a string
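filter() returns a lazy iterator in Python 3, so where the result is indexed or reused 2to3 replaces filter(lambda ...) with an equivalent list comprehension, as in the two lstr lines above. A sketch:

    str1 = "a\n\n   \nb\n"
    # Python 2:  lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True))
    lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
    lazy  = filter(lambda s: s.strip() != '', str1.splitlines(True))   # fine if consumed once
    print(lstr1)                     # -> ['a\n', 'b\n']
    print(list(lazy))                # -> ['a\n', 'b\n']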


@ -2,10 +2,10 @@ r"""
Tests of 'parse_general()' defined in ConfigParameters class Tests of 'parse_general()' defined in ConfigParameters class
""" """
import os import os
import rpath from . import rpath
_rpath = os.path.dirname(rpath.__file__) + '/' _rpath = os.path.dirname(rpath.__file__) + '/'
import arraytest from . import arraytest
import numpy as np import numpy as np
from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters


@ -2,10 +2,10 @@ r"""
Tests of 'parse_groups()' defined in ConfigParameters class Tests of 'parse_groups()' defined in ConfigParameters class
""" """
import os import os
import rpath from . import rpath
_rpath = os.path.dirname(rpath.__file__) + '/' _rpath = os.path.dirname(rpath.__file__) + '/'
import arraytest from . import arraytest
import numpy as np import numpy as np
from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
@ -30,7 +30,7 @@ class TestParseGroups(arraytest.ArrayTestCase):
def test_gr_required(self): def test_gr_required(self):
conf_pars = ConfigParameters(_rpath + 'parse_groups_1.cfg') conf_pars = ConfigParameters(_rpath + 'parse_groups_1.cfg')
err_mess = "Required parameter" err_mess = "Required parameter"
with self.assertRaisesRegexp(Exception, err_mess): with self.assertRaisesRegex(Exception, err_mess):
conf_pars.parse_groups() conf_pars.parse_groups()
# Scenario 2 # Scenario 2
@ -42,8 +42,8 @@ class TestParseGroups(arraytest.ArrayTestCase):
'normalize': True, 'normion': True,'complement': False}, 'normalize': True, 'normion': True,'complement': False},
{'index': 2, 'shells': [3], 'ewindow': (-1.6, 2.0), {'index': 2, 'shells': [3], 'ewindow': (-1.6, 2.0),
'normalize': True, 'normion': True,'complement': False}] 'normalize': True, 'normion': True,'complement': False}]
print res print(res)
print expected print(expected)
self.assertListEqual(res, expected) self.assertListEqual(res, expected)


@@ -2,10 +2,10 @@ r"""
 Tests of 'parse_input()' defined in ConfigParameters class
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import arraytest
+from . import arraytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
@@ -38,28 +38,28 @@ class TestParseInput(arraytest.ArrayTestCase):
     def test_no_group(self):
         conf_pars = ConfigParameters(_rpath + 'input_test_1.cfg')
         err_mess = "At least one group"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             conf_pars.parse_input()
     # Scenario 2
     def test_gr_required(self):
         conf_pars = ConfigParameters(_rpath + 'input_test_2.cfg')
         err_mess = "One \[Shell\] section is"
-        with self.assertRaisesRegexp(KeyError, err_mess):
+        with self.assertRaisesRegex(KeyError, err_mess):
             conf_pars.parse_input()
     # Scenario 3
     def test_no_shell(self):
         conf_pars = ConfigParameters(_rpath + 'input_test_3.cfg')
         err_mess = "Shell 3 referenced in"
-        with self.assertRaisesRegexp(Exception, err_mess):
+        with self.assertRaisesRegex(Exception, err_mess):
             conf_pars.parse_input()
     # Scenario 4
     def test_shell_outside_groups(self):
         conf_pars = ConfigParameters(_rpath + 'input_test_4.cfg')
         err_mess = "Some shells are not inside"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             conf_pars.parse_input()
     # Scenario 5


@@ -2,10 +2,10 @@ r"""
 Tests of 'parse_parameter_set()' defined in ConfigParameters class
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import arraytest
+from . import arraytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
@@ -47,6 +47,6 @@ class TestParseParameterSet(arraytest.ArrayTestCase):
         section = 'Shell 1'
         param_set = self.cpars.sh_required # contains 'lshell' and 'ions'
         err_mess = "Required parameter" # .* in section [%s]"%(section)
-        with self.assertRaisesRegexp(Exception, err_mess):
+        with self.assertRaisesRegex(Exception, err_mess):
             self.cpars.parse_parameter_set(section, param_set, exception=True)


@@ -2,10 +2,10 @@ r"""
 Tests of 'parse_shells()' defined in ConfigParameters class
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import arraytest
+from . import arraytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
@@ -37,21 +37,21 @@ class TestParseShells(arraytest.ArrayTestCase):
     def test_no_shell(self):
         conf_pars = ConfigParameters(_rpath + 'parse_shells_1.cfg')
         err_mess = "No projected shells"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             conf_pars.parse_shells()
     # Scenario 2
     def test_bad_indices(self):
         conf_pars = ConfigParameters(_rpath + 'parse_shells_2.cfg')
         err_mess = "Failed to extract shell indices"
-        with self.assertRaisesRegexp(ValueError, err_mess):
+        with self.assertRaisesRegex(ValueError, err_mess):
             conf_pars.parse_shells()
     # Scenario 3
     def test_sh_required(self):
         conf_pars = ConfigParameters(_rpath + 'parse_shells_3.cfg')
         err_mess = "Required parameter"
-        with self.assertRaisesRegexp(Exception, err_mess):
+        with self.assertRaisesRegex(Exception, err_mess):
             conf_pars.parse_shells()
     # Scenario 4


@@ -2,10 +2,10 @@ r"""
 Tests of special parseres defined in ConfigParameters class
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import arraytest
+from . import arraytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
@@ -87,7 +87,7 @@ class TestParseStringIonList(arraytest.ArrayTestCase):
     # Scenario 3
     def test_out_of_bounds(self):
         err_mess = "Lowest ion index is"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_string_ion_list('0 1')
     # Scenario 4
@@ -99,7 +99,7 @@ class TestParseStringIonList(arraytest.ArrayTestCase):
     # Scenario 5
     def test_range_wrong_order(self):
         err_mess = "First index of the range"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_string_ion_list('8..5')
     # Scenario 6
@@ -140,14 +140,14 @@ class TestParseStringTmatrix(arraytest.ArrayTestCase):
     def test_number_of_columns(self):
         par_str = "1.0 0.0\n1.0"
         err_mess = "Number of columns"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_string_tmatrix(par_str, real=True)
     # Scenario 2
     def test_complex_matrix_odd(self):
         par_str = "1.0 0.0 2.0 1.0 0.0\n0.0 1.0 2.0 3.0 -1.0"
         err_mess = "Complex matrix must"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_string_tmatrix(par_str, real=False)
     # Scenario 3
@@ -192,13 +192,13 @@ class TestParseEnergyWindow(arraytest.ArrayTestCase):
     # Scenario 2
     def test_wrong_range(self):
         err_mess = "The first float in EWINDOW"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_energy_window('3.0 -1.5')
     # Scenario 3
     def test_one_float(self):
         err_mess = "EWINDOW must be specified"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_energy_window('1.0')
     # Scenario 4
@@ -209,7 +209,7 @@ class TestParseEnergyWindow(arraytest.ArrayTestCase):
     # Scenario 5
     def test_three_floats(self):
         err_mess = "EWINDOW must be specified"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_energy_window('1.5 3.0 2.0')
 ################################################################################
@@ -246,13 +246,13 @@ class TestParseBandWindow(arraytest.ArrayTestCase):
     # Scenario 2
     def test_wrong_range(self):
         err_mess = "The first int in BANDS"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_band_window('10 1')
     # Scenario 3
     def test_one_float(self):
         err_mess = "BANDS must be specified"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_band_window('1')
     # Scenario 4
@@ -263,7 +263,7 @@ class TestParseBandWindow(arraytest.ArrayTestCase):
     # Scenario 5
     def test_three_ints(self):
         err_mess = "BANDS must be specified"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.cpars.parse_band_window('1 2 3')
 ################################################################################
@@ -345,7 +345,7 @@ class TestParseStringDosmesh(arraytest.ArrayTestCase):
     # Scenario 3
     def test_two_numbers(self):
         err_mess = "DOSMESH must be either"
-        with self.assertRaisesRegexp(ValueError, err_mess):
+        with self.assertRaisesRegex(ValueError, err_mess):
             self.cpars.parse_string_dosmesh('-8.0 101')
     # Scenario 4


@@ -43,8 +43,8 @@ class MyTestCase(unittest.TestCase):
         # Make a diff
         #
         # Remove empty lines
-        lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True))
-        lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True))
+        lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
+        lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
         # diff
         delta = difflib.unified_diff(lstr1, lstr2)
         # combine delta's to a string


@@ -27,19 +27,19 @@ class TestFileIO(mytest.MyTestCase):
     # Scenario 1
     def test_no_plocar(self):
         err_mess = "Error opening xPLOCAR"
-        with self.assertRaisesRegexp(IOError, err_mess):
+        with self.assertRaisesRegex(IOError, err_mess):
             read_plocar('xPLOCAR')
     # Scenario 2
     def test_end_of_file(self):
         err_mess = "End-of-file reading"
-        with self.assertRaisesRegexp(IOError, err_mess):
+        with self.assertRaisesRegex(IOError, err_mess):
             read_plocar('PLOCAR.trunc')
     # Scenario 3
     def test_wrong_prec(self):
         err_mess = "only 'prec = 4, 8' are supported"
-        with self.assertRaisesRegexp(ValueError, err_mess):
+        with self.assertRaisesRegex(ValueError, err_mess):
             read_plocar('PLOCAR.noprec')
     # Scenario 4
@@ -52,10 +52,10 @@ class TestFileIO(mytest.MyTestCase):
             f.write(" nlm =%5i\n"%(nlm))
             ion = 1
             isp = 1
-            for ik in xrange(nk):
-                for ib in xrange(nb):
+            for ik in range(nk):
+                for ib in range(nb):
                     f.write("%5i%5i%5i%5i%10.5f\n"%(ion, isp, ik+1, ib+1, ferw[0, 0, ik, ib]))
-                    for ilm in xrange(nlm):
+                    for ilm in range(nlm):
                         p = plo[0, 0, ik, ib, ilm]
                         f.write("%5i%15.7f%15.7f\n"%(ilm+1, p.real, p.imag))
@@ -75,13 +75,13 @@ class TestFileIO(mytest.MyTestCase):
         test_file = 'PLOCAR.example.out.test'
         with open(test_file, 'wt') as f:
             f.write("pars: %s\n"%(pars))
-            for ion in xrange(nion):
-                for isp in xrange(ns):
-                    for ik in xrange(nk):
-                        for ib in xrange(nb):
+            for ion in range(nion):
+                for isp in range(ns):
+                    for ik in range(nk):
+                        for ib in range(nb):
                             f.write("%5i%5i%5i%5i %s\n"%(ion+1, isp+1, ik+1, ib+1,
                                     ferw[ion, isp, ik, ib]))
-                            for ilm in xrange(nlm):
+                            for ilm in range(nlm):
                                 p = plo[ion, isp, ik, ib, ilm]
                                 f.write("%5i %s\n"%(ilm+1, p))


@@ -43,8 +43,8 @@ class MyTestCase(unittest.TestCase):
         # Make a diff
         #
         # Remove empty lines
-        lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True))
-        lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True))
+        lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
+        lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
         # diff
         delta = difflib.unified_diff(lstr1, lstr2)
         # combine delta's to a string


@@ -45,8 +45,8 @@ class MyTestCase(unittest.TestCase):
         # Make a diff
         #
         # Remove empty lines
-        lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True))
-        lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True))
+        lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
+        lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
         # diff
         delta = difflib.unified_diff(lstr1, lstr2)
         # combine delta's to a string


@@ -1,13 +1,13 @@
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 import numpy as np
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
 from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell
 from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup
-import mytest
+from . import mytest
 ################################################################################
 #
@@ -30,19 +30,19 @@ class TestBlockMap(mytest.MyTestCase):
         nproj = 16
         self.mock_plo = np.zeros((nproj, 1, 1, 11), dtype=np.complex128)
-        self.mock_proj_params = [{} for i in xrange(nproj)]
+        self.mock_proj_params = [{} for i in range(nproj)]
         ip = 0
         # Mock d-sites
-        for isite in xrange(2):
-            for im in xrange(5):
+        for isite in range(2):
+            for im in range(5):
                 self.mock_proj_params[ip]['label'] = 'd-orb'
                 self.mock_proj_params[ip]['isite'] = isite + 1
                 self.mock_proj_params[ip]['l'] = 2
                 self.mock_proj_params[ip]['m'] = im
                 ip += 1
         # Mock p-sites
-        for isite in xrange(2, 4):
-            for im in xrange(3):
+        for isite in range(2, 4):
+            for im in range(3):
                 self.mock_proj_params[ip]['label'] = 'p-orb'
                 self.mock_proj_params[ip]['isite'] = isite + 1
                 self.mock_proj_params[ip]['l'] = 1


@@ -1,6 +1,6 @@
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 import numpy as np
@@ -10,7 +10,7 @@ from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
 from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell
 from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup
 from pytriqs.archive import HDFArchive
-import mytest
+from . import mytest
 ################################################################################
 #


@@ -1,6 +1,6 @@
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 import numpy as np
@@ -10,7 +10,7 @@ from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
 from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell
 from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup
 from pytriqs.archive import HDFArchive
-import mytest
+from . import mytest
 ################################################################################
 #
@@ -46,7 +46,7 @@ class TestProjectorGroupCompl(mytest.MyTestCase):
     def test_num_bands(self):
         self.pars.groups[0]['complement'] = True
         err_mess = "At each band the same number"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             self.proj_gr = ProjectorGroup(self.pars.groups[0], [self.proj_sh], self.eigvals)
     def test_compl(self):
@@ -66,9 +66,9 @@ class TestProjectorGroupCompl(mytest.MyTestCase):
         _, ns, nk, _, _ = self.proj_gr.shells[0].proj_win.shape
         # Note that 'ns' and 'nk' are the same for all shells
-        for isp in xrange(ns):
-            for ik in xrange(nk):
-                print('ik',ik)
+        for isp in range(ns):
+            for ik in range(nk):
+                print(('ik',ik))
                 bmin = self.proj_gr.ib_win[ik, isp, 0]
                 bmax = self.proj_gr.ib_win[ik, isp, 1]+1
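One 2to3 quirk visible in the hunk above: when the Python 2 source already wrote print('ik', ik), the tool wraps it again, and print(('ik', ik)) prints a single tuple rather than two space-separated values. A minimal sketch of the difference (illustrative value only):

    ik = 3
    print('ik', ik)     # prints: ik 3
    print(('ik', ik))   # prints: ('ik', 3)  -- the form produced by 2to3 here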


@@ -1,6 +1,6 @@
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 import numpy as np
@@ -9,7 +9,7 @@ from triqs_dft_tools.converters.plovasp.elstruct import ElectronicStructure
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
 from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell
 from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup
-import mytest
+from . import mytest
 ################################################################################
 #
@@ -58,14 +58,14 @@ class TestSelectBands(mytest.MyTestCase):
     def test_emin_too_large(self):
         self.proj_gr.emin = 20.0
         self.proj_gr.emax = 25.0
-        with self.assertRaisesRegexp(Exception, "No bands inside the window"):
+        with self.assertRaisesRegex(Exception, "No bands inside the window"):
             ib_win, nb_min, nb_max = self.proj_gr.select_bands(self.eigvals)
     # Scenario 3
     def test_emax_too_small(self):
         self.proj_gr.emin = -50.0
         self.proj_gr.emax = -55.0
-        with self.assertRaisesRegexp(Exception, "Energy window does not overlap"):
+        with self.assertRaisesRegex(Exception, "Energy window does not overlap"):
             ib_win, nb_min, nb_max = self.proj_gr.select_bands(self.eigvals)


@@ -1,6 +1,6 @@
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 import numpy as np
@@ -10,7 +10,7 @@ from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
 from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell
 from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup
 from pytriqs.archive import HDFArchive
-import mytest
+from . import mytest
 ################################################################################
 #


@@ -44,8 +44,8 @@ class MyTestCase(unittest.TestCase):
         # Make a diff
         #
         # Remove empty lines
-        lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True))
-        lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True))
+        lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
+        lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
         # diff
         delta = difflib.unified_diff(lstr1, lstr2)
         # combine delta's to a string


@@ -1,6 +1,6 @@
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 import numpy as np
@@ -9,7 +9,7 @@ from triqs_dft_tools.converters.plovasp.elstruct import ElectronicStructure
 from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters
 from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell
 from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup
-import mytest
+from . import mytest
 ################################################################################
 #
@@ -52,14 +52,14 @@ class TestProjectorShell(mytest.MyTestCase):
         nion, ns, nk, nlm, nbtot = self.proj_sh.proj_win.shape
         with open(testout, 'wt') as f:
             f.write("pars: %s\n"%(self.pars.shells[0]))
-            for ion in xrange(nion):
-                for isp in xrange(ns):
-                    for ik in xrange(nk):
+            for ion in range(nion):
+                for isp in range(ns):
+                    for ik in range(nk):
                         ib1 = self.proj_sh.ib_win[ik, 0, 0]
                         ib2 = self.proj_sh.ib_win[ik, 0, 1]
                         f.write("%i %i\n"%(ib1, ib2))
-                        for ib in xrange(ib2 - ib1 + 1):
-                            for ilm in xrange(nlm):
+                        for ib in range(ib2 - ib1 + 1):
+                            for ilm in range(nlm):
                                 p = self.proj_sh.proj_win[ion, isp, ik, ilm, ib]
                                 f.write("%5i %f %f\n"%(ilm+1, p.real, p.imag))


@@ -27,8 +27,8 @@ if __name__ == '__main__':
     if results.wasSuccessful():
         raise SystemExit(0)
     else:
-        print "Failed tests:"
+        print("Failed tests:")
         for failure in results.failures:
-            print failure[0].__str__()
+            print(failure[0].__str__())
         raise SystemExit(1)
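The change above is the basic statement-to-function conversion: in Python 3, print is an ordinary function, so each printed value becomes an argument. A minimal sketch with hypothetical failure data mirroring unittest's (test, traceback) pairs:

    failures = [("test_something (tests.Demo)", "Traceback ...")]
    print("Failed tests:")
    for failure in failures:
        print(failure[0])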


@@ -44,9 +44,9 @@ class MyTestCase(unittest.TestCase):
         # Make a diff
         #
         # Remove empty lines
-        lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True))
+        lstr1 = [s for s in str1.splitlines(True) if s.strip() != '']
         lstr1 = [str1.replace(" ","") for str1 in lstr1] # Remove spaces
-        lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True))
+        lstr2 = [s for s in str2.splitlines(True) if s.strip() != '']
         lstr2 = [str2.replace(" ","") for str2 in lstr2] # Remove spaces
         # diff
         delta = difflib.unified_diff(lstr1, lstr2)


@@ -2,10 +2,10 @@ r"""
 Tests for class 'Doscar' from module 'vaspio'
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import mytest
+from . import mytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.vaspio import Doscar


@@ -2,10 +2,10 @@ r"""
 Tests for class 'Eigneval' from module 'vaspio'
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import mytest
+from . import mytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.vaspio import Eigenval
@@ -55,6 +55,6 @@ class TestEigenval(mytest.MyTestCase):
         eigenval = Eigenval()
         err_mess = "EIGENVAL file is incorrect"
-        with self.assertRaisesRegexp(AssertionError, err_mess):
+        with self.assertRaisesRegex(AssertionError, err_mess):
             eigenval.from_file(vasp_dir=_rpath, eig_filename=filename)


@@ -2,10 +2,10 @@ r"""
 Tests for class 'Ibzkpt' from module 'vaspio'
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import mytest
+from . import mytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.vaspio import Kpoints


@@ -2,10 +2,10 @@ r"""
 Tests for class 'Poscar' from module 'vaspio'
 """
 import os
-import rpath
+from . import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
-import mytest
+from . import mytest
 import numpy as np
 from triqs_dft_tools.converters.plovasp.vaspio import Poscar


@@ -42,7 +42,7 @@ for name, s in Sigma_hdf:
 # Read self energy from txt files
 SK = SumkDFTTools(hdf_file = 'SrVO3.h5', use_dft_blocks = True)
-a_list = [a for a,al in SK.gf_struct_solver[0].iteritems()]
+a_list = [a for a,al in SK.gf_struct_solver[0].items()]
 g_list = [read_gf_from_txt([['Sigma_' + a + '.dat']], a) for a in a_list]
 Sigma_txt = BlockGf(name_list = a_list, block_list = g_list, make_copies=False)
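Finally, dict.iteritems() is gone in Python 3; items() returns a lightweight view that iterates the same way, so the rewritten comprehension is a drop-in replacement. A minimal sketch with an illustrative block structure (not the real SumkDFTTools data):

    # Illustrative stand-in for SK.gf_struct_solver[0].
    gf_struct = {'up': [0, 1, 2], 'down': [0, 1, 2]}
    a_list = [a for a, al in gf_struct.items()]
    assert sorted(a_list) == ['down', 'up']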