From 97d4e0b402fe218ae61ea17eb23980897ae45892 Mon Sep 17 00:00:00 2001 From: Nils Wentzell Date: Wed, 8 Apr 2020 15:35:59 -0400 Subject: [PATCH] [py3] Run 2to3 -w -n **/*.py **/*.py.in --- cmake/sitecustomize.py | 4 +- doc/conf.py.in | 4 +- doc/tutorials/images_scripts/Ce-gamma.py | 2 +- doc/tutorials/images_scripts/Ce-gamma_DOS.py | 2 +- .../images_scripts/NiO_local_lattice_GF.py | 6 +- .../images_scripts/dft_dmft_cthyb.py | 4 +- doc/tutorials/images_scripts/maxent.py | 4 +- doc/tutorials/images_scripts/nio.py | 2 +- doc/tutorials/images_scripts/nio_csc.py | 8 +- python/__init__.py | 10 +- python/block_structure.py | 28 +-- python/clear_h5_output.py | 8 +- python/converters/__init__.py | 8 +- python/converters/converter_tools.py | 4 +- python/converters/hk_converter.py | 24 +-- python/converters/plovasp/converter.py | 8 +- python/converters/plovasp/elstruct.py | 22 +-- python/converters/plovasp/inpconf.py | 66 +++---- python/converters/plovasp/plotools.py | 120 ++++++------ python/converters/plovasp/proj_group.py | 36 ++-- python/converters/plovasp/proj_shell.py | 50 ++--- python/converters/plovasp/sc_dmft.py | 42 ++--- python/converters/plovasp/vaspio.py | 174 +++++++++--------- python/converters/vasp_converter.py | 100 +++++----- python/converters/wannier90_converter.py | 18 +- python/converters/wien2k_converter.py | 134 +++++++------- python/sumk_dft.py | 80 ++++---- python/sumk_dft_tools.py | 72 ++++---- python/trans_basis.py | 4 +- python/update_archive.py | 28 +-- python/version.py.in | 4 +- test/analyse_block_structure_from_gf.py | 2 +- test/analyse_block_structure_from_gf2.py | 8 +- test/plovasp/atm/mytest.py | 4 +- test/plovasp/converter/mytest.py | 4 +- test/plovasp/inpconf/test_general.py | 4 +- test/plovasp/inpconf/test_groups.py | 10 +- test/plovasp/inpconf/test_input.py | 12 +- test/plovasp/inpconf/test_parameter_set.py | 6 +- test/plovasp/inpconf/test_shells.py | 10 +- test/plovasp/inpconf/test_special_parsers.py | 26 +-- test/plovasp/plocar_io/mytest.py | 4 +- test/plovasp/plocar_io/test_fileio.py | 22 +-- test/plovasp/plotools/mytest.py | 4 +- test/plovasp/proj_group/mytest.py | 4 +- test/plovasp/proj_group/test_block_map.py | 14 +- test/plovasp/proj_group/test_one_site.py | 4 +- .../plovasp/proj_group/test_one_site_compl.py | 12 +- test/plovasp/proj_group/test_select_bands.py | 8 +- test/plovasp/proj_group/test_two_site.py | 4 +- test/plovasp/proj_shell/mytest.py | 4 +- test/plovasp/proj_shell/test_projshells.py | 14 +- test/plovasp/run_suite.py | 4 +- test/plovasp/vaspio/mytest.py | 4 +- test/plovasp/vaspio/test_doscar.py | 4 +- test/plovasp/vaspio/test_eigenval.py | 6 +- test/plovasp/vaspio/test_kpoints.py | 4 +- test/plovasp/vaspio/test_poscar.py | 4 +- test/sigma_from_file.py | 2 +- 59 files changed, 642 insertions(+), 642 deletions(-) diff --git a/cmake/sitecustomize.py b/cmake/sitecustomize.py index 0f31ba93..d87e97e9 100644 --- a/cmake/sitecustomize.py +++ b/cmake/sitecustomize.py @@ -3,6 +3,6 @@ def application_pytriqs_import(name,*args,**kwargs): name = name[len('@package_name@')+1:] return builtin_import(name,*args,**kwargs) -import __builtin__ -__builtin__.__import__, builtin_import = application_pytriqs_import, __builtin__.__import__ +import builtins +builtins.__import__, builtin_import = application_pytriqs_import, builtins.__import__ diff --git a/doc/conf.py.in b/doc/conf.py.in index 90497936..111490f5 100644 --- a/doc/conf.py.in +++ b/doc/conf.py.in @@ -22,8 +22,8 @@ extensions = ['sphinx.ext.autodoc', source_suffix = '.rst' -project = u'TRIQS DFTTools' 
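For context on the two renames touched in the hunks above and just below, a minimal Python 3 sketch (illustrative only, not taken from the patched sources; the variable names simply echo sitecustomize.py and conf.py.in): the `__builtin__` module is called `builtins` in Python 3, and plain string literals are already unicode, so the `u''` prefixes dropped from conf.py.in are redundant.

import builtins                          # Python 3 name; Python 2 exposed this module as __builtin__

original_import = builtins.__import__    # the same hook that cmake/sitecustomize.py re-binds
project = 'TRIQS DFTTools'               # str literals are unicode in Python 3; the u'' prefix is optional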
-copyright = u'2011-2019' +project = 'TRIQS DFTTools' +copyright = '2011-2019' version = '@DFT_TOOLS_VERSION@' mathjax_path = "@TRIQS_MATHJAX_PATH@/MathJax.js?config=default" diff --git a/doc/tutorials/images_scripts/Ce-gamma.py b/doc/tutorials/images_scripts/Ce-gamma.py index 258ca52a..92f4061a 100644 --- a/doc/tutorials/images_scripts/Ce-gamma.py +++ b/doc/tutorials/images_scripts/Ce-gamma.py @@ -111,7 +111,7 @@ for iteration_number in range(1,Loops+1): #Save essential SumkDFT data: SK.save(['chemical_potential','dc_imp','dc_energ','correnerg']) if (mpi.is_master_node()): - print 'DC after solver: ',SK.dc_imp[0] + print('DC after solver: ',SK.dc_imp[0]) # print out occupancy matrix of Ce 4f mpi.report("Orbital densities of impurity Green function:") diff --git a/doc/tutorials/images_scripts/Ce-gamma_DOS.py b/doc/tutorials/images_scripts/Ce-gamma_DOS.py index c96d756f..5f4e8a9c 100644 --- a/doc/tutorials/images_scripts/Ce-gamma_DOS.py +++ b/doc/tutorials/images_scripts/Ce-gamma_DOS.py @@ -31,7 +31,7 @@ SK.dc_imp = mpi.bcast(SK.dc_imp) SK.dc_energ = mpi.bcast(SK.dc_energ) if (mpi.is_master_node()): - print 'DC after reading SK: ',SK.dc_imp[0] + print('DC after reading SK: ',SK.dc_imp[0]) N = SK.corr_shells[0]['dim'] l = SK.corr_shells[0]['l'] diff --git a/doc/tutorials/images_scripts/NiO_local_lattice_GF.py b/doc/tutorials/images_scripts/NiO_local_lattice_GF.py index 21cf2eb6..7e60c230 100644 --- a/doc/tutorials/images_scripts/NiO_local_lattice_GF.py +++ b/doc/tutorials/images_scripts/NiO_local_lattice_GF.py @@ -39,7 +39,7 @@ if mpi.is_master_node(): if not 'Iterations' in ar['DMFT_results']: ar['DMFT_results'].create_group('Iterations') if 'iteration_count' in ar['DMFT_results']: iteration_offset = ar['DMFT_results']['iteration_count']+1 - print('offset',iteration_offset) + print(('offset',iteration_offset)) Sigma_iw = ar['DMFT_results']['Iterations']['Sigma_it'+str(iteration_offset-1)] SK.dc_imp = ar['DMFT_results']['Iterations']['dc_imp'+str(iteration_offset-1)] SK.dc_energ = ar['DMFT_results']['Iterations']['dc_energ'+str(iteration_offset-1)] @@ -54,13 +54,13 @@ SK.chemical_potential = mpi.bcast(SK.chemical_potential) SK.put_Sigma(Sigma_imp = [Sigma_iw]) -ikarray = numpy.array(range(SK.n_k)) +ikarray = numpy.array(list(range(SK.n_k))) # set up the orbitally resolved local lattice greens function: n_orbs = SK.proj_mat_csc.shape[2] spn = SK.spin_block_names[SK.SO] mesh = Sigma_iw.mesh -block_structure = [range(n_orbs) for sp in spn] +block_structure = [list(range(n_orbs)) for sp in spn] gf_struct = [(spn[isp], block_structure[isp]) for isp in range(SK.n_spin_blocks[SK.SO])] block_ind_list = [block for block, inner in gf_struct] diff --git a/doc/tutorials/images_scripts/dft_dmft_cthyb.py b/doc/tutorials/images_scripts/dft_dmft_cthyb.py index 4d6d9dc9..fff68b91 100644 --- a/doc/tutorials/images_scripts/dft_dmft_cthyb.py +++ b/doc/tutorials/images_scripts/dft_dmft_cthyb.py @@ -76,7 +76,7 @@ spin_names = ["up","down"] orb_names = [i for i in range(n_orb)] # Use GF structure determined by DFT blocks -gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()] +gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].items()] # Construct Solver S = Solver(beta=beta, gf_struct=gf_struct) @@ -97,7 +97,7 @@ if previous_present: SK.set_dc(dc_imp,dc_energ) for iteration_number in range(1,loops+1): - if mpi.is_master_node(): print "Iteration = ", iteration_number + if mpi.is_master_node(): print("Iteration = ", iteration_number) 
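A short aside on the print conversions in the surrounding hunks, using a made-up value rather than the script's archive data: 2to3 turns every Python 2 print statement into a function call, and where the old statement already carried parentheses (as in `print('offset',iteration_offset)` above) it adds a second pair so that Python 3 keeps printing the same tuple that Python 2 printed.

iteration_offset = 3                      # stand-in value, not read from the h5 archive
print('offset', iteration_offset)         # Python 3 output: offset 3
print(('offset', iteration_offset))       # Python 3 output: ('offset', 3) -- identical to the Python 2 output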
SK.symm_deg_gf(S.Sigma_iw,orb=0) # symmetrise Sigma SK.set_Sigma([ S.Sigma_iw ]) # set Sigma into the SumK class diff --git a/doc/tutorials/images_scripts/maxent.py b/doc/tutorials/images_scripts/maxent.py index 841d8067..9a5d0089 100644 --- a/doc/tutorials/images_scripts/maxent.py +++ b/doc/tutorials/images_scripts/maxent.py @@ -12,7 +12,7 @@ if 'iteration_count' in ar['DMFT_results']: tm = TauMaxEnt(cost_function='bryan', probability='normal') -print(G_latt['up'][0,0]) +print((G_latt['up'][0,0])) t2g_orbs = [0,1,3] eg_orbs = [2,4] op_orbs = [5,6,7] @@ -22,7 +22,7 @@ orbs = [t2g_orbs, eg_orbs, op_orbs] for orb in orbs: - print '\n'+str(orb[0])+'\n' + print('\n'+str(orb[0])+'\n') gf = 0*G_latt['up'][0,0] for iO in orb: diff --git a/doc/tutorials/images_scripts/nio.py b/doc/tutorials/images_scripts/nio.py index 0195e797..f064a931 100644 --- a/doc/tutorials/images_scripts/nio.py +++ b/doc/tutorials/images_scripts/nio.py @@ -30,7 +30,7 @@ for i_sh in range(len(SK.deg_shells)): mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh)) for iblock in range(num_block_deg_orbs): mpi.report('block {0:d} consists of orbitals:'.format(iblock)) - for keys in SK.deg_shells[i_sh][iblock].keys(): + for keys in list(SK.deg_shells[i_sh][iblock].keys()): mpi.report(' '+keys) # Setup CTQMC Solver diff --git a/doc/tutorials/images_scripts/nio_csc.py b/doc/tutorials/images_scripts/nio_csc.py index 75f2c2fa..61003480 100644 --- a/doc/tutorials/images_scripts/nio_csc.py +++ b/doc/tutorials/images_scripts/nio_csc.py @@ -37,7 +37,7 @@ def dmft_cycle(): mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh)) for iblock in range(num_block_deg_orbs): mpi.report('block {0:d} consists of orbitals:'.format(iblock)) - for keys in SK.deg_shells[i_sh][iblock].keys(): + for keys in list(SK.deg_shells[i_sh][iblock].keys()): mpi.report(' '+keys) # Setup CTQMC Solver @@ -176,15 +176,15 @@ def dmft_cycle(): if mpi.is_master_node(): - print 'calculating mu...' 
+ print('calculating mu...') SK.chemical_potential = SK.calc_mu( precision = 0.000001 ) if mpi.is_master_node(): - print 'calculating GAMMA' + print('calculating GAMMA') SK.calc_density_correction(dm_type='vasp') if mpi.is_master_node(): - print 'calculating energy corrections' + print('calculating energy corrections') correnerg = 0.5 * (S.G_iw * S.Sigma_iw).total_density() diff --git a/python/__init__.py b/python/__init__.py index 137355ae..380c1ed9 100644 --- a/python/__init__.py +++ b/python/__init__.py @@ -20,11 +20,11 @@ # ########################################################################## -from sumk_dft import SumkDFT -from symmetry import Symmetry -from block_structure import BlockStructure -from sumk_dft_tools import SumkDFTTools -from converters import * +from .sumk_dft import SumkDFT +from .symmetry import Symmetry +from .block_structure import BlockStructure +from .sumk_dft_tools import SumkDFTTools +from .converters import * __all__ = ['SumkDFT', 'Symmetry', 'SumkDFTTools', 'Wien2kConverter', 'HkConverter','BlockStructure'] diff --git a/python/block_structure.py b/python/block_structure.py index f1501409..442e190b 100644 --- a/python/block_structure.py +++ b/python/block_structure.py @@ -145,14 +145,14 @@ class BlockStructure(object): # create new solver_to_sumk so2su={} so2su_block = {} - for blk,idxs in gf_struct.items(): + for blk,idxs in list(gf_struct.items()): for i in range(len(idxs)): so2su[(blk,i)]=self.solver_to_sumk[ish][(blk,idxs[i])] so2su_block[blk]=so2su[(blk,i)][0] self.solver_to_sumk[ish] = so2su self.solver_to_sumk_block[ish] = so2su_block # create new sumk_to_solver - for k,v in self.sumk_to_solver[ish].items(): + for k,v in list(self.sumk_to_solver[ish].items()): blk,ind=v if blk in gf_struct and ind in gf_struct[blk]: new_ind = gf_struct[blk].index(ind) @@ -161,7 +161,7 @@ class BlockStructure(object): self.sumk_to_solver[ish][k]=(None,None) # reindexing gf_struct so that it starts with 0 for k in gf_struct: - gf_struct[k]=range(len(gf_struct[k])) + gf_struct[k]=list(range(len(gf_struct[k]))) self.gf_struct_solver[ish]=gf_struct def pick_gf_struct_sumk(self,new_gf_struct): @@ -207,7 +207,7 @@ class BlockStructure(object): # mapping for ish in range(len(new_gf_struct)): gfs.append({}) - for block in new_gf_struct[ish].keys(): + for block in list(new_gf_struct[ish].keys()): for ind in new_gf_struct[ish][block]: ind_sol = self.sumk_to_solver[ish][(block,ind)] if not ind_sol[0] in gfs[ish]: @@ -232,7 +232,7 @@ class BlockStructure(object): so2su = {} su2so = {} so2su_block = {} - for frm,to in mapping[ish].iteritems(): + for frm,to in mapping[ish].items(): if not to[0] in gf_struct: gf_struct[to[0]]=[] gf_struct[to[0]].append(to[1]) @@ -247,7 +247,7 @@ class BlockStructure(object): else: so2su_block[to[0]]=\ self.solver_to_sumk_block[ish][frm[0]] - for k in self.sumk_to_solver[ish].keys(): + for k in list(self.sumk_to_solver[ish].keys()): if not k in su2so: su2so[k] = (None,None) self.gf_struct_solver[ish]=gf_struct @@ -273,7 +273,7 @@ class BlockStructure(object): blocks """ - names = self.gf_struct_solver[ish].keys() + names = list(self.gf_struct_solver[ish].keys()) blocks=[] for n in names: G = gf_function(indices=self.gf_struct_solver[ish][n],**kwargs) @@ -315,7 +315,7 @@ class BlockStructure(object): show_warnings = True G_new = self.create_gf(ish=ish,**kwargs) - for block in G_struct.gf_struct_solver[ish].keys(): + for block in list(G_struct.gf_struct_solver[ish].keys()): for i1 in G_struct.gf_struct_solver[ish][block]: for i2 in 
G_struct.gf_struct_solver[ish][block]: i1_sumk = G_struct.solver_to_sumk[ish][(block,i1)] @@ -356,7 +356,7 @@ class BlockStructure(object): self.gf_struct_solver.append({}) self.solver_to_sumk.append({}) self.solver_to_sumk_block.append({}) - for frm,to in self.sumk_to_solver[ish].iteritems(): + for frm,to in self.sumk_to_solver[ish].items(): if to[0] is not None: self.gf_struct_solver[ish][frm[0]+'_'+str(frm[1])]=[0] self.sumk_to_solver[ish][frm]=(frm[0]+'_'+str(frm[1]),0) @@ -384,7 +384,7 @@ class BlockStructure(object): elif isinstance(one,dict): if set(one.keys()) != set(two.keys()): return False - for k in set(one.keys()).intersection(two.keys()): + for k in set(one.keys()).intersection(list(two.keys())): if not compare(one[k],two[k]): return False return True @@ -413,7 +413,7 @@ class BlockStructure(object): d = [] for ish in range(len(mapping)): d.append({}) - for k,v in mapping[ish].iteritems(): + for k,v in mapping[ish].items(): d[ish][repr(k)] = repr(v) return d @@ -429,7 +429,7 @@ class BlockStructure(object): d = [] for ish in range(len(mapping)): d.append({}) - for k,v in mapping[ish].iteritems(): + for k,v in mapping[ish].items(): # literal_eval is a saje alternative to eval d[ish][literal_eval(k)] = literal_eval(v) return d @@ -450,7 +450,7 @@ class BlockStructure(object): s+=' shell '+str(ish)+'\n' def keyfun(el): return '{}_{:05d}'.format(el[0],el[1]) - keys = sorted(element[ish].keys(),key=keyfun) + keys = sorted(list(element[ish].keys()),key=keyfun) for k in keys: s+=' '+str(k)+str(element[ish][k])+'\n' s += "deg_shells\n" @@ -459,7 +459,7 @@ class BlockStructure(object): for l in range(len(self.deg_shells[ish])): s+=' equivalent group '+str(l)+'\n' if isinstance(self.deg_shells[ish][l],dict): - for key, val in self.deg_shells[ish][l].iteritems(): + for key, val in self.deg_shells[ish][l].items(): s+=' '+key+('*' if val[1] else '')+':\n' s+=' '+str(val[0]).replace('\n','\n ')+'\n' else: diff --git a/python/clear_h5_output.py b/python/clear_h5_output.py index a9135771..c6bb4621 100644 --- a/python/clear_h5_output.py +++ b/python/clear_h5_output.py @@ -3,13 +3,13 @@ import sys import subprocess if len(sys.argv) < 2: - print "Usage: python clear_h5_output.py archive" + print("Usage: python clear_h5_output.py archive") sys.exit() -print """ +print(""" This script is to remove any SumkDFT generated output from the h5 archive and to restore it to the original post-converter state. -""" +""") filename = sys.argv[1] A = h5py.File(filename) @@ -21,6 +21,6 @@ A.close() # Repack to reclaim disk space retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"]) if retcode != 0: - print "h5repack failed!" 
+ print("h5repack failed!") else: subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename]) diff --git a/python/converters/__init__.py b/python/converters/__init__.py index b835323b..fa9c5276 100644 --- a/python/converters/__init__.py +++ b/python/converters/__init__.py @@ -20,10 +20,10 @@ # ########################################################################## -from wien2k_converter import Wien2kConverter -from hk_converter import HkConverter -from vasp_converter import VaspConverter -from wannier90_converter import Wannier90Converter +from .wien2k_converter import Wien2kConverter +from .hk_converter import HkConverter +from .vasp_converter import VaspConverter +from .wannier90_converter import Wannier90Converter __all__ =['Wien2kConverter','HkConverter','Wannier90Converter','VaspConverter'] diff --git a/python/converters/converter_tools.py b/python/converters/converter_tools.py index b971518d..7299869c 100644 --- a/python/converters/converter_tools.py +++ b/python/converters/converter_tools.py @@ -46,9 +46,9 @@ class ConverterTools: import os.path import string if not(os.path.exists(filename)): - raise IOError, "File %s does not exist." % filename + raise IOError("File %s does not exist." % filename) for line in open(filename, 'r'): - for old, new in to_replace.iteritems(): + for old, new in to_replace.items(): line = line.replace(old, new) for x in line.split(): yield string.atof(x) diff --git a/python/converters/hk_converter.py b/python/converters/hk_converter.py index 510a7cca..aa828e39 100644 --- a/python/converters/hk_converter.py +++ b/python/converters/hk_converter.py @@ -25,7 +25,7 @@ import numpy from pytriqs.archive import * import pytriqs.utility.mpi as mpi from math import sqrt -from converter_tools import * +from .converter_tools import * class HkConverter(ConverterTools): @@ -96,20 +96,20 @@ class HkConverter(ConverterTools): # the energy conversion factor is 1.0, we assume eV in files energy_unit = 1.0 # read the number of k points - n_k = int(R.next()) + n_k = int(next(R)) k_dep_projection = 0 SP = 0 # no spin-polarision SO = 0 # no spin-orbit # total charge below energy window is set to 0 charge_below = 0.0 # density required, for setting the chemical potential - density_required = R.next() + density_required = next(R) symm_op = 0 # No symmetry groups for the k-sum # the information on the non-correlated shells is needed for # defining dimension of matrices: # number of shells considered in the Wanniers - n_shells = int(R.next()) + n_shells = int(next(R)) # corresponds to index R in formulas # now read the information about the shells (atom, sort, l, dim): shell_entries = ['atom', 'sort', 'l', 'dim'] @@ -117,7 +117,7 @@ class HkConverter(ConverterTools): shell_entries, R)} for ish in range(n_shells)] # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, - n_corr_shells = int(R.next()) + n_corr_shells = int(next(R)) # corresponds to index R in formulas # now read the information about the shells (atom, sort, l, dim, SO # flag, irep): @@ -141,8 +141,8 @@ class HkConverter(ConverterTools): T = [] for ish in range(n_inequiv_shells): # number of representatives ("subsets"), e.g. 
t2g and eg - n_reps[ish] = int(R.next()) - dim_reps[ish] = [int(R.next()) for i in range( + n_reps[ish] = int(next(R)) + dim_reps[ish] = [int(next(R)) for i in range( n_reps[ish])] # dimensions of the subsets # The transformation matrix: @@ -201,7 +201,7 @@ class HkConverter(ConverterTools): if (weights_in_file): # weights in the file for ik in range(n_k): - bz_weights[ik] = R.next() + bz_weights[ik] = next(R) # if the sum over spins is in the weights, take it out again!! sm = sum(bz_weights) @@ -222,7 +222,7 @@ class HkConverter(ConverterTools): else: istart = 0 for j in range(istart, n_orb): - hopping[ik, isp, i, j] = R.next() + hopping[ik, isp, i, j] = next(R) for i in range(n_orb): if (only_upper_triangle): @@ -230,7 +230,7 @@ class HkConverter(ConverterTools): else: istart = 0 for j in range(istart, n_orb): - hopping[ik, isp, i, j] += R.next() * 1j + hopping[ik, isp, i, j] += next(R) * 1j if ((only_upper_triangle)and(i != j)): hopping[ik, isp, j, i] = hopping[ ik, isp, i, j].conjugate() @@ -243,8 +243,8 @@ class HkConverter(ConverterTools): else: istart = 0 for j in range(istart, n_orb): - hopping[ik, isp, i, j] = R.next() - hopping[ik, isp, i, j] += R.next() * 1j + hopping[ik, isp, i, j] = next(R) + hopping[ik, isp, i, j] += next(R) * 1j if ((only_upper_triangle)and(i != j)): hopping[ik, isp, j, i] = hopping[ diff --git a/python/converters/plovasp/converter.py b/python/converters/plovasp/converter.py index 53fbdb14..b2c97548 100644 --- a/python/converters/plovasp/converter.py +++ b/python/converters/plovasp/converter.py @@ -36,10 +36,10 @@ r""" Usage: python converter.py [] """ import sys -import vaspio -from inpconf import ConfigParameters -from elstruct import ElectronicStructure -from plotools import generate_plo, output_as_text +from . import vaspio +from .inpconf import ConfigParameters +from .elstruct import ElectronicStructure +from .plotools import generate_plo, output_as_text def generate_and_output_as_text(conf_filename, vasp_dir): """ diff --git a/python/converters/plovasp/elstruct.py b/python/converters/plovasp/elstruct.py index 1ad1a4ca..0040f789 100644 --- a/python/converters/plovasp/elstruct.py +++ b/python/converters/plovasp/elstruct.py @@ -92,7 +92,7 @@ class ElectronicStructure: # removed completely. 
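The `R.next()` → `next(R)` replacements throughout hk_converter.py above follow the Python 3 iterator protocol, in which generators no longer expose a `.next()` method. A minimal sketch, with a hypothetical generator standing in for the file-reading generator returned by `read_fortran_data`:

def numbers():                 # hypothetical stand-in for ConverterTools.read_fortran_data
    yield 64.0                 # e.g. the k-point count
    yield 1.0                  # e.g. the required density

R = numbers()
n_k = int(next(R))             # works on Python 2.6+ and Python 3; the old code spelled this R.next()
density_required = next(R)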
# if not vasp_data.eigenval.eigs is None: if False: - print "eigvals from EIGENVAL" + print("eigvals from EIGENVAL") self.eigvals = vasp_data.eigenval.eigs self.ferw = vasp_data.eigenval.ferw.transpose((2, 0, 1)) @@ -102,7 +102,7 @@ class ElectronicStructure: # Check that the number of band is the same in PROJCAR and EIGENVAL assert nb_plo == self.nband, "PLOCAR is inconsistent with EIGENVAL (number of bands)" else: - print "eigvals from LOCPROJ" + print("eigvals from LOCPROJ") self.eigvals = vasp_data.plocar.eigs self.ferw = vasp_data.plocar.ferw.transpose((2, 0, 1)) self.efermi = vasp_data.doscar.efermi @@ -163,8 +163,8 @@ class ElectronicStructure: overlap = np.zeros((ns, nproj, nproj), dtype=np.float64) # ov_min = np.ones((ns, nproj, nproj), dtype=np.float64) * 100.0 # ov_max = np.zeros((ns, nproj, nproj), dtype=np.float64) - for ispin in xrange(ns): - for ik in xrange(nk): + for ispin in range(ns): + for ik in range(nk): kweight = self.kmesh['kweights'][ik] occ = self.ferw[ispin, ik, :] den_mat[ispin, :, :] += np.dot(plo[:, ispin, ik, :] * occ, plo[:, ispin, ik, :].T.conj()).real * kweight * sp_fac @@ -174,12 +174,12 @@ class ElectronicStructure: # ov_min = np.minimum(ov, ov_min) # Output only the site-diagonal parts of the matrices - print - print " Unorthonormalized density matrices and overlaps:" - for ispin in xrange(ns): - print " Spin:", ispin + 1 + print() + print(" Unorthonormalized density matrices and overlaps:") + for ispin in range(ns): + print(" Spin:", ispin + 1) for io, ion in enumerate(ions): - print " Site:", ion + print(" Site:", ion) iorb_inds = [(ip, param['m']) for ip, param in enumerate(self.proj_params) if param['isite'] == ion] norb = len(iorb_inds) dm = np.zeros((norb, norb)) @@ -189,9 +189,9 @@ class ElectronicStructure: dm[iorb, iorb2] = den_mat[ispin, ind, ind2] ov[iorb, iorb2] = overlap[ispin, ind, ind2] - print " Density matrix" + (12*norb - 12 + 2)*" " + "Overlap" + print(" Density matrix" + (12*norb - 12 + 2)*" " + "Overlap") for drow, dov in zip(dm, ov): out = ''.join(map("{0:12.7f}".format, drow)) out += " " out += ''.join(map("{0:12.7f}".format, dov)) - print out + print(out) diff --git a/python/converters/plovasp/inpconf.py b/python/converters/plovasp/inpconf.py index 861974a9..584bd7a3 100644 --- a/python/converters/plovasp/inpconf.py +++ b/python/converters/plovasp/inpconf.py @@ -29,20 +29,20 @@ r""" Module for parsing and checking an input config-file. """ -import ConfigParser +import configparser import numpy as np import re import sys import itertools as it -import vaspio +from . import vaspio def issue_warning(message): """ Issues a warning. """ - print - print " !!! WARNING !!!: " + message - print + print() + print(" !!! 
WARNING !!!: " + message) + print() ################################################################################ ################################################################################ @@ -73,7 +73,7 @@ class ConfigParameters: ################################################################################ def __init__(self, input_filename, verbosity=1): self.verbosity = verbosity - self.cp = ConfigParser.SafeConfigParser() + self.cp = configparser.SafeConfigParser() self.cp.readfp(open(input_filename, 'r')) self.parameters = {} @@ -89,7 +89,7 @@ class ConfigParameters: 'corr': ('corr', self.parse_string_logical, True)} self.gr_required = { - 'shells': ('shells', lambda s: map(int, s.split())), + 'shells': ('shells', lambda s: list(map(int, s.split()))), 'ewindow': ('ewindow', self.parse_energy_window)} self.gr_optional = { @@ -142,7 +142,7 @@ class ConfigParameters: else: # Check if a set of indices is given try: - l_tmp = map(int, par_str.split()) + l_tmp = list(map(int, par_str.split())) l_tmp.sort() # Subtract 1 so that VASP indices (starting with 1) are converted # to Python indices (starting with 0) @@ -160,7 +160,7 @@ class ConfigParameters: ion_list = [] nion = 0 for cl in classes: - ions = map(int, re.findall(patt2, cl)) + ions = list(map(int, re.findall(patt2, cl))) ion_list.append([ion - 1 for ion in ions]) nion += len(ions) @@ -218,7 +218,7 @@ class ConfigParameters: Energy window is given by two floats, with the first one being smaller than the second one. """ - ftmp = map(float, par_str.split()) + ftmp = list(map(float, par_str.split())) assert len(ftmp) == 2, "EWINDOW must be specified by exactly two floats" assert ftmp[0] < ftmp[1], "The first float in EWINDOW must be smaller than the second one" return tuple(ftmp) @@ -233,7 +233,7 @@ class ConfigParameters: Band window is given by two ints, with the first one being smaller than the second one. """ - ftmp = map(int, par_str.split()) + ftmp = list(map(int, par_str.split())) assert len(ftmp) == 2, "BANDS must be specified by exactly two ints" assert ftmp[0] < ftmp[1], "The first int in BANDS must be smaller than the second one" return tuple(ftmp) @@ -250,7 +250,7 @@ class ConfigParameters: """ str_rows = par_str.split('\n') try: - rows = [map(float, s.split()) for s in str_rows] + rows = [list(map(float, s.split())) for s in str_rows] except ValueError: err_mess = "Cannot parse a matrix string:\n%s"%(par_str) raise ValueError(err_mess) @@ -339,11 +339,11 @@ class ConfigParameters: For required parameters `exception=True` must be set. 
""" parsed = {} - for par in param_set.keys(): + for par in list(param_set.keys()): key = param_set[par][0] try: par_str = self.cp.get(section, par) - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + except (configparser.NoOptionError, configparser.NoSectionError): if exception: message = "Required parameter '%s' not found in section [%s]"%(par, section) raise Exception(message) @@ -354,7 +354,7 @@ class ConfigParameters: continue if self.verbosity > 0: - print " %s = %s"%(par, par_str) + print(" %s = %s"%(par, par_str)) parse_fun = param_set[par][1] parsed[key] = parse_fun(par_str) @@ -376,23 +376,23 @@ class ConfigParameters: sections = self.cp.sections() sh_patt1 = re.compile('shell +.*', re.IGNORECASE) - sec_shells = filter(sh_patt1.match, sections) + sec_shells = list(filter(sh_patt1.match, sections)) self.nshells = len(sec_shells) assert self.nshells > 0, "No projected shells found in the input file" if self.verbosity > 0: - print + print() if self.nshells > 1: - print " Found %i projected shells"%(self.nshells) + print(" Found %i projected shells"%(self.nshells)) else: - print " Found 1 projected shell" + print(" Found 1 projected shell") # Get shell indices sh_patt2 = re.compile('shell +([0-9]*)$', re.IGNORECASE) try: get_ind = lambda s: int(sh_patt2.match(s).groups()[0]) - sh_inds = map(get_ind, sec_shells) + sh_inds = list(map(get_ind, sec_shells)) except (ValueError, AttributeError): raise ValueError("Failed to extract shell indices from a list: %s"%(sec_shells)) @@ -405,7 +405,7 @@ class ConfigParameters: # Ideally, indices should run from 1 to # If it's not the case, issue a warning sh_inds.sort() - if sh_inds != range(1, len(sh_inds) + 1): + if sh_inds != list(range(1, len(sh_inds) + 1)): issue_warning("Shell indices are not uniform or not starting from 1. " "This might be an indication of a incorrect setup.") @@ -418,8 +418,8 @@ class ConfigParameters: section = self.sh_sections[ind] if self.verbosity > 0: - print - print " Shell parameters:" + print() + print(" Shell parameters:") # Shell required parameters parsed = self.parse_parameter_set(section, self.sh_required, exception=True) shell.update(parsed) @@ -453,7 +453,7 @@ class ConfigParameters: sections = self.cp.sections() gr_patt = re.compile('group +(.*)', re.IGNORECASE) - sec_groups = filter(gr_patt.match, sections) + sec_groups = list(filter(gr_patt.match, sections)) self.ngroups = len(sec_groups) @@ -471,8 +471,8 @@ class ConfigParameters: group['index'] = gr_ind if self.verbosity > 0: - print - print " Group parameters:" + print() + print(" Group parameters:") # Group required parameters parsed = self.parse_parameter_set(section, self.gr_required, exception=True) group.update(parsed) @@ -514,18 +514,18 @@ class ConfigParameters: sh_gr_required = dict(self.gr_required) sh_gr_required.pop('shells') try: - for par in sh_gr_required.keys(): + for par in list(sh_gr_required.keys()): key = sh_gr_required[par][0] value = self.shells[0].pop(key) self.groups[0][key] = value except KeyError: message = "One [Shell] section is specified but no explicit [Group] section is provided." 
message += " In this case the [Shell] section must contain all required group information.\n" - message += " Required parameters are: %s"%(sh_gr_required.keys()) + message += " Required parameters are: %s"%(list(sh_gr_required.keys())) raise KeyError(message) # Do the same for optional group parameters, but do not raise an exception - for par in self.gr_optional.keys(): + for par in list(self.gr_optional.keys()): try: key = self.gr_optional[par][0] value = self.shells[0].pop(key) @@ -562,7 +562,7 @@ class ConfigParameters: # remove them and issue a warning. # # First, required group parameters - for par in self.gr_required.keys(): + for par in list(self.gr_required.keys()): try: key = self.gr_required[par][0] value = shell.pop(key) @@ -573,7 +573,7 @@ class ConfigParameters: continue # Second, optional group parameters - for par in self.gr_optional.keys(): + for par in list(self.gr_optional.keys()): try: key = self.gr_optional[par][0] value = shell.pop(key) @@ -591,7 +591,7 @@ class ConfigParameters: sh_refs_used.sort() # Check that all shells are referenced in the groups - assert sh_refs_used == range(self.nshells), "Some shells are not inside any of the groups" + assert sh_refs_used == list(range(self.nshells)), "Some shells are not inside any of the groups" ################################################################################ @@ -605,7 +605,7 @@ class ConfigParameters: """ self.general = {} sections = self.cp.sections() - gen_section = filter(lambda s: s.lower() == 'general', sections) + gen_section = [s for s in sections if s.lower() == 'general'] # If no [General] section is found parse a dummy section name to the parser # to reset parameters to their default values if len(gen_section) > 1: diff --git a/python/converters/plovasp/plotools.py b/python/converters/plovasp/plotools.py index de8bc58f..f7804dba 100644 --- a/python/converters/plovasp/plotools.py +++ b/python/converters/plovasp/plotools.py @@ -55,9 +55,9 @@ r""" """ import itertools as it import numpy as np -from proj_group import ProjectorGroup -from proj_shell import ProjectorShell -from proj_shell import ComplementShell +from .proj_group import ProjectorGroup +from .proj_shell import ProjectorShell +from .proj_shell import ComplementShell np.set_printoptions(suppress=True) @@ -71,9 +71,9 @@ def issue_warning(message): """ Issues a warning. """ - print - print " !!! WARNING !!!: " + message - print + print() + print(" !!! 
WARNING !!!: " + message) + print() ################################################################################ # check_data_consistency() @@ -129,18 +129,18 @@ def generate_plo(conf_pars, el_struct): # check if at least one shell is correlated assert np.any([shell['corr'] for shell in conf_pars.shells]), 'at least one shell has be CORR = True' nshell = len(conf_pars.shells) - print - print " Generating %i shell%s..."%(nshell, '' if nshell == 1 else 's') + print() + print(" Generating %i shell%s..."%(nshell, '' if nshell == 1 else 's')) pshells = [] for sh_par in conf_pars.shells: pshell = ProjectorShell(sh_par, proj_raw, el_struct.proj_params, el_struct.kmesh, el_struct.structure, el_struct.nc_flag) - print - print " Shell : %s"%(pshell.user_index) - print " Orbital l : %i"%(pshell.lorb) - print " Number of ions: %i"%(pshell.nion) - print " Dimension : %i"%(pshell.ndim) - print " Correlated : %r"%(pshell.corr) - print " Ion sort : %r"%(pshell.ion_sort) + print() + print(" Shell : %s"%(pshell.user_index)) + print(" Orbital l : %i"%(pshell.lorb)) + print(" Number of ions: %i"%(pshell.nion)) + print(" Dimension : %i"%(pshell.ndim)) + print(" Correlated : %r"%(pshell.corr)) + print(" Ion sort : %r"%(pshell.ion_sort)) pshells.append(pshell) @@ -157,45 +157,45 @@ def generate_plo(conf_pars, el_struct): #with HDFArchive(testout, 'w') as h5test: # h5test['hk'] = pgroup.hk # DEBUG output - print "Density matrix:" + print("Density matrix:") nimp = 0.0 ov_all = [] for ish in pgroup.ishells: if not isinstance(pshells[pgroup.ishells[ish]],ComplementShell): - print " Shell %i"%(ish + 1) + print(" Shell %i"%(ish + 1)) dm_all, ov_all_ = pshells[ish].density_matrix(el_struct) ov_all.append(ov_all_[0]) spin_fac = 2 if dm_all.shape[0] == 1 else 1 - for io in xrange(dm_all.shape[1]): - print " Site %i"%(io + 1) + for io in range(dm_all.shape[1]): + print(" Site %i"%(io + 1)) dm = spin_fac * dm_all[:, io, : ,:].sum(0) for row in dm: - print ''.join(map("{0:14.7f}".format, row)) + print(''.join(map("{0:14.7f}".format, row))) ndm = dm.trace() if pshells[ish].corr: nimp += ndm - print " trace: ", ndm - print - print " Impurity density:", nimp - print - print "Overlap:" + print(" trace: ", ndm) + print() + print(" Impurity density:", nimp) + print() + print("Overlap:") for io, ov in enumerate(ov_all): - print " Site %i"%(io + 1) - print ov[0,...] - print - print "Local Hamiltonian:" + print(" Site %i"%(io + 1)) + print(ov[0,...]) + print() + print("Local Hamiltonian:") for ish in pgroup.ishells: if not isinstance(pshells[pgroup.ishells[ish]],ComplementShell): - print " Shell %i"%(ish + 1) + print(" Shell %i"%(ish + 1)) loc_ham = pshells[pgroup.ishells[ish]].local_hamiltonian(el_struct) - for io in xrange(loc_ham.shape[1]): - print " Site %i"%(io + 1) + for io in range(loc_ham.shape[1]): + print(" Site %i"%(io + 1)) for row in loc_ham[:, io, :, :].sum(0): - print ''.join(map("{0:14.7f}".format, row)) + print(''.join(map("{0:14.7f}".format, row))) # END DEBUG output if 'dosmesh' in conf_pars.general: - print - print "Evaluating DOS..." 
+ print() + print("Evaluating DOS...") mesh_pars = conf_pars.general['dosmesh'] if np.isnan(mesh_pars['emin']): dos_emin = pgroup.emin @@ -208,12 +208,12 @@ def generate_plo(conf_pars, el_struct): emesh = np.linspace(dos_emin, dos_emax, n_points) for ish in pgroup.ishells: if not isinstance(pshells[pgroup.ishells[ish]],ComplementShell) or True: - print " Shell %i"%(ish + 1) + print(" Shell %i"%(ish + 1)) dos = pshells[pgroup.ishells[ish]].density_of_states(el_struct, emesh) de = emesh[1] - emesh[0] ntot = (dos[1:,...] + dos[:-1,...]).sum(0) / 2 * de - print " Total number of states:", ntot - for io in xrange(dos.shape[2]): + print(" Total number of states:", ntot) + for io in range(dos.shape[2]): np.savetxt('pdos_%i_%i.dat'%(ish,io), np.vstack((emesh.T, dos[:, 0, io, :].T)).T) pgroups.append(pgroup) @@ -254,7 +254,7 @@ def kpoints_output(basename, el_struct): f.write("%i\n"%(nktot)) # TODO: add the output of reciprocal lattice vectors f.write("# List of k-points with weights\n") - for ik in xrange(nktot): + for ik in range(nktot): kx, ky, kz = kmesh['kpoints'][ik, :] kwght = kmesh['kweights'][ik] f.write("%15.10f%15.10f%15.10f%20.10f\n"%(kx, ky, kz, kwght)) @@ -266,7 +266,7 @@ def kpoints_output(basename, el_struct): f.write("\n# Number of tetrahedra and volume: ntet, volt\n") f.write("%i %s\n"%(ntet, volt)) f.write("# List of tetrahedra: imult, ik1, ..., ik4\n") - for it in xrange(ntet): + for it in range(ntet): f.write(' '.join(map("{0:d}".format, *kmesh['itet'][it, :])) + '\n') except KeyError: pass @@ -315,14 +315,14 @@ def ctrl_output(conf_pars, el_struct, ng): header = json.dumps(head_dict, indent=4, separators=(',', ': ')) - print " Storing ctrl-file..." + print(" Storing ctrl-file...") with open(ctrl_fname, 'wt') as f: f.write(header + "\n") f.write("#END OF HEADER\n") f.write("# k-points and weights\n") labels = ['kx', 'ky', 'kz', 'kweight'] - out = "".join(map(lambda s: s.center(15), labels)) + out = "".join([s.center(15) for s in labels]) f.write("#" + out + "\n") for ik, kp in enumerate(el_struct.kmesh['kpoints']): tmp1 = "".join(map("{0:15.10f}".format, kp)) @@ -330,7 +330,7 @@ def ctrl_output(conf_pars, el_struct, ng): f.write(out + "\n") f.write("# k-points and weights cartesian\n") labels = ['kx', 'ky', 'kz'] - out = "".join(map(lambda s: s.center(15), labels)) + out = "".join([s.center(15) for s in labels]) f.write("#" + out + "\n") for ik, kp in enumerate(el_struct.kmesh['kpoints_cart']): out = "".join(map("{0:15.10f}".format, kp)) @@ -381,7 +381,7 @@ def plo_output(conf_pars, el_struct, pshells, pgroups): """ for ig, pgroup in enumerate(pgroups): plo_fname = conf_pars.general['basename'] + '.pg%i'%(ig + 1) - print " Storing PLO-group file '%s'..."%(plo_fname) + print(" Storing PLO-group file '%s'..."%(plo_fname)) head_dict = {} @@ -394,7 +394,7 @@ def plo_output(conf_pars, el_struct, pshells, pgroups): # Number of electrons within the window head_dict['nelect'] = pgroup.nelect_window(el_struct) - print " Density within window:", head_dict['nelect'] + print(" Density within window:", head_dict['nelect']) head_shells = [] for ish in pgroup.ishells: @@ -430,13 +430,13 @@ def plo_output(conf_pars, el_struct, pshells, pgroups): f.write("# Eigenvalues within the energy window: %s, %s\n"%(pgroup.emin, pgroup.emax)) nk, nband, ns_band = el_struct.eigvals.shape - for isp in xrange(ns_band): + for isp in range(ns_band): f.write("# is = %i\n"%(isp + 1)) - for ik in xrange(nk): + for ik in range(nk): ib1, ib2 = pgroup.ib_win[ik, isp, 0], pgroup.ib_win[ik, isp, 1] # Output band 
indices in Fortran convention! f.write(" %i %i\n"%(ib1 + 1, ib2 + 1)) - for ib in xrange(ib1, ib2 + 1): + for ib in range(ib1, ib2 + 1): eigv_ef = el_struct.eigvals[ik, ib, isp] - el_struct.efermi f_weight = el_struct.ferw[isp, ik, ib] f.write("%13.8f %12.7f\n"%(eigv_ef, f_weight)) @@ -449,15 +449,15 @@ def plo_output(conf_pars, el_struct, pshells, pgroups): f.write("# Shell %i\n"%(ish)) nion, ns, nk, nlm, nb = shell.proj_win.shape - for isp in xrange(ns): + for isp in range(ns): f.write("# is = %i\n"%(isp + 1)) - for ik in xrange(nk): + for ik in range(nk): f.write("# ik = %i\n"%(ik + 1)) - for ion in xrange(nion): - for ilm in xrange(nlm): + for ion in range(nion): + for ilm in range(nlm): ib1, ib2 = pgroup.ib_win[ik, isp, 0], pgroup.ib_win[ik, isp, 1] ib_win = ib2 - ib1 + 1 - for ib in xrange(ib_win): + for ib in range(ib_win): p = shell.proj_win[ion, isp, ik, ilm, ib] f.write("{0:16.10f}{1:16.10f}\n".format(p.real, p.imag)) f.write("\n") @@ -494,7 +494,7 @@ def hk_output(conf_pars, el_struct, pgroups): for ig, pgroup in enumerate(pgroups): hk_fname = conf_pars.general['basename'] + '.hk%i'%(ig + 1) - print " Storing HK-group file '%s'..."%(hk_fname) + print(" Storing HK-group file '%s'..."%(hk_fname)) head_shells = [] for ish in pgroup.ishells: @@ -528,13 +528,13 @@ def hk_output(conf_pars, el_struct, pgroups): f.write('%i %i %i %i # atom sort l dim\n'%(head['ion_list'][0],head['ion_sort'][0],head['lorb'],head['ndim'])) norbs = pgroup.hk.shape[2] - for isp in xrange(ns_band): - for ik in xrange(nk): - for io in xrange(norbs): - for iop in xrange(norbs): + for isp in range(ns_band): + for ik in range(nk): + for io in range(norbs): + for iop in range(norbs): f.write(" {0:14.10f}".format(pgroup.hk[isp,ik,io,iop].real)) f.write("\n") - for io in xrange(norbs): - for iop in xrange(norbs): + for io in range(norbs): + for iop in range(norbs): f.write(" {0:14.10f}".format(pgroup.hk[isp,ik,io,iop].imag)) f.write("\n") diff --git a/python/converters/plovasp/proj_group.py b/python/converters/plovasp/proj_group.py index 9fdadee8..6a4af019 100644 --- a/python/converters/plovasp/proj_group.py +++ b/python/converters/plovasp/proj_group.py @@ -30,7 +30,7 @@ r""" Storage and manipulation of projector groups. """ import numpy as np -from proj_shell import ComplementShell +from .proj_shell import ComplementShell np.set_printoptions(suppress=True) ################################################################################ @@ -89,8 +89,8 @@ class ProjectorGroup: assert np.all( n_bands == n_bands[0,0] ), "At each band the same number of bands has to be selected for calculating the complement (to end up with an equal number of orbitals at each k-point)." if n_orbs == n_bands[0,0]: self.complement = False - print "\nWARNING: The total number of orbitals in this group is " - print "equal to the number of bands. Setting COMPLEMENT to FALSE!\n" + print("\nWARNING: The total number of orbitals in this group is ") + print("equal to the number of bands. 
Setting COMPLEMENT to FALSE!\n") # Select projectors within the energy window @@ -112,8 +112,8 @@ class ProjectorGroup: self.nelect = 0 nk, ns_band, _ = self.ib_win.shape rspin = 2.0 if ns_band == 1 else 1.0 - for isp in xrange(ns_band): - for ik in xrange(nk): + for isp in range(ns_band): + for ik in range(nk): ib1 = self.ib_win[ik, isp, 0] ib2 = self.ib_win[ik, isp, 1]+1 occ = el_struct.ferw[isp, ik, ib1:ib2] @@ -154,8 +154,8 @@ class ProjectorGroup: _, ns, nk, _, _ = self.shells[0].proj_win.shape p_mat = np.zeros((ndim, self.nb_max), dtype=np.complex128) # Note that 'ns' and 'nk' are the same for all shells - for isp in xrange(ns): - for ik in xrange(nk): + for isp in range(ns): + for ik in range(nk): nb = self.ib_win[ik, isp, 1] - self.ib_win[ik, isp, 0] + 1 # Combine all projectors of the group to one block projector for bl_map in block_maps: @@ -203,8 +203,8 @@ class ProjectorGroup: self.hk = np.zeros((ns,nk,ndim,ndim), dtype=np.complex128) # Note that 'ns' and 'nk' are the same for all shells - for isp in xrange(ns): - for ik in xrange(nk): + for isp in range(ns): + for ik in range(nk): bmin = self.ib_win[ik, isp, 0] bmax = self.ib_win[ik, isp, 1]+1 @@ -247,7 +247,7 @@ class ProjectorGroup: """ - print '\nCalculating complement\n' + print('\nCalculating complement\n') block_maps, ndim = self.get_block_matrix_map() _, ns, nk, _, _ = self.shells[0].proj_win.shape @@ -257,8 +257,8 @@ class ProjectorGroup: # Note that 'ns' and 'nk' are the same for all shells - for isp in xrange(ns): - for ik in xrange(nk): + for isp in range(ns): + for ik in range(nk): bmin = self.ib_win[ik, isp, 0] bmax = self.ib_win[ik, isp, 1]+1 @@ -362,7 +362,7 @@ class ProjectorGroup: _shell = self.shells[ish] nion, ns, nk, nlm, nb_max = _shell.proj_win.shape ndim = max(ndim, nlm) - for ion in xrange(nion): + for ion in range(nion): i1_bl = 0 i2_bl = nlm block = {'bmat_range': (i1_bl, i2_bl)} @@ -378,7 +378,7 @@ class ProjectorGroup: for ish in self.ishells: _shell = self.shells[ish] nion, ns, nk, nlm, nb_max = _shell.proj_win.shape - for ion in xrange(nion): + for ion in range(nion): i2_bl = i1_bl + nlm block = {'bmat_range': (i1_bl, i2_bl)} block['shell_ion'] = (ish, ion) @@ -456,14 +456,14 @@ class ProjectorGroup: ib_min = 10000000 ib_max = 0 - for isp in xrange(ns_band): - for ik in xrange(nk): - for ib in xrange(nband): + for isp in range(ns_band): + for ik in range(nk): + for ib in range(nband): en = eigvals[ik, ib, isp] if en >= self.emin: break ib1 = ib - for ib in xrange(ib1, nband): + for ib in range(ib1, nband): en = eigvals[ik, ib, isp] if en > self.emax: break diff --git a/python/converters/plovasp/proj_shell.py b/python/converters/plovasp/proj_shell.py index e197982e..7ff5922b 100644 --- a/python/converters/plovasp/proj_shell.py +++ b/python/converters/plovasp/proj_shell.py @@ -33,9 +33,9 @@ def issue_warning(message): """ Issues a warning. """ - print - print " !!! WARNING !!!: " + message - print + print() + print(" !!! 
WARNING !!!: " + message) + print() import itertools as it import numpy as np @@ -165,7 +165,7 @@ class ProjectorShell: if is_complex: raw_matrices = raw_matrices[:, ::2] + raw_matrices[:, 1::2] * 1j - for io in xrange(nion): + for io in range(nion): i1 = io * nr i2 = (io + 1) * nr self.tmatrices[io, :, :] = raw_matrices[i1:i2, :] @@ -193,7 +193,7 @@ class ProjectorShell: ndim = nrow self.tmatrices = np.zeros((nion, nrow, nm), dtype=np.complex128) - for io in xrange(nion): + for io in range(nion): self.tmatrices[io, :, :] = raw_matrix return ndim @@ -206,7 +206,7 @@ class ProjectorShell: # We still need the matrices for the output self.tmatrices = np.zeros((nion, ndim, ndim), dtype=np.complex128) - for io in xrange(nion): + for io in range(nion): self.tmatrices[io, :, :] = np.identity(ndim, dtype=np.complex128) return ndim @@ -236,20 +236,20 @@ class ProjectorShell: # for a non-collinear case 'ndim' is 'ns * nm' ndim = self.tmatrices.shape[1] self.proj_arr = np.zeros((nion, ns, nk, ndim, nb), dtype=np.complex128) - for ik in xrange(nk): + for ik in range(nk): kp = kmesh['kpoints'][ik] for io, ion in enumerate(self.ion_list): proj_k = np.zeros((ns, nlm, nb), dtype=np.complex128) qcoord = structure['qcoords'][ion] # kphase = np.exp(-2.0j * np.pi * np.dot(kp, qcoord)) # kphase = 1.0 - for m in xrange(nlm): + for m in range(nlm): # Here we search for the index of the projector with the given isite/l/m indices for ip, par in enumerate(proj_params): if par['isite'] - 1 == ion and par['l'] == self.lorb and par['m'] == m: proj_k[:, m, :] = proj_raw[ip, :, ik, :] #* kphase break - for isp in xrange(ns): + for isp in range(ns): self.proj_arr[io, isp, ik, :, :] = np.dot(self.tmatrices[io, :, :], proj_k[isp, :, :]) else: @@ -257,7 +257,7 @@ class ProjectorShell: self.proj_arr = np.zeros((nion, ns, nk, nlm, nb), dtype=np.complex128) for io, ion in enumerate(self.ion_list): qcoord = structure['qcoords'][ion] - for m in xrange(nlm): + for m in range(nlm): # Here we search for the index of the projector with the given isite/l/m indices for ip, par in enumerate(proj_params): if par['isite'] - 1 == ion and par['l'] == self.lorb and par['m'] == m: @@ -291,8 +291,8 @@ class ProjectorShell: # Select projectors for a given energy window ns_band = self.ib_win.shape[1] - for isp in xrange(ns): - for ik in xrange(nk): + for isp in range(ns): + for ik in range(nk): # TODO: for non-collinear case something else should be done here is_b = min(isp, ns_band) ib1 = self.ib_win[ik, is_b, 0] @@ -328,9 +328,9 @@ class ProjectorShell: ib1 = self.ib_min ib2 = self.ib_max + 1 if site_diag: - for isp in xrange(ns): + for isp in range(ns): for ik, weight, occ in it.izip(it.count(), kweights, occnums[isp, :, :]): - for io in xrange(nion): + for io in range(nion): proj_k = self.proj_win[io, isp, ik, ...] occ_mats[isp, io, :, :] += np.dot(proj_k * occ[ib1:ib2], proj_k.conj().T).real * weight @@ -338,9 +338,9 @@ class ProjectorShell: proj_k.conj().T).real * weight else: proj_k = np.zeros((ndim, nbtot), dtype=np.complex128) - for isp in xrange(ns): + for isp in range(ns): for ik, weight, occ in it.izip(it.count(), kweights, occnums[isp, :, :]): - for io in xrange(nion): + for io in range(nion): i1 = io * nlm i2 = (io + 1) * nlm proj_k[i1:i2, :] = self.proj_win[io, isp, ik, ...] 
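Between the hunks, a compact illustration of the two loop-related changes that dominate this file and plotools.py (the values are made up, not taken from a VASP run): Python 3's `range()` is the lazy sequence that `xrange()` used to be, and `map()` now returns an iterator, which is why 2to3 wraps it in `list()` wherever the result is indexed or reused.

ns, nk = 2, 4                                    # hypothetical spin / k-point counts
for isp in range(ns):                            # the Python 2 code spelled this xrange(ns)
    for ik in range(nk):
        pass                                     # per-(spin, k-point) work goes here

kweights = list(map(float, "0.25 0.25 0.25 0.25".split()))   # list() preserves the old list semantics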
@@ -375,10 +375,10 @@ class ProjectorShell: occnums = el_struct.ferw ib1 = self.ib_min ib2 = self.ib_max + 1 - for isp in xrange(ns): + for isp in range(ns): for ik, weight, occ, eigk in it.izip(it.count(), kweights, occnums[isp, :, :], el_struct.eigvals[:, ib1:ib2, isp]): - for io in xrange(nion): + for io in range(nion): proj_k = self.proj_win[io, isp, ik, ...] loc_ham[isp, io, :, :] += np.dot(proj_k * (eigk - el_struct.efermi), proj_k.conj().T).real * weight @@ -410,13 +410,13 @@ class ProjectorShell: ne = len(emesh) dos = np.zeros((ne, ns, nion, nlm)) w_k = np.zeros((nk, nb_max, ns, nion, nlm), dtype=np.complex128) - for isp in xrange(ns): - for ik in xrange(nk): + for isp in range(ns): + for ik in range(nk): is_b = min(isp, ns_band) ib1 = self.ib_win[ik, is_b, 0] ib2 = self.ib_win[ik, is_b, 1] + 1 - for ib_g in xrange(ib1, ib2): - for io in xrange(nion): + for ib_g in range(ib1, ib2): + for io in range(nion): # Note the difference between 'ib' and 'ibn': # 'ib' counts from 0 to 'nb_k - 1' # 'ibn' counts from 'ib1 - ib_min' to 'ib2 - ib_min' @@ -429,13 +429,13 @@ class ProjectorShell: itt = el_struct.kmesh['itet'].T # k-indices are starting from 0 in Python itt[1:, :] -= 1 - for isp in xrange(ns): + for isp in range(ns): for ib, eigk in enumerate(el_struct.eigvals[:, self.ib_min:self.ib_max+1, isp].T): for ie, e in enumerate(emesh): eigk_ef = eigk - el_struct.efermi cti = atm.dos_tetra_weights_3d(eigk_ef, e, itt) - for im in xrange(nlm): - for io in xrange(nion): + for im in range(nlm): + for io in range(nion): dos[ie, isp, io, im] += np.sum((cti * w_k[itt[1:, :], ib, isp, io, im].real).sum(0) * itt[0, :]) dos *= 2 * el_struct.kmesh['volt'] diff --git a/python/converters/plovasp/sc_dmft.py b/python/converters/plovasp/sc_dmft.py index b0f32aae..2d5c540d 100644 --- a/python/converters/plovasp/sc_dmft.py +++ b/python/converters/plovasp/sc_dmft.py @@ -31,7 +31,7 @@ import time import signal import sys import pytriqs.utility.mpi as mpi -import converter +from . 
import converter from shutil import copyfile xch = sys.excepthook @@ -63,7 +63,7 @@ def is_vasp_running(vasp_pid): if mpi.is_master_node(): try: os.kill(vasp_pid, 0) - except OSError, e: + except OSError as e: pid_exists = e.errno == errno.EPERM else: pid_exists = True @@ -85,7 +85,7 @@ def get_dft_energy(): try: dft_energy = float(line.split()[2]) except ValueError: - print "Cannot read energy from OSZICAR, setting it to zero" + print("Cannot read energy from OSZICAR, setting it to zero") dft_energy = 0.0 return dft_energy @@ -111,7 +111,7 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version): iter = 0 while vasp_running: - if debug: print bcolors.RED + "rank %s"%(mpi.rank) + bcolors.ENDC + if debug: print(bcolors.RED + "rank %s"%(mpi.rank) + bcolors.ENDC) mpi.report(" Waiting for VASP lock to disappear...") mpi.barrier() while is_vasp_lock_present(): @@ -125,30 +125,30 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version): # Tell VASP to stop if the maximum number of iterations is reached - if debug: print bcolors.MAGENTA + "rank %s"%(mpi.rank) + bcolors.ENDC + if debug: print(bcolors.MAGENTA + "rank %s"%(mpi.rank) + bcolors.ENDC) err = 0 exc = None - if debug: print bcolors.BLUE + "plovasp: rank %s"%(mpi.rank) + bcolors.ENDC + if debug: print(bcolors.BLUE + "plovasp: rank %s"%(mpi.rank) + bcolors.ENDC) if mpi.is_master_node(): converter.generate_and_output_as_text(cfg_file, vasp_dir='./') # Read energy from OSZICAR dft_energy = get_dft_energy() mpi.barrier() - if debug: print bcolors.GREEN + "rank %s"%(mpi.rank) + bcolors.ENDC + if debug: print(bcolors.GREEN + "rank %s"%(mpi.rank) + bcolors.ENDC) corr_energy, dft_dc = dmft_cycle() mpi.barrier() if mpi.is_master_node(): total_energy = dft_energy + corr_energy - dft_dc - print - print "="*80 - print " Total energy: ", total_energy - print " DFT energy: ", dft_energy - print " Corr. energy: ", corr_energy - print " DFT DC: ", dft_dc - print "="*80 - print + print() + print("="*80) + print(" Total energy: ", total_energy) + print(" DFT energy: ", dft_energy) + print(" Corr. energy: ", corr_energy) + print(" DFT DC: ", dft_dc) + print("="*80) + print() # check if we should do additional VASP calculations # in the standard VASP version, VASP writes out GAMMA itself @@ -176,8 +176,8 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version): copyfile(src='GAMMA_recent',dst='GAMMA') iter += 1 if iter == n_iter: - print "\n Maximum number of iterations reached." 
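The exception-handling hunk earlier in sc_dmft.py (`except OSError, e:` → `except OSError as e:`) is one of the few changes here that is a hard syntax requirement rather than a library rename. A runnable sketch of the same pattern, using a placeholder pid instead of the real VASP process id:

import errno
import os

vasp_pid = os.getpid()                  # placeholder pid, only for this sketch
try:
    os.kill(vasp_pid, 0)                # signal 0 only probes whether the process exists
except OSError as e:                    # the comma form "except OSError, e:" is a SyntaxError on Python 3
    pid_exists = (e.errno == errno.EPERM)
else:
    pid_exists = True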
- print " Aborting VASP iterations...\n" + print("\n Maximum number of iterations reached.") + print(" Aborting VASP iterations...\n") f_stop = open('STOPCAR', 'wt') f_stop.write("LABORT = .TRUE.\n") f_stop.close() @@ -200,28 +200,28 @@ def main(): vasp_pid = int(sys.argv[1]) except (ValueError, KeyError): if mpi.is_master_node(): - print "ERROR: VASP process pid must be provided as the first argument" + print("ERROR: VASP process pid must be provided as the first argument") raise try: n_iter = int(sys.argv[2]) except (ValueError, KeyError): if mpi.is_master_node(): - print "ERROR: Number of iterations must be provided as the second argument" + print("ERROR: Number of iterations must be provided as the second argument") raise try: n_iter_dft = int(sys.argv[3]) except (ValueError, KeyError): if mpi.is_master_node(): - print "ERROR: Number of VASP iterations with fixed charge density must be provided as the third argument" + print("ERROR: Number of VASP iterations with fixed charge density must be provided as the third argument") raise try: dmft_script = re.sub("\.py$", "", sys.argv[4]) except: if mpi.is_master_node(): - print "ERROR: User-defined DMFT script must be provided as the fourth argument" + print("ERROR: User-defined DMFT script must be provided as the fourth argument") raise # Optional parameter: config-file name diff --git a/python/converters/plovasp/vaspio.py b/python/converters/plovasp/vaspio.py index f5c3cb31..03dd0acb 100644 --- a/python/converters/plovasp/vaspio.py +++ b/python/converters/plovasp/vaspio.py @@ -83,12 +83,12 @@ class VaspData: except (IOError, StopIteration): self.eigenval.eigs = None self.eigenval.ferw = None - print "!!! WARNING !!!: Error reading from EIGENVAL, trying LOCPROJ" + print("!!! WARNING !!!: Error reading from EIGENVAL, trying LOCPROJ") try: self.doscar.from_file(vasp_dir) except (IOError, StopIteration): if efermi_required: - print "!!! WARNING !!!: Error reading from Efermi from DOSCAR, trying LOCPROJ" + print("!!! WARNING !!!: Error reading from Efermi from DOSCAR, trying LOCPROJ") try: self.plocar.efermi self.doscar.efermi = self.plocar.efermi @@ -96,7 +96,7 @@ class VaspData: raise Exception("Efermi cannot be read from DOSCAR or LOCPROJ") else: # TODO: This a hack. Find out a way to determine ncdij without DOSCAR - print "!!! WARNING !!!: Error reading from DOSCAR, taking Efermi from config" + print("!!! 
WARNING !!!: Error reading from DOSCAR, taking Efermi from config") self.doscar.ncdij = self.plocar.nspin ################################################################################ @@ -161,10 +161,10 @@ class Plocar: # Read the first line of LOCPROJ to get the dimensions with open(locproj_filename, 'rt') as f: line = f.readline() - nproj, nspin, nk, nband = map(int, line.split()) + nproj, nspin, nk, nband = list(map(int, line.split())) plo = np.zeros((nproj, nspin, nk, nband), dtype=np.complex128) - proj_params = [{} for i in xrange(nproj)] + proj_params = [{} for i in range(nproj)] iproj_site = 0 is_first_read = True @@ -173,7 +173,7 @@ class Plocar: while line: isite = int(line.split()[1]) if not is_first_read: - for il in xrange(norb): + for il in range(norb): ip_new = iproj_site * norb + il ip_prev = (iproj_site - 1) * norb + il proj_params[ip_new]['label'] = proj_params[ip_prev]['label'] @@ -181,8 +181,8 @@ class Plocar: proj_params[ip_new]['l'] = proj_params[ip_prev]['l'] proj_params[ip_new]['m'] = proj_params[ip_prev]['m'] - for ispin in xrange(nspin): - for ik in xrange(nk): + for ispin in range(nspin): + for ik in range(nk): # Parse the orbital labels and convert them to l,m-indices line = self.search_for(f, "^ *band") if is_first_read: @@ -202,10 +202,10 @@ class Plocar: is_first_read = False # Read the block of nk * ns * nband complex numbers - for ib in xrange(nband): + for ib in range(nband): line = f.readline() - rtmp = map(float, line.split()[1:]) - for il in xrange(norb): + rtmp = list(map(float, line.split()[1:])) + for il in range(norb): ctmp = complex(rtmp[2 * il], rtmp[2 * il + 1]) plo[iproj_site * norb + il, ispin, ik, ib] = ctmp @@ -213,9 +213,9 @@ class Plocar: iproj_site += 1 line = self.search_for(f, "^ *ISITE") - print "Read parameters:" + print("Read parameters:") for il, par in enumerate(proj_params): - print il, " -> ", par + print(il, " -> ", par) return proj_params, plo @@ -242,17 +242,17 @@ class Plocar: line = f.readline() line = line.split("#")[0] sline = line.split() - self.ncdij, nk, self.nband, nproj = map(int, sline[:4]) + self.ncdij, nk, self.nband, nproj = list(map(int, sline[:4])) self.nspin = 1 if self.ncdij == 1 else 2 self.nspin_band = 2 if self.ncdij == 2 else 1 try: self.efermi = float(sline[4]) except: - print "!!! WARNING !!!: Error reading E-Fermi from LOCPROJ, trying DOSCAR" + print("!!! 
WARNING !!!: Error reading E-Fermi from LOCPROJ, trying DOSCAR") plo = np.zeros((nproj, self.nspin, nk, self.nband), dtype=np.complex128) - proj_params = [{} for i in xrange(nproj)] + proj_params = [{} for i in range(nproj)] iproj_site = 0 is_first_read = True @@ -284,26 +284,26 @@ class Plocar: patt = re.compile("^orbital") # FIXME: fix spin indices for NCDIJ = 4 (non-collinear) assert self.ncdij < 4, "Non-collinear case is not implemented" - for ispin in xrange(self.nspin): - for ik in xrange(nk): - for ib in xrange(self.nband): + for ispin in range(self.nspin): + for ik in range(nk): + for ib in range(self.nband): line = "" while not line: line = f.readline().strip() sline = line.split() - isp_, ik_, ib_ = map(int, sline[1:4]) + isp_, ik_, ib_ = list(map(int, sline[1:4])) assert isp_ == ispin + 1 and ik_ == ik + 1 and ib_ == ib + 1, "Inconsistency in reading LOCPROJ" self.eigs[ik, ib, ispin] = float(sline[4]) self.ferw[ik, ib, ispin] = float(sline[5]) - for ip in xrange(nproj): + for ip in range(nproj): line = f.readline() sline = line.split() ctmp = complex(float(sline[1]), float(sline[2])) plo[ip, ispin, ik, ib] = ctmp - print "Read parameters:" + print("Read parameters:") for il, par in enumerate(proj_params): - print il, " -> ", par + print(il, " -> ", par) return proj_params, plo @@ -366,16 +366,16 @@ class Poscar: f = read_lines(vasp_dir + poscar_filename) # Comment line comment = f.next().rstrip() - print " Found POSCAR, title line: %s"%(comment) + print(" Found POSCAR, title line: %s"%(comment)) # Read scale sline = readline_remove_comments() ascale = float(sline) # Read lattice vectors self.a_brav = np.zeros((3, 3)) - for ia in xrange(3): + for ia in range(3): sline = readline_remove_comments() - self.a_brav[ia, :] = map(float, sline.split()) + self.a_brav[ia, :] = list(map(float, sline.split())) # Negative scale means that it is a volume scale if ascale < 0: vscale = -ascale @@ -389,13 +389,13 @@ class Poscar: sline = readline_remove_comments() try: # Old v4.6 format: no element names - self.nions = map(int, sline.split()) - self.el_names = ['El%i'%(i) for i in xrange(len(self.nions))] + self.nions = list(map(int, sline.split())) + self.el_names = ['El%i'%(i) for i in range(len(self.nions))] except ValueError: # New v5.x format: read element names first self.el_names = sline.split() sline = readline_remove_comments() - self.nions = map(int, sline.split()) + self.nions = list(map(int, sline.split())) # Set the number of atom sorts (types) and the total # number of atoms in the unit cell @@ -415,23 +415,23 @@ class Poscar: # Read atomic positions self.q_types = [] self.type_of_ion = [] - for it in xrange(self.ntypes): + for it in range(self.ntypes): # Array mapping ion index to type self.type_of_ion += self.nions[it] * [it] q_at_it = np.zeros((self.nions[it], 3)) - for iq in xrange(self.nions[it]): + for iq in range(self.nions[it]): sline = readline_remove_comments() - qcoord = map(float, sline.split()[:3]) + qcoord = list(map(float, sline.split()[:3])) if cartesian: qcoord = np.dot(brec, qcoord) q_at_it[iq, :] = qcoord self.q_types.append(q_at_it) - print " Total number of ions:", self.nq - print " Number of types:", self.ntypes - print " Number of ions for each type:", self.nions + print(" Total number of ions:", self.nq) + print(" Number of types:", self.ntypes) + print(" Number of ions for each type:", self.nions) # print # print " Coords:" @@ -485,23 +485,23 @@ class Kpoints: ibz_file = read_lines(vasp_dir + ibz_filename) # Skip comment line - line = ibz_file.next() + line 
= next(ibz_file) # Number of k-points - line = ibz_file.next() + line = next(ibz_file) self.nktot = int(line.strip().split()[0]) - print - print " {0:>26} {1:d}".format("Total number of k-points:", self.nktot) + print() + print(" {0:>26} {1:d}".format("Total number of k-points:", self.nktot)) self.kpts = np.zeros((self.nktot, 3)) self.kwghts = np.zeros((self.nktot)) # Skip comment line - line = ibz_file.next() - for ik in xrange(self.nktot): - line = ibz_file.next() + line = next(ibz_file) + for ik in range(self.nktot): + line = next(ibz_file) sline = line.strip().split() - self.kpts[ik, :] = map(float, sline[:3]) + self.kpts[ik, :] = list(map(float, sline[:3])) self.kwghts[ik] = float(sline[3]) self.kwghts /= self.nktot @@ -509,23 +509,23 @@ class Kpoints: # Attempt to read tetrahedra # Skip comment line ("Tetrahedra") try: - line = ibz_file.next() + line = next(ibz_file) # Number of tetrahedra and volume = 1/(6*nkx*nky*nkz) - line = ibz_file.next() + line = next(ibz_file) sline = line.split() self.ntet = int(sline[0]) self.volt = float(sline[1]) - print " {0:>26} {1:d}".format("Total number of tetrahedra:", self.ntet) + print(" {0:>26} {1:d}".format("Total number of tetrahedra:", self.ntet)) # Traditionally, itet[it, 0] contains multiplicity self.itet = np.zeros((self.ntet, 5), dtype=int) - for it in xrange(self.ntet): - line = ibz_file.next() - self.itet[it, :] = map(int, line.split()[:5]) - except StopIteration, ValueError: - print " No tetrahedron data found in %s. Skipping..."%(ibz_filename) + for it in range(self.ntet): + line = next(ibz_file) + self.itet[it, :] = list(map(int, line.split()[:5])) + except StopIteration as ValueError: + print(" No tetrahedron data found in %s. Skipping..."%(ibz_filename)) self.ntet = 0 # data = { 'nktot': nktot, @@ -572,14 +572,14 @@ class Eigenval: self.ispin = int(sline[3]) # Second line: cell volume and lengths of lattice vectors (skip) - sline = f.next() + sline = next(f) # Third line: temperature (skip) - sline = f.next() + sline = next(f) # Fourth and fifth line: useless - sline = f.next() - sline = f.next() + sline = next(f) + sline = next(f) # Sixth line: NELECT, NKTOT, NBTOT sline = f.next().split() @@ -593,16 +593,16 @@ class Eigenval: self.eigs = np.zeros((self.nktot, self.nband, self.ispin)) self.ferw = np.zeros((self.nktot, self.nband, self.ispin)) - for ik in xrange(self.nktot): - sline = f.next() # Empty line - sline = f.next() # k-point info - tmp = map(float, sline.split()) + for ik in range(self.nktot): + sline = next(f) # Empty line + sline = next(f) # k-point info + tmp = list(map(float, sline.split())) self.kpts[ik, :] = tmp[:3] self.kwghts[ik] = tmp[3] - for ib in xrange(self.nband): + for ib in range(self.nband): sline = f.next().split() - tmp = map(float, sline) + tmp = list(map(float, sline)) assert len(tmp) == 2 * self.ispin + 1, "EIGENVAL file is incorrect (probably from old versions of VASP)" self.eigs[ik, ib, :] = tmp[1:self.ispin+1] self.ferw[ik, ib, :] = tmp[self.ispin+1:] @@ -639,8 +639,8 @@ class Doscar: self.ncdij = int(sline[3]) # Skip next 4 lines - for _ in xrange(4): - sline = f.next() + for _ in range(4): + sline = next(f) # Sixth line: EMAX, EMIN, NEDOS, EFERMI, 1.0 sline = f.next().split() @@ -666,54 +666,54 @@ def read_symmcar(vasp_dir, symm_filename='SYMMCAR'): symmcar_exist = False sym_file = read_lines(vasp_dir + symm_filename) - line = sym_file.next() + line = next(sym_file) nrot = extract_int_par('NROT') - line = sym_file.next() + line = next(sym_file) ntrans = extract_int_par('NPCELL') # Lmax - line = 
sym_file.next() + line = next(sym_file) lmax = extract_int_par('LMAX') mmax = 2 * lmax + 1 # Nion - line = sym_file.next() + line = next(sym_file) nion = extract_int_par('NION') - print " {0:>26} {1:d}".format("Number of rotations:", nrot) - print " {0:>26} {1:d}".format("Number of translations:", ntrans) - print " {0:>26} {1:d}".format("Number of ions:", nion) - print " {0:>26} {1:d}".format("L_max:", lmax) + print(" {0:>26} {1:d}".format("Number of rotations:", nrot)) + print(" {0:>26} {1:d}".format("Number of translations:", ntrans)) + print(" {0:>26} {1:d}".format("Number of ions:", nion)) + print(" {0:>26} {1:d}".format("L_max:", lmax)) rot_mats = np.zeros((nrot, lmax+1, mmax, mmax)) rot_map = np.zeros((nrot, ntrans, nion), dtype=np.int32) - for irot in xrange(nrot): + for irot in range(nrot): # Empty line - line = sym_file.next() + line = next(sym_file) # IROT index (skip it) - line = sym_file.next() + line = next(sym_file) # ISYMOP matrix (can be also skipped) - line = sym_file.next() - line = sym_file.next() - line = sym_file.next() + line = next(sym_file) + line = next(sym_file) + line = next(sym_file) # Skip comment " Permutation map..." - line = sym_file.next() + line = next(sym_file) # Permutations (in chunks of 20 indices per line) - for it in xrange(ntrans): - for ibl in xrange((nion - 1) / 20 + 1): + for it in range(ntrans): + for ibl in range((nion - 1) / 20 + 1): i1 = ibl * 20 i2 = (ibl + 1) * 20 - line = sym_file.next() - rot_map[irot, it, i1:i2] = map(int, line.split()) + line = next(sym_file) + rot_map[irot, it, i1:i2] = list(map(int, line.split())) - for l in xrange(lmax + 1): + for l in range(lmax + 1): mmax = 2 * l + 1 # Comment: "L = ..." - line = sym_file.next() - for m in xrange(mmax): - line = sym_file.next() - rot_mats[irot, l, m, :mmax] = map(float, line.split()[:mmax]) + line = next(sym_file) + for m in range(mmax): + line = next(sym_file) + rot_mats[irot, l, m, :mmax] = list(map(float, line.split()[:mmax])) data.update({ 'nrot': nrot, 'ntrans': ntrans, 'lmax': lmax, 'nion': nion, diff --git a/python/converters/vasp_converter.py b/python/converters/vasp_converter.py index 9ab3bdc0..486e558e 100644 --- a/python/converters/vasp_converter.py +++ b/python/converters/vasp_converter.py @@ -27,7 +27,7 @@ from types import * import numpy from pytriqs.archive import * -from converter_tools import * +from .converter_tools import * import os.path try: import simplejson as json @@ -150,7 +150,7 @@ class VaspConverter(ConverterTools): # R is a generator : each R.Next() will return the next number in the file jheader, rf = self.read_header_and_data(self.ctrl_file) - print jheader + print(jheader) ctrl_head = json.loads(jheader) ng = ctrl_head['ngroups'] @@ -163,12 +163,12 @@ class VaspConverter(ConverterTools): kpts_cart = numpy.zeros((n_k, 3)) bz_weights = numpy.zeros(n_k) try: - for ik in xrange(n_k): - kx, ky, kz = rf.next(), rf.next(), rf.next() + for ik in range(n_k): + kx, ky, kz = next(rf), next(rf), next(rf) kpts[ik, :] = kx, ky, kz - bz_weights[ik] = rf.next() - for ik in xrange(n_k): - kx, ky, kz = rf.next(), rf.next(), rf.next() + bz_weights[ik] = next(rf) + for ik in range(n_k): + kx, ky, kz = next(rf), next(rf), next(rf) kpts_cart[ik, :] = kx, ky, kz except StopIteration: raise "VaspConverter: error reading %s"%self.ctrl_file @@ -186,7 +186,7 @@ class VaspConverter(ConverterTools): assert ng == 1, "Only one group is allowed at the moment" try: - for ig in xrange(ng): + for ig in range(ng): gr_file = self.basename + '.pg%i'%(ig + 1) jheader, rf = 
self.read_header_and_data(gr_file) gr_head = json.loads(jheader) @@ -203,9 +203,9 @@ class VaspConverter(ConverterTools): shells = [] corr_shells = [] - shion_to_shell = [[] for ish in xrange(len(p_shells))] - cr_shion_to_shell = [[] for ish in xrange(len(p_shells))] - shorbs_to_globalorbs = [[] for ish in xrange(len(p_shells))] + shion_to_shell = [[] for ish in range(len(p_shells))] + cr_shion_to_shell = [[] for ish in range(len(p_shells))] + shorbs_to_globalorbs = [[] for ish in range(len(p_shells))] last_dimension = 0 crshorbs_to_globalorbs = [] icsh = 0 @@ -243,7 +243,7 @@ class VaspConverter(ConverterTools): n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(self, corr_shells) if mpi.is_master_node(): - print " No. of inequivalent shells:", n_inequiv_shells + print(" No. of inequivalent shells:", n_inequiv_shells) # NB!: these rotation matrices are specific to Wien2K! Set to identity in VASP use_rotations = 1 @@ -272,19 +272,19 @@ class VaspConverter(ConverterTools): # else: hopping = numpy.zeros([n_k, n_spin_blocs, nb_max, nb_max], numpy.complex_) f_weights = numpy.zeros([n_k, n_spin_blocs, nb_max], numpy.complex_) - band_window = [numpy.zeros((n_k, 2), dtype=int) for isp in xrange(n_spin_blocs)] + band_window = [numpy.zeros((n_k, 2), dtype=int) for isp in range(n_spin_blocs)] n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int) - for isp in xrange(n_spin_blocs): - for ik in xrange(n_k): - ib1, ib2 = int(rf.next()), int(rf.next()) + for isp in range(n_spin_blocs): + for ik in range(n_k): + ib1, ib2 = int(next(rf)), int(next(rf)) band_window[isp][ik, :2] = ib1, ib2 nb = ib2 - ib1 + 1 n_orbitals[ik, isp] = nb - for ib in xrange(nb): - hopping[ik, isp, ib, ib] = rf.next() - f_weights[ik, isp, ib] = rf.next() + for ib in range(nb): + hopping[ik, isp, ib, ib] = next(rf) + f_weights[ik, isp, ib] = next(rf) if self.proj_or_hk == 'hk': hopping = numpy.zeros([n_k, n_spin_blocs, n_orbs, n_orbs], numpy.complex_) @@ -298,15 +298,15 @@ class VaspConverter(ConverterTools): f_hk.readline() count += 1 rf_hk = self.read_data(f_hk) - for isp in xrange(n_spin_blocs): - for ik in xrange(n_k): + for isp in range(n_spin_blocs): + for ik in range(n_k): n_orbitals[ik, isp] = n_orbs - for ib in xrange(n_orbs): - for jb in xrange(n_orbs): - hopping[ik, isp, ib, jb] = rf_hk.next() - for ib in xrange(n_orbs): - for jb in xrange(n_orbs): - hopping[ik, isp, ib, jb] += 1j*rf_hk.next() + for ib in range(n_orbs): + for jb in range(n_orbs): + hopping[ik, isp, ib, jb] = next(rf_hk) + for ib in range(n_orbs): + for jb in range(n_orbs): + hopping[ik, isp, ib, jb] += 1j*next(rf_hk) rf_hk.close() # Projectors @@ -328,14 +328,14 @@ class VaspConverter(ConverterTools): # use cases and decide which solution is to be made permanent. 
# for ish, sh in enumerate(p_shells): - for isp in xrange(n_spin_blocs): - for ik in xrange(n_k): - for ion in xrange(len(sh['ion_list'])): - for ilm in xrange(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1]): - for ib in xrange(n_orbitals[ik, isp]): + for isp in range(n_spin_blocs): + for ik in range(n_k): + for ion in range(len(sh['ion_list'])): + for ilm in range(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1]): + for ib in range(n_orbitals[ik, isp]): # This is to avoid confusion with the order of arguments - pr = rf.next() - pi = rf.next() + pr = next(rf) + pi = next(rf) proj_mat_csc[ik, isp, ilm, ib] = complex(pr, pi) # now save only projectors with flag 'corr' to proj_mat @@ -343,22 +343,22 @@ class VaspConverter(ConverterTools): if self.proj_or_hk == 'proj': for ish, sh in enumerate(p_shells): if sh['corr']: - for isp in xrange(n_spin_blocs): - for ik in xrange(n_k): - for ion in xrange(len(sh['ion_list'])): + for isp in range(n_spin_blocs): + for ik in range(n_k): + for ion in range(len(sh['ion_list'])): icsh = shion_to_shell[ish][ion] - for iclm,ilm in enumerate(xrange(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])): - for ib in xrange(n_orbitals[ik, isp]): + for iclm,ilm in enumerate(range(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])): + for ib in range(n_orbitals[ik, isp]): proj_mat[ik,isp,icsh,iclm,ib] = proj_mat_csc[ik,isp,ilm,ib] elif self.proj_or_hk == 'hk': for ish, sh in enumerate(p_shells): if sh['corr']: - for ion in xrange(len(sh['ion_list'])): + for ion in range(len(sh['ion_list'])): icsh = shion_to_shell[ish][ion] - for isp in xrange(n_spin_blocs): - for ik in xrange(n_k): - for iclm,ilm in enumerate(xrange(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])): + for isp in range(n_spin_blocs): + for ik in range(n_k): + for iclm,ilm in enumerate(range(shorbs_to_globalorbs[ish][ion][0],shorbs_to_globalorbs[ish][ion][1])): proj_mat[ik,isp,icsh,iclm,ilm] = 1.0 #corr_shell.pop('ion_list') @@ -445,13 +445,13 @@ class VaspConverter(ConverterTools): if os.path.exists(f): mpi.report("Reading input from %s..."%f) R = ConverterTools.read_fortran_file(self, f, self.fortran_to_replace) - assert int(R.next()) == n_k, "convert_misc_input: Number of k-points is inconsistent in oubwin file!" - assert int(R.next()) == SO, "convert_misc_input: SO is inconsistent in oubwin file!" - for ik in xrange(n_k): - R.next() - band_window[isp][ik,0] = R.next() # lowest band - band_window[isp][ik,1] = R.next() # highest band - R.next() + assert int(next(R)) == n_k, "convert_misc_input: Number of k-points is inconsistent in oubwin file!" + assert int(next(R)) == SO, "convert_misc_input: SO is inconsistent in oubwin file!" + for ik in range(n_k): + next(R) + band_window[isp][ik,0] = next(R) # lowest band + band_window[isp][ik,1] = next(R) # highest band + next(R) things_to_save.append('band_window') R.close() # Reading done! 
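The hunks above repeatedly replace the Python-2-only iterator method `gen.next()` with the builtin `next(gen)` and rewrite the old `except Exc, name:` syntax. Below is a minimal, self-contained Python 3 sketch of both idioms; the `read_numbers` generator and the sample data are illustrative only and are not part of the converter sources. Note in particular that Python 2's `except StopIteration, ValueError:` binds the caught StopIteration to the name `ValueError` rather than catching both types, which is why 2to3 renders it as `except StopIteration as ValueError:` in the Kpoints hunk; catching both exception types requires the tuple form shown here.

# Illustrative Python 3 sketch (hypothetical helper, not converter code).

def read_numbers(lines):
    """Yield one float per whitespace-separated token, line by line."""
    for line in lines:
        for token in line.split():
            yield float(token)

sample = ["1.0 2.0 3.0", "0.25"]   # stand-in for file contents
rf = read_numbers(sample)

# Python 2: kx = rf.next()   ->   Python 3: kx = next(rf)
kx, ky, kz = next(rf), next(rf), next(rf)
weight = next(rf)
print(kx, ky, kz, weight)

try:
    next(rf)                            # generator is exhausted here
except (StopIteration, ValueError):     # tuple form catches either type
    print("No more data in the stream.")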
diff --git a/python/converters/wannier90_converter.py b/python/converters/wannier90_converter.py index 925d14e3..82d4ecbc 100644 --- a/python/converters/wannier90_converter.py +++ b/python/converters/wannier90_converter.py @@ -48,7 +48,7 @@ from types import * import numpy import math from pytriqs.archive import * -from converter_tools import * +from .converter_tools import * from itertools import product import os.path @@ -125,19 +125,19 @@ class Wannier90Converter(ConverterTools): # conversion try: # read k - point mesh generation option - kmesh_mode = int(R.next()) + kmesh_mode = int(next(R)) if kmesh_mode >= 0: # read k-point mesh size from input - nki = [int(R.next()) for idir in range(3)] + nki = [int(next(R)) for idir in range(3)] else: # some default grid, if everything else fails... nki = [8, 8, 8] # read the total number of electrons per cell - density_required = float(R.next()) + density_required = float(next(R)) # we do not read shells, because we have no additional shells beyond correlated ones, # and the data will be copied from corr_shells into shells (see below) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, - n_corr_shells = int(R.next()) + n_corr_shells = int(next(R)) # now read the information about the correlated shells (atom, sort, # l, dim, SO flag, irep): corr_shells = [{name: int(val) for name, val in zip( @@ -423,7 +423,7 @@ class Wannier90Converter(ConverterTools): ir += 1 # for each direct lattice vector R read the block of the # Hamiltonian H(R) - for ir, jj, ii in product(range(nrpt), range(num_wf), range(num_wf)): + for ir, jj, ii in product(list(range(nrpt)), list(range(num_wf)), list(range(num_wf))): # advance one line, split the line into tokens currpos += 1 cline = hr_data[currpos].split() @@ -569,7 +569,7 @@ class Wannier90Converter(ConverterTools): nkpt = msize[0] * msize[1] * msize[2] kmesh = numpy.zeros((nkpt, 3), dtype=float) ii = 0 - for ix, iy, iz in product(range(msize[0]), range(msize[1]), range(msize[2])): + for ix, iy, iz in product(list(range(msize[0])), list(range(msize[1])), list(range(msize[2]))): kmesh[ii, :] = [float(ix) / msize[0], float(iy) / msize[1], float(iz) / msize[2]] ii += 1 @@ -601,8 +601,8 @@ class Wannier90Converter(ConverterTools): twopi = 2 * numpy.pi h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_) for ik in range(self.n_k)] - ridx = numpy.array(range(self.nrpt)) - for ik, ir in product(range(self.n_k), ridx): + ridx = numpy.array(list(range(self.nrpt))) + for ik, ir in product(list(range(self.n_k)), ridx): rdotk = twopi * numpy.dot(self.k_mesh[ik], self.rvec[ir]) factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / \ float(self.rdeg[ir]) diff --git a/python/converters/wien2k_converter.py b/python/converters/wien2k_converter.py index 1fcb267b..44406bcf 100644 --- a/python/converters/wien2k_converter.py +++ b/python/converters/wien2k_converter.py @@ -23,7 +23,7 @@ from types import * import numpy from pytriqs.archive import * -from converter_tools import * +from .converter_tools import * import os.path @@ -114,23 +114,23 @@ class Wien2kConverter(ConverterTools): R = ConverterTools.read_fortran_file( self, self.dft_file, self.fortran_to_replace) try: - energy_unit = R.next() # read the energy convertion factor + energy_unit = next(R) # read the energy convertion factor # read the number of k points - n_k = int(R.next()) + n_k = int(next(R)) k_dep_projection = 1 # flag for spin-polarised calculation - SP = int(R.next()) + SP = int(next(R)) # flag for spin-orbit calculation - SO = int(R.next()) - 
charge_below = R.next() # total charge below energy window + SO = int(next(R)) + charge_below = next(R) # total charge below energy window # total density required, for setting the chemical potential - density_required = R.next() + density_required = next(R) symm_op = 1 # Use symmetry groups for the k-sum # the information on the non-correlated shells is not important # here, maybe skip: # number of shells (e.g. Fe d, As p, O p) in the unit cell, - n_shells = int(R.next()) + n_shells = int(next(R)) # corresponds to index R in formulas # now read the information about the shells (atom, sort, l, dim): shell_entries = ['atom', 'sort', 'l', 'dim'] @@ -138,7 +138,7 @@ class Wien2kConverter(ConverterTools): shell_entries, R)} for ish in range(n_shells)] # number of corr. shells (e.g. Fe d, Ce f) in the unit cell, - n_corr_shells = int(R.next()) + n_corr_shells = int(next(R)) # corresponds to index R in formulas # now read the information about the shells (atom, sort, l, dim, SO # flag, irep): @@ -161,14 +161,14 @@ class Wien2kConverter(ConverterTools): for icrsh in range(n_corr_shells): for i in range(corr_shells[icrsh]['dim']): # read real part: for j in range(corr_shells[icrsh]['dim']): - rot_mat[icrsh][i, j] = R.next() + rot_mat[icrsh][i, j] = next(R) # read imaginary part: for i in range(corr_shells[icrsh]['dim']): for j in range(corr_shells[icrsh]['dim']): - rot_mat[icrsh][i, j] += 1j * R.next() + rot_mat[icrsh][i, j] += 1j * next(R) if (SP == 1): # read time inversion flag: - rot_mat_time_inv[icrsh] = int(R.next()) + rot_mat_time_inv[icrsh] = int(next(R)) # Read here the info for the transformation of the basis: n_reps = [1 for i in range(n_inequiv_shells)] @@ -176,8 +176,8 @@ class Wien2kConverter(ConverterTools): T = [] for ish in range(n_inequiv_shells): # number of representatives ("subsets"), e.g. t2g and eg - n_reps[ish] = int(R.next()) - dim_reps[ish] = [int(R.next()) for i in range( + n_reps[ish] = int(next(R)) + dim_reps[ish] = [int(next(R)) for i in range( n_reps[ish])] # dimensions of the subsets # The transformation matrix: @@ -189,10 +189,10 @@ class Wien2kConverter(ConverterTools): # now read it from file: for i in range(lmax): for j in range(lmax): - T[ish][i, j] = R.next() + T[ish][i, j] = next(R) for i in range(lmax): for j in range(lmax): - T[ish][i, j] += 1j * R.next() + T[ish][i, j] += 1j * next(R) # Spin blocks to be read: n_spin_blocs = SP + 1 - SO @@ -201,7 +201,7 @@ class Wien2kConverter(ConverterTools): n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int) for isp in range(n_spin_blocs): for ik in range(n_k): - n_orbitals[ik, isp] = int(R.next()) + n_orbitals[ik, isp] = int(next(R)) # Initialise the projectors: proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max( @@ -216,12 +216,12 @@ class Wien2kConverter(ConverterTools): for isp in range(n_spin_blocs): for i in range(n_orb): for j in range(n_orbitals[ik][isp]): - proj_mat[ik, isp, icrsh, i, j] = R.next() + proj_mat[ik, isp, icrsh, i, j] = next(R) # now Imag part: for isp in range(n_spin_blocs): for i in range(n_orb): for j in range(n_orbitals[ik][isp]): - proj_mat[ik, isp, icrsh, i, j] += 1j * R.next() + proj_mat[ik, isp, icrsh, i, j] += 1j * next(R) # now define the arrays for weights and hopping ... # w(k_index), default normalisation @@ -231,7 +231,7 @@ class Wien2kConverter(ConverterTools): # weights in the file for ik in range(n_k): - bz_weights[ik] = R.next() + bz_weights[ik] = next(R) # if the sum over spins is in the weights, take it out again!! 
sm = sum(bz_weights) @@ -244,7 +244,7 @@ class Wien2kConverter(ConverterTools): for ik in range(n_k): n_orb = n_orbitals[ik, isp] for i in range(n_orb): - hopping[ik, isp, i, i] = R.next() * energy_unit + hopping[ik, isp, i, i] = next(R) * energy_unit # keep some things that we need for reading parproj: things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells', @@ -252,7 +252,7 @@ class Wien2kConverter(ConverterTools): for it in things_to_set: setattr(self, it, locals()[it]) except StopIteration: # a more explicit error if the file is corrupted. - raise IOError, "Wien2k_converter : reading file %s failed!" % self.dft_file + raise IOError("Wien2k_converter : reading file %s failed!" % self.dft_file) R.close() # Reading done! @@ -308,7 +308,7 @@ class Wien2kConverter(ConverterTools): R = ConverterTools.read_fortran_file( self, self.parproj_file, self.fortran_to_replace) - n_parproj = [int(R.next()) for i in range(self.n_shells)] + n_parproj = [int(next(R)) for i in range(self.n_shells)] n_parproj = numpy.array(n_parproj) # Initialise P, here a double list of matrices: @@ -328,39 +328,39 @@ class Wien2kConverter(ConverterTools): # read real part: for i in range(self.shells[ish]['dim']): for j in range(self.n_orbitals[ik][isp]): - proj_mat_all[ik, isp, ish, ir, i, j] = R.next() + proj_mat_all[ik, isp, ish, ir, i, j] = next(R) for isp in range(self.n_spin_blocs): # read imaginary part: for i in range(self.shells[ish]['dim']): for j in range(self.n_orbitals[ik][isp]): proj_mat_all[ik, isp, ish, - ir, i, j] += 1j * R.next() + ir, i, j] += 1j * next(R) # now read the Density Matrix for this orbital below the energy # window: for isp in range(self.n_spin_blocs): for i in range(self.shells[ish]['dim']): # read real part: for j in range(self.shells[ish]['dim']): - dens_mat_below[isp][ish][i, j] = R.next() + dens_mat_below[isp][ish][i, j] = next(R) for isp in range(self.n_spin_blocs): # read imaginary part: for i in range(self.shells[ish]['dim']): for j in range(self.shells[ish]['dim']): - dens_mat_below[isp][ish][i, j] += 1j * R.next() + dens_mat_below[isp][ish][i, j] += 1j * next(R) if (self.SP == 0): dens_mat_below[isp][ish] /= 2.0 # Global -> local rotation matrix for this shell: for i in range(self.shells[ish]['dim']): # read real part: for j in range(self.shells[ish]['dim']): - rot_mat_all[ish][i, j] = R.next() + rot_mat_all[ish][i, j] = next(R) for i in range(self.shells[ish]['dim']): # read imaginary part: for j in range(self.shells[ish]['dim']): - rot_mat_all[ish][i, j] += 1j * R.next() + rot_mat_all[ish][i, j] += 1j * next(R) if (self.SP): - rot_mat_all_time_inv[ish] = int(R.next()) + rot_mat_all_time_inv[ish] = int(next(R)) R.close() # Reading done! @@ -404,13 +404,13 @@ class Wien2kConverter(ConverterTools): mpi.report("Reading input from %s..." 
% self.band_file) R = ConverterTools.read_fortran_file( self, self.band_file, self.fortran_to_replace) - n_k = int(R.next()) + n_k = int(next(R)) # read the list of n_orbitals for all k points n_orbitals = numpy.zeros([n_k, self.n_spin_blocs], numpy.int) for isp in range(self.n_spin_blocs): for ik in range(n_k): - n_orbitals[ik, isp] = int(R.next()) + n_orbitals[ik, isp] = int(next(R)) # Initialise the projectors: proj_mat = numpy.zeros([n_k, self.n_spin_blocs, self.n_corr_shells, max( @@ -425,12 +425,12 @@ class Wien2kConverter(ConverterTools): for isp in range(self.n_spin_blocs): for i in range(n_orb): for j in range(n_orbitals[ik, isp]): - proj_mat[ik, isp, icrsh, i, j] = R.next() + proj_mat[ik, isp, icrsh, i, j] = next(R) # now Imag part: for isp in range(self.n_spin_blocs): for i in range(n_orb): for j in range(n_orbitals[ik, isp]): - proj_mat[ik, isp, icrsh, i, j] += 1j * R.next() + proj_mat[ik, isp, icrsh, i, j] += 1j * next(R) hopping = numpy.zeros([n_k, self.n_spin_blocs, numpy.max( n_orbitals), numpy.max(n_orbitals)], numpy.complex_) @@ -441,10 +441,10 @@ class Wien2kConverter(ConverterTools): for ik in range(n_k): n_orb = n_orbitals[ik, isp] for i in range(n_orb): - hopping[ik, isp, i, i] = R.next() * self.energy_unit + hopping[ik, isp, i, i] = next(R) * self.energy_unit # now read the partial projectors: - n_parproj = [int(R.next()) for i in range(self.n_shells)] + n_parproj = [int(next(R)) for i in range(self.n_shells)] n_parproj = numpy.array(n_parproj) # Initialise P, here a double list of matrices: @@ -460,20 +460,20 @@ class Wien2kConverter(ConverterTools): for i in range(self.shells[ish]['dim']): for j in range(n_orbitals[ik, isp]): proj_mat_all[ik, isp, ish, - ir, i, j] = R.next() + ir, i, j] = next(R) # read imaginary part: for i in range(self.shells[ish]['dim']): for j in range(n_orbitals[ik, isp]): proj_mat_all[ik, isp, ish, - ir, i, j] += 1j * R.next() + ir, i, j] += 1j * next(R) R.close() except KeyError: - raise IOError, "convert_bands_input : Needed data not found in hdf file. Consider calling convert_dft_input first!" + raise IOError("convert_bands_input : Needed data not found in hdf file. Consider calling convert_dft_input first!") except StopIteration: # a more explicit error if the file is corrupted. - raise IOError, "Wien2k_converter : reading file %s failed!" % self.band_file + raise IOError("Wien2k_converter : reading file %s failed!" % self.band_file) # Reading done! @@ -507,7 +507,7 @@ class Wien2kConverter(ConverterTools): # Check if SP, SO and n_k are already in h5 with HDFArchive(self.hdf_file, 'r') as ar: if not (self.dft_subgrp in ar): - raise IOError, "convert_misc_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp + raise IOError("convert_misc_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp) SP = ar[self.dft_subgrp]['SP'] SO = ar[self.dft_subgrp]['SO'] n_k = ar[self.dft_subgrp]['n_k'] @@ -539,19 +539,19 @@ class Wien2kConverter(ConverterTools): mpi.report("Reading input from %s..." % f) R = ConverterTools.read_fortran_file( self, f, self.fortran_to_replace) - n_k_oubwin = int(R.next()) + n_k_oubwin = int(next(R)) if (n_k_oubwin != n_k): mpi.report( "convert_misc_input : WARNING : n_k in case.oubwin is different from n_k in case.klist") assert int( - R.next()) == SO, "convert_misc_input: SO is inconsistent in oubwin file!" + next(R)) == SO, "convert_misc_input: SO is inconsistent in oubwin file!" 
band_window[isp] = numpy.zeros((n_k_oubwin, 2), dtype=int) - for ik in xrange(n_k_oubwin): - R.next() - band_window[isp][ik, 0] = R.next() # lowest band - band_window[isp][ik, 1] = R.next() # highest band - R.next() + for ik in range(n_k_oubwin): + next(R) + band_window[isp][ik, 0] = next(R) # lowest band + band_window[isp][ik, 1] = next(R) # highest band + next(R) things_to_save.append('band_window') R.close() # Reading done! @@ -578,7 +578,7 @@ class Wien2kConverter(ConverterTools): things_to_save.extend( ['lattice_type', 'lattice_constants', 'lattice_angles']) except IOError: - raise IOError, "convert_misc_input: reading file %s failed" % self.struct_file + raise IOError("convert_misc_input: reading file %s failed" % self.struct_file) # Read relevant data from .outputs file ####################################### @@ -610,7 +610,7 @@ class Wien2kConverter(ConverterTools): things_to_save.extend(['n_symmetries', 'rot_symmetries']) things_to_save.append('rot_symmetries') except IOError: - raise IOError, "convert_misc_input: reading file %s failed" % self.outputs_file + raise IOError("convert_misc_input: reading file %s failed" % self.outputs_file) # Save it to the HDF: with HDFArchive(self.hdf_file, 'a') as ar: @@ -635,7 +635,7 @@ class Wien2kConverter(ConverterTools): # Check if SP, SO and n_k are already in h5 with HDFArchive(self.hdf_file, 'r') as ar: if not (self.dft_subgrp in ar): - raise IOError, "convert_transport_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp + raise IOError("convert_transport_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp) SP = ar[self.dft_subgrp]['SP'] SO = ar[self.dft_subgrp]['SO'] n_k = ar[self.dft_subgrp]['n_k'] @@ -665,20 +665,20 @@ class Wien2kConverter(ConverterTools): band_window_optics = [] for isp, f in enumerate(files): if not os.path.exists(f): - raise IOError, "convert_transport_input: File %s does not exist" % f + raise IOError("convert_transport_input: File %s does not exist" % f) mpi.report("Reading input from %s..." 
% f) R = ConverterTools.read_fortran_file( self, f, {'D': 'E', '(': '', ')': '', ',': ' '}) band_window_optics_isp = [] - for ik in xrange(n_k): - R.next() - nu1 = int(R.next()) - nu2 = int(R.next()) + for ik in range(n_k): + next(R) + nu1 = int(next(R)) + nu2 = int(next(R)) band_window_optics_isp.append((nu1, nu2)) n_bands = nu2 - nu1 + 1 for _ in range(4): - R.next() + next(R) if n_bands <= 0: velocity_xyz = numpy.zeros((1, 1, 3), dtype=complex) else: @@ -688,7 +688,7 @@ class Wien2kConverter(ConverterTools): for nu_j in range(nu_i, n_bands): for i in range(3): velocity_xyz[nu_i][nu_j][ - i] = R.next() + R.next() * 1j + i] = next(R) + next(R) * 1j if (nu_i != nu_j): velocity_xyz[nu_j][nu_i][i] = velocity_xyz[ nu_i][nu_j][i].conjugate() @@ -737,13 +737,13 @@ class Wien2kConverter(ConverterTools): self, symm_file, self.fortran_to_replace) try: - n_symm = int(R.next()) # Number of symmetry operations - n_atoms = int(R.next()) # number of atoms involved - perm = [[int(R.next()) for i in range(n_atoms)] + n_symm = int(next(R)) # Number of symmetry operations + n_atoms = int(next(R)) # number of atoms involved + perm = [[int(next(R)) for i in range(n_atoms)] for j in range(n_symm)] # list of permutations of the atoms if SP: # time inversion for SO coupling - time_inv = [int(R.next()) for j in range(n_symm)] + time_inv = [int(next(R)) for j in range(n_symm)] else: time_inv = [0 for j in range(n_symm)] @@ -757,11 +757,11 @@ class Wien2kConverter(ConverterTools): for i in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']): # real part - mat[i_symm][orb][i, j] = R.next() + mat[i_symm][orb][i, j] = next(R) for i in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']): mat[i_symm][orb][i, j] += 1j * \ - R.next() # imaginary part + next(R) # imaginary part mat_tinv = [numpy.identity(orbits[orb]['dim'], numpy.complex_) for orb in range(n_orbits)] @@ -773,14 +773,14 @@ class Wien2kConverter(ConverterTools): for i in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']): # real part - mat_tinv[orb][i, j] = R.next() + mat_tinv[orb][i, j] = next(R) for i in range(orbits[orb]['dim']): for j in range(orbits[orb]['dim']): mat_tinv[orb][i, j] += 1j * \ - R.next() # imaginary part + next(R) # imaginary part except StopIteration: # a more explicit error if the file is corrupted. - raise IOError, "Wien2k_converter : reading file %s failed!" %symm_file + raise IOError("Wien2k_converter : reading file %s failed!" %symm_file) R.close() # Reading done! diff --git a/python/sumk_dft.py b/python/sumk_dft.py index 1cd7f715..e76a1c6b 100644 --- a/python/sumk_dft.py +++ b/python/sumk_dft.py @@ -27,8 +27,8 @@ from pytriqs.gf import * import pytriqs.utility.mpi as mpi from pytriqs.utility.comparison_tests import assert_arrays_are_close from pytriqs.archive import * -from symmetry import * -from block_structure import BlockStructure +from .symmetry import * +from .block_structure import BlockStructure from sets import Set from itertools import product from warnings import warn @@ -127,10 +127,10 @@ class SumkDFT(object): # GF structure used for the local things in the k sums # Most general form allowing for all hybridisation, i.e. 
largest # blocks possible - self.gf_struct_sumk = [[(sp, range(self.corr_shells[icrsh]['dim'])) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]] + self.gf_struct_sumk = [[(sp, list(range(self.corr_shells[icrsh]['dim']))) for sp in self.spin_block_names[self.corr_shells[icrsh]['SO']]] for icrsh in range(self.n_corr_shells)] # First set a standard gf_struct solver: - self.gf_struct_solver = [dict([(sp, range(self.corr_shells[self.inequiv_to_corr[ish]]['dim'])) + self.gf_struct_solver = [dict([(sp, list(range(self.corr_shells[self.inequiv_to_corr[ish]]['dim']))) for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]]) for ish in range(self.n_inequiv_shells)] # Set standard (identity) maps from gf_struct_sumk <-> @@ -273,7 +273,7 @@ class SumkDFT(object): try: list_to_return.append(ar[subgrp][it]) except: - raise ValueError, "load: %s not found, and so not loaded." % it + raise ValueError("load: %s not found, and so not loaded." % it) return list_to_return ################ @@ -324,7 +324,7 @@ class SumkDFT(object): projmat = self.proj_mat[ik, isp, ish, 0:dim, 0:n_orb] elif shells == 'all': if ir is None: - raise ValueError, "downfold: provide ir if treating all shells." + raise ValueError("downfold: provide ir if treating all shells.") dim = self.shells[ish]['dim'] projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb] elif shells == 'csc': @@ -379,7 +379,7 @@ class SumkDFT(object): projmat = self.proj_mat[ik, isp, ish, 0:dim, 0:n_orb] elif shells == 'all': if ir is None: - raise ValueError, "upfold: provide ir if treating all shells." + raise ValueError("upfold: provide ir if treating all shells.") dim = self.shells[ish]['dim'] projmat = self.proj_mat_all[ik, isp, ish, ir, 0:dim, 0:n_orb] elif shells == 'csc': @@ -495,7 +495,7 @@ class SumkDFT(object): ntoi = self.spin_names_to_ind[self.SO] spn = self.spin_block_names[self.SO] if (iw_or_w != "iw") and (iw_or_w != "w"): - raise ValueError, "lattice_gf: Implemented only for Re/Im frequency functions." + raise ValueError("lattice_gf: Implemented only for Re/Im frequency functions.") if not hasattr(self, "Sigma_imp_" + iw_or_w): with_Sigma = False if broadening is None: @@ -521,12 +521,12 @@ class SumkDFT(object): else: if iw_or_w == "iw": if beta is None: - raise ValueError, "lattice_gf: Give the beta for the lattice GfReFreq." + raise ValueError("lattice_gf: Give the beta for the lattice GfReFreq.") # Default number of Matsubara frequencies mesh = MeshImFreq(beta=beta, S='Fermion', n_max=1025) elif iw_or_w == "w": if mesh is None: - raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." + raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.") mesh = MeshReFreq(mesh[0], mesh[1], mesh[2]) # Check if G_latt is present @@ -547,7 +547,7 @@ class SumkDFT(object): # Set up G_latt if set_up_G_latt: block_structure = [ - range(self.n_orbitals[ik, ntoi[sp]]) for sp in spn] + list(range(self.n_orbitals[ik, ntoi[sp]])) for sp in spn] gf_struct = [(spn[isp], block_structure[isp]) for isp in range(self.n_spin_blocks[self.SO])] block_ind_list = [block for block, inner in gf_struct] @@ -624,13 +624,13 @@ class SumkDFT(object): SK_Sigma_imp = self.Sigma_imp_w else: - raise ValueError, "put_Sigma: This type of Sigma is not handled." 
+ raise ValueError("put_Sigma: This type of Sigma is not handled.") # transform the CTQMC blocks to the full matrix: for icrsh in range(self.n_corr_shells): # ish is the index of the inequivalent shell corresponding to icrsh ish = self.corr_to_inequiv[icrsh] - for block, inner in self.gf_struct_solver[ish].iteritems(): + for block, inner in self.gf_struct_solver[ish].items(): for ind1 in inner: for ind2 in inner: block_sumk, ind1_sumk = self.solver_to_sumk[ @@ -678,19 +678,19 @@ class SumkDFT(object): G_loc = [self.Sigma_imp_iw[icrsh].copy() for icrsh in range( self.n_corr_shells)] # this list will be returned beta = G_loc[0].mesh.beta - G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=G_loc[0].mesh)) for block, inner in self.gf_struct_solver[ish].iteritems()], + G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=G_loc[0].mesh)) for block, inner in self.gf_struct_solver[ish].items()], make_copies=False) for ish in range(self.n_inequiv_shells)] elif iw_or_w == "w": G_loc = [self.Sigma_imp_w[icrsh].copy() for icrsh in range( self.n_corr_shells)] # this list will be returned mesh = G_loc[0].mesh - G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=mesh)) for block, inner in self.gf_struct_solver[ish].iteritems()], + G_loc_inequiv = [BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=mesh)) for block, inner in self.gf_struct_solver[ish].items()], make_copies=False) for ish in range(self.n_inequiv_shells)] for icrsh in range(self.n_corr_shells): G_loc[icrsh].zero() # initialize to zero - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): if iw_or_w == 'iw': G_latt = self.lattice_gf( @@ -729,7 +729,7 @@ class SumkDFT(object): # transform to CTQMC blocks: for ish in range(self.n_inequiv_shells): - for block, inner in self.gf_struct_solver[ish].iteritems(): + for block, inner in self.gf_struct_solver[ish].items(): for ind1 in inner: for ind2 in inner: block_sumk, ind1_sumk = self.solver_to_sumk[ @@ -782,7 +782,7 @@ class SumkDFT(object): for ish in range(self.n_corr_shells)] if include_shells is None: - include_shells = range(self.n_inequiv_shells) + include_shells = list(range(self.n_inequiv_shells)) for ish in include_shells: for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]: @@ -815,7 +815,7 @@ class SumkDFT(object): for i in range(num_blocs): blocs[i].sort() self.gf_struct_solver[ish].update( - [('%s_%s' % (sp, i), range(len(blocs[i])))]) + [('%s_%s' % (sp, i), list(range(len(blocs[i]))))]) # Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner) # and solver_to_sumk taking (solver_block, solver_inner) --> @@ -834,7 +834,7 @@ class SumkDFT(object): # Now calculate degeneracies of orbitals dm = {} - for block, inner in self.gf_struct_solver[ish].iteritems(): + for block, inner in self.gf_struct_solver[ish].items(): # get dm for the blocks: dm[block] = numpy.zeros( [len(inner), len(inner)], numpy.complex_) @@ -847,8 +847,8 @@ class SumkDFT(object): dm[block][ind1, ind2] = dens_mat[ish][ block_sumk][ind1_sumk, ind2_sumk] - for block1 in self.gf_struct_solver[ish].iterkeys(): - for block2 in self.gf_struct_solver[ish].iterkeys(): + for block1 in self.gf_struct_solver[ish].keys(): + for block2 in self.gf_struct_solver[ish].keys(): if dm[block1].shape == dm[block2].shape: if ((abs(dm[block1] - dm[block2]) < threshold).all()) and (block1 != block2): 
ind1 = -1 @@ -969,7 +969,7 @@ class SumkDFT(object): if include_shells is None: # include all shells - include_shells = range(self.n_inequiv_shells) + include_shells = list(range(self.n_inequiv_shells)) for ish in include_shells: for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]: @@ -1002,7 +1002,7 @@ class SumkDFT(object): for i in range(num_blocs): blocs[i].sort() self.gf_struct_solver[ish].update( - [('%s_%s' % (sp, i), range(len(blocs[i])))]) + [('%s_%s' % (sp, i), list(range(len(blocs[i]))))]) # Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner) # and solver_to_sumk taking (solver_block, solver_inner) --> @@ -1021,7 +1021,7 @@ class SumkDFT(object): # transform G to the new structure full_structure = BlockStructure.full_structure( - [{sp:range(self.corr_shells[self.inequiv_to_corr[ish]]['dim']) + [{sp:list(range(self.corr_shells[self.inequiv_to_corr[ish]]['dim'])) for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]} for ish in range(self.n_inequiv_shells)],None) G_transformed = [ @@ -1073,7 +1073,7 @@ class SumkDFT(object): if include_shells is None: # include all shells - include_shells = range(self.n_inequiv_shells) + include_shells = list(range(self.n_inequiv_shells)) # We consider two blocks equal, if their Green's functions obey # maybe_conjugate1( v1^dagger G1 v1 ) = maybe_conjugate2( v2^dagger G2 v2 ) @@ -1086,8 +1086,8 @@ class SumkDFT(object): # where our goal is to find T # we just try whether there is such a T with and without conjugation for ish in include_shells: - for block1 in self.gf_struct_solver[ish].iterkeys(): - for block2 in self.gf_struct_solver[ish].iterkeys(): + for block1 in self.gf_struct_solver[ish].keys(): + for block2 in self.gf_struct_solver[ish].keys(): if block1==block2: continue # check if the blocks are already present in the deg_shells @@ -1298,7 +1298,7 @@ class SumkDFT(object): dens_mat[icrsh][sp] = numpy.zeros( [self.corr_shells[icrsh]['dim'], self.corr_shells[icrsh]['dim']], numpy.complex_) - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): if method == "using_gf": @@ -1327,7 +1327,7 @@ class SumkDFT(object): MMat[isp][inu, inu] = 0.0 else: - raise ValueError, "density_matrix: the method '%s' is not supported." % method + raise ValueError("density_matrix: the method '%s' is not supported." % method) for icrsh in range(self.n_corr_shells): for isp, sp in enumerate(self.spin_block_names[self.corr_shells[icrsh]['SO']]): @@ -1527,10 +1527,10 @@ class SumkDFT(object): spn = self.spin_block_names[self.corr_shells[icrsh]['SO']] Ncr = {sp: 0.0 for sp in spn} - for block, inner in self.gf_struct_solver[ish].iteritems(): + for block, inner in self.gf_struct_solver[ish].items(): bl = self.solver_to_sumk_block[ish][block] Ncr[bl] += dens_mat[block].real.trace() - Ncrtot = sum(Ncr.itervalues()) + Ncrtot = sum(Ncr.values()) for sp in spn: self.dc_imp[icrsh][sp] = numpy.identity(dim, numpy.float_) if self.SP == 0: # average the densities if there is no SP: @@ -1543,7 +1543,7 @@ class SumkDFT(object): if use_dc_value is None: if U_interact is None and J_hund is None: - raise ValueError, "set_dc: either provide U_interact and J_hund or set use_dc_value to dc value." 
+ raise ValueError("set_dc: either provide U_interact and J_hund or set use_dc_value to dc value.") if use_dc_formula == 0: # FLL @@ -1733,7 +1733,7 @@ class SumkDFT(object): if mu is None: mu = self.chemical_potential dens = 0.0 - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt = self.lattice_gf( ik=ik, mu=mu, iw_or_w=iw_or_w, with_Sigma=with_Sigma, with_dc=with_dc, broadening=broadening) @@ -1848,7 +1848,7 @@ class SumkDFT(object): # Convert Fermi weights to a density matrix dens_mat_dft = {} for sp in spn: - dens_mat_dft[sp] = [fermi_weights[ik, ntoi[sp], :].astype(numpy.complex_) for ik in xrange(self.n_k)] + dens_mat_dft[sp] = [fermi_weights[ik, ntoi[sp], :].astype(numpy.complex_) for ik in range(self.n_k)] # Set up deltaN: @@ -1857,7 +1857,7 @@ class SumkDFT(object): deltaN[sp] = [numpy.zeros([self.n_orbitals[ik, ntoi[sp]], self.n_orbitals[ ik, ntoi[sp]]], numpy.complex_) for ik in range(self.n_k)] - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt_iw = self.lattice_gf( ik=ik, mu=self.chemical_potential, iw_or_w="iw") @@ -1946,7 +1946,7 @@ class SumkDFT(object): to_write = {f: (0, 'up'), f1: (1, 'down')} if self.SO == 1: to_write = {f: (0, 'ud'), f1: (0, 'ud')} - for fout in to_write.iterkeys(): + for fout in to_write.keys(): isp, sp = to_write[fout] for ik in range(self.n_k): fout.write("%s\n" % self.n_orbitals[ik, isp]) @@ -1963,12 +1963,12 @@ class SumkDFT(object): if mpi.is_master_node(): with open(filename, 'w') as f: f.write(" %i -1 ! Number of k-points, default number of bands\n"%(self.n_k)) - for ik in xrange(self.n_k): + for ik in range(self.n_k): ib1 = band_window[0][ik, 0] ib2 = band_window[0][ik, 1] f.write(" %i %i %i\n"%(ik + 1, ib1, ib2)) - for inu in xrange(self.n_orbitals[ik, 0]): - for imu in xrange(self.n_orbitals[ik, 0]): + for inu in range(self.n_orbitals[ik, 0]): + for imu in range(self.n_orbitals[ik, 0]): valre = (deltaN['up'][ik][inu, imu].real + deltaN['down'][ik][inu, imu].real) / 2.0 valim = (deltaN['up'][ik][inu, imu].imag + deltaN['down'][ik][inu, imu].imag) / 2.0 f.write(" %.14f %.14f"%(valre, valim)) diff --git a/python/sumk_dft_tools.py b/python/sumk_dft_tools.py index 589e800f..190c75b9 100644 --- a/python/sumk_dft_tools.py +++ b/python/sumk_dft_tools.py @@ -23,8 +23,8 @@ from types import * import numpy from pytriqs.gf import * import pytriqs.utility.mpi as mpi -from symmetry import * -from sumk_dft import SumkDFT +from .symmetry import * +from .sumk_dft import SumkDFT from scipy.integrate import * from scipy.interpolate import * @@ -79,7 +79,7 @@ class SumkDFTTools(SumkDFT): DOS projected to atoms and resolved into orbital contributions. """ if (mesh is None) and (not with_Sigma): - raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." + raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.") if mesh is None: om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh] om_min = om_mesh[0] @@ -111,7 +111,7 @@ class SumkDFTTools(SumkDFT): DOSproj_orb[ish][sp] = numpy.zeros( [n_om, dim, dim], numpy.complex_) - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt_w = self.lattice_gf( @@ -217,7 +217,7 @@ class SumkDFTTools(SumkDFT): DOS projected to atoms and resolved into orbital contributions. 
""" if (mesh is None) and (not with_Sigma): - raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." + raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.") if mesh is None: om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh] om_min = om_mesh[0] @@ -229,12 +229,12 @@ class SumkDFTTools(SumkDFT): om_mesh = numpy.linspace(om_min, om_max, n_om) spn = self.spin_block_names[self.SO] - gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn] + gf_struct_parproj = [[(sp, list(range(self.shells[ish]['dim']))) for sp in spn] for ish in range(self.n_shells)] #print(self.proj_mat_csc.shape[2]) #print(spn) n_local_orbs = self.proj_mat_csc.shape[2] - gf_struct_parproj_all = [[(sp, range(n_local_orbs)) for sp in spn]] + gf_struct_parproj_all = [[(sp, list(range(n_local_orbs))) for sp in spn]] glist_all = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om) for block, inner in gf_struct_parproj_all[0]] @@ -251,7 +251,7 @@ class SumkDFTTools(SumkDFT): DOSproj_orb[sp] = numpy.zeros( [n_om, dim, dim], numpy.complex_) - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt_w = self.lattice_gf( @@ -352,7 +352,7 @@ class SumkDFTTools(SumkDFT): self.symmpar = Symmetry(self.hdf_file, subgroup=self.symmpar_data) if (mesh is None) and (not with_Sigma): - raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq." + raise ValueError("lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq.") if mesh is None: om_mesh = [x.real for x in self.Sigma_imp_w[0].mesh] om_min = om_mesh[0] @@ -365,7 +365,7 @@ class SumkDFTTools(SumkDFT): G_loc = [] spn = self.spin_block_names[self.SO] - gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn] + gf_struct_parproj = [[(sp, list(range(self.shells[ish]['dim']))) for sp in spn] for ish in range(self.n_shells)] for ish in range(self.n_shells): glist = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om) @@ -386,7 +386,7 @@ class SumkDFTTools(SumkDFT): DOSproj_orb[ish][sp] = numpy.zeros( [n_om, dim, dim], numpy.complex_) - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt_w = self.lattice_gf( @@ -526,12 +526,12 @@ class SumkDFTTools(SumkDFT): if not ishell is None: gf_struct_parproj = [ - (sp, range(self.shells[ishell]['dim'])) for sp in spn] + (sp, list(range(self.shells[ishell]['dim']))) for sp in spn] G_loc = BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=self.Sigma_imp_w[0].mesh)) for block, inner in gf_struct_parproj], make_copies=False) G_loc.zero() - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt_w = self.lattice_gf( @@ -653,7 +653,7 @@ class SumkDFTTools(SumkDFT): for ish in range(self.n_shells)] for isp in range(len(spn))] # Set up G_loc - gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn] + gf_struct_parproj = [[(sp, list(range(self.shells[ish]['dim']))) for sp in spn] for ish in range(self.n_shells)] if with_Sigma: G_loc = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=self.Sigma_imp_iw[0].mesh)) @@ -667,7 +667,7 @@ class SumkDFTTools(SumkDFT): for ish in range(self.n_shells): G_loc[ish].zero() - ikarray = numpy.array(range(self.n_k)) + ikarray = 
numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): G_latt_iw = self.lattice_gf( @@ -828,10 +828,10 @@ class SumkDFTTools(SumkDFT): if mpi.is_master_node(): ar = HDFArchive(self.hdf_file, 'r') if not (self.transp_data in ar): - raise IOError, "transport_distribution: No %s subgroup in hdf file found! Call convert_transp_input first." % self.transp_data + raise IOError("transport_distribution: No %s subgroup in hdf file found! Call convert_transp_input first." % self.transp_data) # check if outputs file was converted if not ('n_symmetries' in ar['dft_misc_input']): - raise IOError, "transport_distribution: n_symmetries missing. Check if case.outputs file is present and call convert_misc_input() or convert_dft_input()." + raise IOError("transport_distribution: n_symmetries missing. Check if case.outputs file is present and call convert_misc_input() or convert_dft_input().") self.read_transport_input_from_hdf() @@ -894,7 +894,7 @@ class SumkDFTTools(SumkDFT): for i, g in self.Sigma_imp_w[icrsh]: for iL in g.indices[0]: for iR in g.indices[0]: - for iom in xrange(n_om): + for iom in range(n_om): g.data[iom, int(iL), int(iR)] = Sigma_save[ i].data[ioffset + iom, int(iL), int(iR)] else: @@ -913,18 +913,18 @@ class SumkDFTTools(SumkDFT): self.Om_mesh = iOm_mesh * d_omega if mpi.is_master_node(): - print "Chemical potential: ", mu - print "Using n_om = %s points in the energy_window [%s,%s]" % (n_om, self.omega[0], self.omega[-1]), - print "where the omega vector is:" - print self.omega - print "Calculation requested for Omega mesh: ", numpy.array(Om_mesh) - print "Omega mesh automatically repined to: ", self.Om_mesh + print("Chemical potential: ", mu) + print("Using n_om = %s points in the energy_window [%s,%s]" % (n_om, self.omega[0], self.omega[-1]), end=' ') + print("where the omega vector is:") + print(self.omega) + print("Calculation requested for Omega mesh: ", numpy.array(Om_mesh)) + print("Omega mesh automatically repined to: ", self.Om_mesh) self.Gamma_w = {direction: numpy.zeros( (len(self.Om_mesh), n_om), dtype=numpy.float_) for direction in self.directions} # Sum over all k-points - ikarray = numpy.array(range(self.n_k)) + ikarray = numpy.array(list(range(self.n_k))) for ik in mpi.slice_array(ikarray): # Calculate G_w for ik and initialize A_kw G_w = self.lattice_gf(ik, mu, iw_or_w="w", beta=beta, @@ -938,7 +938,7 @@ class SumkDFTTools(SumkDFT): A_kw[isp] = copy.deepcopy(G_w[self.spin_block_names[self.SO][ isp]].data.swapaxes(0, 1).swapaxes(1, 2)) # calculate A(k,w) for each frequency - for iw in xrange(n_om): + for iw in range(n_om): A_kw[isp][:, :, iw] = -1.0 / (2.0 * numpy.pi * 1j) * ( A_kw[isp][:, :, iw] - numpy.conjugate(numpy.transpose(A_kw[isp][:, :, iw]))) @@ -963,7 +963,7 @@ class SumkDFTTools(SumkDFT): # calculate Gamma_w for each direction from the velocities # vel_R and the spectral function A_kw for direction in self.directions: - for iw in xrange(n_om): + for iw in range(n_om): for iq in range(len(self.Om_mesh)): if(iw + iOm_mesh[iq] >= n_om or self.omega[iw] < -self.Om_mesh[iq] + energy_window[0] or self.omega[iw] > self.Om_mesh[iq] + energy_window[1]): continue @@ -1033,7 +1033,7 @@ class SumkDFTTools(SumkDFT): else: # rectangular integration for w-grid (orignal implementation) d_w = self.omega[1] - self.omega[0] - for iw in xrange(self.Gamma_w[direction].shape[1]): + for iw in range(self.Gamma_w[direction].shape[1]): A += A_int[iw] * d_w A = A * numpy.pi * (2.0 - self.SP) else: @@ -1083,16 +1083,16 @@ class SumkDFTTools(SumkDFT): (n_q,), numpy.nan) 
for direction in self.directions} for direction in self.directions: - for iq in xrange(n_q): + for iq in range(n_q): A0[direction][iq] = self.transport_coefficient( direction, iq=iq, n=0, beta=beta, method=method) A1[direction][iq] = self.transport_coefficient( direction, iq=iq, n=1, beta=beta, method=method) A2[direction][iq] = self.transport_coefficient( direction, iq=iq, n=2, beta=beta, method=method) - print "A_0 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A0[direction][iq]) - print "A_1 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A1[direction][iq]) - print "A_2 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A2[direction][iq]) + print("A_0 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A0[direction][iq])) + print("A_1 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A1[direction][iq])) + print("A_2 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A2[direction][iq])) if ~numpy.isnan(A1[direction][iq]): # Seebeck and kappa are overwritten if there is more than one Omega = # 0 in Om_mesh @@ -1102,11 +1102,11 @@ class SumkDFTTools(SumkDFT): self.kappa[direction] *= 293178.0 self.optic_cond[direction] = beta * \ A0[direction] * 10700.0 / numpy.pi - for iq in xrange(n_q): - print "Conductivity in direction %s for Omega = %.2f %f x 10^4 Ohm^-1 cm^-1" % (direction, self.Om_mesh[iq], self.optic_cond[direction][iq]) + for iq in range(n_q): + print("Conductivity in direction %s for Omega = %.2f %f x 10^4 Ohm^-1 cm^-1" % (direction, self.Om_mesh[iq], self.optic_cond[direction][iq])) if not (numpy.isnan(A1[direction][iq])): - print "Seebeck in direction %s for Omega = 0.00 %f x 10^(-6) V/K" % (direction, self.seebeck[direction]) - print "kappa in direction %s for Omega = 0.00 %f W/(m * K)" % (direction, self.kappa[direction]) + print("Seebeck in direction %s for Omega = 0.00 %f x 10^(-6) V/K" % (direction, self.seebeck[direction])) + print("kappa in direction %s for Omega = 0.00 %f W/(m * K)" % (direction, self.kappa[direction])) return self.optic_cond, self.seebeck, self.kappa diff --git a/python/trans_basis.py b/python/trans_basis.py index 91a014a5..845f50a8 100644 --- a/python/trans_basis.py +++ b/python/trans_basis.py @@ -113,7 +113,7 @@ class TransBasis: # transform the CTQMC blocks to the full matrix: # ish is the index of the inequivalent shell corresponding to icrsh ish = self.SK.corr_to_inequiv[0] - for block, inner in self.gf_struct_solver[ish].iteritems(): + for block, inner in self.gf_struct_solver[ish].items(): for ind1 in inner: for ind2 in inner: gfrotated[self.SK.solver_to_sumk_block[ish][block]][ @@ -126,7 +126,7 @@ class TransBasis: gfreturn = gf_to_rot.copy() # Put back into CTQMC basis: - for block, inner in self.gf_struct_solver[ish].iteritems(): + for block, inner in self.gf_struct_solver[ish].items(): for ind1 in inner: for ind2 in inner: gfreturn[block][ind1, ind2] << gfrotated[ diff --git a/python/update_archive.py b/python/update_archive.py index c2af8c69..0a14bdc7 100644 --- a/python/update_archive.py +++ b/python/update_archive.py @@ -5,15 +5,15 @@ import numpy import subprocess if len(sys.argv) < 2: - print "Usage: python update_archive.py old_archive [v1.0|v1.2]" + print("Usage: python update_archive.py old_archive [v1.0|v1.2]") sys.exit() -print """ +print(""" This script is an attempt to update your archive to TRIQS 1.2. 
Please keep a copy of your old archive as this script is ** not guaranteed ** to work for your archive. If you encounter any problem please report it on github! -""" +""") def convert_shells(shells): @@ -63,26 +63,26 @@ A = h5py.File(filename) old_to_new = {'SumK_LDA': 'dft_input', 'SumK_LDA_ParProj': 'dft_parproj_input', 'SymmCorr': 'dft_symmcorr_input', 'SymmPar': 'dft_symmpar_input', 'SumK_LDA_Bands': 'dft_bands_input'} -for old, new in old_to_new.iteritems(): - if old not in A.keys(): +for old, new in old_to_new.items(): + if old not in list(A.keys()): continue - print "Changing %s to %s ..." % (old, new) + print("Changing %s to %s ..." % (old, new)) A.copy(old, new) del(A[old]) # Move output items from dft_input to user_data move_to_output = ['chemical_potential', 'dc_imp', 'dc_energ'] for obj in move_to_output: - if obj in A['dft_input'].keys(): + if obj in list(A['dft_input'].keys()): if 'user_data' not in A: A.create_group('user_data') - print "Moving %s to user_data ..." % obj + print("Moving %s to user_data ..." % obj) A.copy('dft_input/' + obj, 'user_data/' + obj) del(A['dft_input'][obj]) # Delete obsolete quantities to_delete = ['gf_struct_solver', 'map_inv', 'map', 'deg_shells', 'h_field'] for obj in to_delete: - if obj in A['dft_input'].keys(): + if obj in list(A['dft_input'].keys()): del(A['dft_input'][obj]) if from_v == 'v1.0': @@ -109,11 +109,11 @@ if 'n_inequiv_shells' not in A['dft_input']: # Rename variables groups = ['dft_symmcorr_input', 'dft_symmpar_input'] for group in groups: - if group not in A.keys(): + if group not in list(A.keys()): continue if 'n_s' not in A[group]: continue - print "Changing n_s to n_symm ..." + print("Changing n_s to n_symm ...") A[group].move('n_s', 'n_symm') # Convert orbits to list of dicts orbits_old = HDFArchive(filename, 'r')[group]['orbits'] @@ -125,11 +125,11 @@ for group in groups: groups = ['dft_parproj_input'] for group in groups: - if group not in A.keys(): + if group not in list(A.keys()): continue if 'proj_mat_pc' not in A[group]: continue - print "Changing proj_mat_pc to proj_mat_all ..." + print("Changing proj_mat_pc to proj_mat_all ...") A[group].move('proj_mat_pc', 'proj_mat_all') A.close() @@ -137,6 +137,6 @@ A.close() # Repack to reclaim disk space retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"]) if retcode != 0: - print "h5repack failed!" 
+ print("h5repack failed!") else: subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename]) diff --git a/python/version.py.in b/python/version.py.in index ea412282..489e0617 100644 --- a/python/version.py.in +++ b/python/version.py.in @@ -24,7 +24,7 @@ triqs_hash = "@TRIQS_GIT_HASH@" dft_tools_hash = "@DFT_TOOLS_GIT_HASH@" def show_version(): - print "\nYou are using the dft_tools version %s\n"%version + print("\nYou are using the dft_tools version %s\n"%version) def show_git_hash(): - print "\nYou are using the dft_tools git hash %s based on triqs git hash %s\n"%(dft_tools_hash, triqs_hash) + print("\nYou are using the dft_tools git hash %s based on triqs git hash %s\n"%(dft_tools_hash, triqs_hash)) diff --git a/test/analyse_block_structure_from_gf.py b/test/analyse_block_structure_from_gf.py index 5a831444..500636b5 100644 --- a/test/analyse_block_structure_from_gf.py +++ b/test/analyse_block_structure_from_gf.py @@ -213,7 +213,7 @@ for conjugate in conjugate_values: # first, construct the old format of the deg shells for ish in range(len(SK.deg_shells)): for gr in range(len(SK.deg_shells[ish])): - SK.deg_shells[ish][gr] = SK.deg_shells[ish][gr].keys() + SK.deg_shells[ish][gr] = list(SK.deg_shells[ish][gr].keys()) # symmetrizing the GF as is has to leave it unchanged G_new_symm << G_pre_transform diff --git a/test/analyse_block_structure_from_gf2.py b/test/analyse_block_structure_from_gf2.py index 19553af8..0be757e2 100644 --- a/test/analyse_block_structure_from_gf2.py +++ b/test/analyse_block_structure_from_gf2.py @@ -35,13 +35,13 @@ Hloc[8:,8:] = Hloc1 V = get_random_hermitian(2) # the hopping elements from impurity to bath b1 = np.random.rand() # the bath energy of the first bath level b2 = np.random.rand() # the bath energy of the second bath level -delta = GfReFreq(window=(-10,10), indices=range(2), n_points=1001) +delta = GfReFreq(window=(-10,10), indices=list(range(2)), n_points=1001) delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0 # construct G -G = BlockGf(name_block_generator=[('ud',GfReFreq(window=(-10,10), indices=range(10), n_points=1001))], make_copies=False) +G = BlockGf(name_block_generator=[('ud',GfReFreq(window=(-10,10), indices=list(range(10)), n_points=1001))], make_copies=False) for i in range(0,10,2): G['ud'][i:i+2,i:i+2] << inverse(Omega-delta+0.02j) G['ud'] << inverse(inverse(G['ud']) - Hloc) @@ -58,7 +58,7 @@ assert SK.gf_struct_sumk == [[('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], [('ud', [ "wrong gf_struct_sumk" for i in range(5): assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block" - assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == range(2), "wrong block size" + assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == list(range(2)), "wrong block size" for i in range(10): assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping" @@ -101,7 +101,7 @@ assert SK.gf_struct_sumk == [[('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], [('ud', [ "wrong gf_struct_sumk" for i in range(5): assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block" - assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == range(2), "wrong block size" + 
assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == list(range(2)), "wrong block size" for i in range(10): assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping" diff --git a/test/plovasp/atm/mytest.py b/test/plovasp/atm/mytest.py index fb0c64e8..4c56f35c 100644 --- a/test/plovasp/atm/mytest.py +++ b/test/plovasp/atm/mytest.py @@ -44,8 +44,8 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] # diff delta = difflib.unified_diff(lstr1, lstr2) # combine delta's to a string diff --git a/test/plovasp/converter/mytest.py b/test/plovasp/converter/mytest.py index 9d28fdbe..84f9dc59 100644 --- a/test/plovasp/converter/mytest.py +++ b/test/plovasp/converter/mytest.py @@ -45,8 +45,8 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] # diff delta = difflib.unified_diff(lstr1, lstr2) # combine delta's to a string diff --git a/test/plovasp/inpconf/test_general.py b/test/plovasp/inpconf/test_general.py index b30d3879..1d1316da 100644 --- a/test/plovasp/inpconf/test_general.py +++ b/test/plovasp/inpconf/test_general.py @@ -2,10 +2,10 @@ r""" Tests of 'parse_general()' defined in ConfigParameters class """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import arraytest +from . import arraytest import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters diff --git a/test/plovasp/inpconf/test_groups.py b/test/plovasp/inpconf/test_groups.py index 7dbd9cc2..af078fa1 100644 --- a/test/plovasp/inpconf/test_groups.py +++ b/test/plovasp/inpconf/test_groups.py @@ -2,10 +2,10 @@ r""" Tests of 'parse_groups()' defined in ConfigParameters class """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import arraytest +from . import arraytest import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters @@ -30,7 +30,7 @@ class TestParseGroups(arraytest.ArrayTestCase): def test_gr_required(self): conf_pars = ConfigParameters(_rpath + 'parse_groups_1.cfg') err_mess = "Required parameter" - with self.assertRaisesRegexp(Exception, err_mess): + with self.assertRaisesRegex(Exception, err_mess): conf_pars.parse_groups() # Scenario 2 @@ -42,8 +42,8 @@ class TestParseGroups(arraytest.ArrayTestCase): 'normalize': True, 'normion': True,'complement': False}, {'index': 2, 'shells': [3], 'ewindow': (-1.6, 2.0), 'normalize': True, 'normion': True,'complement': False}] - print res - print expected + print(res) + print(expected) self.assertListEqual(res, expected) diff --git a/test/plovasp/inpconf/test_input.py b/test/plovasp/inpconf/test_input.py index 6bf12050..75d4afb1 100644 --- a/test/plovasp/inpconf/test_input.py +++ b/test/plovasp/inpconf/test_input.py @@ -2,10 +2,10 @@ r""" Tests of 'parse_input()' defined in ConfigParameters class """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import arraytest +from . 
import arraytest import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters @@ -38,28 +38,28 @@ class TestParseInput(arraytest.ArrayTestCase): def test_no_group(self): conf_pars = ConfigParameters(_rpath + 'input_test_1.cfg') err_mess = "At least one group" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): conf_pars.parse_input() # Scenario 2 def test_gr_required(self): conf_pars = ConfigParameters(_rpath + 'input_test_2.cfg') err_mess = "One \[Shell\] section is" - with self.assertRaisesRegexp(KeyError, err_mess): + with self.assertRaisesRegex(KeyError, err_mess): conf_pars.parse_input() # Scenario 3 def test_no_shell(self): conf_pars = ConfigParameters(_rpath + 'input_test_3.cfg') err_mess = "Shell 3 referenced in" - with self.assertRaisesRegexp(Exception, err_mess): + with self.assertRaisesRegex(Exception, err_mess): conf_pars.parse_input() # Scenario 4 def test_shell_outside_groups(self): conf_pars = ConfigParameters(_rpath + 'input_test_4.cfg') err_mess = "Some shells are not inside" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): conf_pars.parse_input() # Scenario 5 diff --git a/test/plovasp/inpconf/test_parameter_set.py b/test/plovasp/inpconf/test_parameter_set.py index f42b4f76..35d8089c 100644 --- a/test/plovasp/inpconf/test_parameter_set.py +++ b/test/plovasp/inpconf/test_parameter_set.py @@ -2,10 +2,10 @@ r""" Tests of 'parse_parameter_set()' defined in ConfigParameters class """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import arraytest +from . import arraytest import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters @@ -47,6 +47,6 @@ class TestParseParameterSet(arraytest.ArrayTestCase): section = 'Shell 1' param_set = self.cpars.sh_required # contains 'lshell' and 'ions' err_mess = "Required parameter" # .* in section [%s]"%(section) - with self.assertRaisesRegexp(Exception, err_mess): + with self.assertRaisesRegex(Exception, err_mess): self.cpars.parse_parameter_set(section, param_set, exception=True) diff --git a/test/plovasp/inpconf/test_shells.py b/test/plovasp/inpconf/test_shells.py index cb5d68fb..ecbc9271 100644 --- a/test/plovasp/inpconf/test_shells.py +++ b/test/plovasp/inpconf/test_shells.py @@ -2,10 +2,10 @@ r""" Tests of 'parse_shells()' defined in ConfigParameters class """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import arraytest +from . 
import arraytest import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters @@ -37,21 +37,21 @@ class TestParseShells(arraytest.ArrayTestCase): def test_no_shell(self): conf_pars = ConfigParameters(_rpath + 'parse_shells_1.cfg') err_mess = "No projected shells" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): conf_pars.parse_shells() # Scenario 2 def test_bad_indices(self): conf_pars = ConfigParameters(_rpath + 'parse_shells_2.cfg') err_mess = "Failed to extract shell indices" - with self.assertRaisesRegexp(ValueError, err_mess): + with self.assertRaisesRegex(ValueError, err_mess): conf_pars.parse_shells() # Scenario 3 def test_sh_required(self): conf_pars = ConfigParameters(_rpath + 'parse_shells_3.cfg') err_mess = "Required parameter" - with self.assertRaisesRegexp(Exception, err_mess): + with self.assertRaisesRegex(Exception, err_mess): conf_pars.parse_shells() # Scenario 4 diff --git a/test/plovasp/inpconf/test_special_parsers.py b/test/plovasp/inpconf/test_special_parsers.py index b9811bec..d3d01f27 100644 --- a/test/plovasp/inpconf/test_special_parsers.py +++ b/test/plovasp/inpconf/test_special_parsers.py @@ -2,10 +2,10 @@ r""" Tests of special parseres defined in ConfigParameters class """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import arraytest +from . import arraytest import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters @@ -87,7 +87,7 @@ class TestParseStringIonList(arraytest.ArrayTestCase): # Scenario 3 def test_out_of_bounds(self): err_mess = "Lowest ion index is" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_string_ion_list('0 1') # Scenario 4 @@ -99,7 +99,7 @@ class TestParseStringIonList(arraytest.ArrayTestCase): # Scenario 5 def test_range_wrong_order(self): err_mess = "First index of the range" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_string_ion_list('8..5') # Scenario 6 @@ -140,14 +140,14 @@ class TestParseStringTmatrix(arraytest.ArrayTestCase): def test_number_of_columns(self): par_str = "1.0 0.0\n1.0" err_mess = "Number of columns" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_string_tmatrix(par_str, real=True) # Scenario 2 def test_complex_matrix_odd(self): par_str = "1.0 0.0 2.0 1.0 0.0\n0.0 1.0 2.0 3.0 -1.0" err_mess = "Complex matrix must" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_string_tmatrix(par_str, real=False) # Scenario 3 @@ -192,13 +192,13 @@ class TestParseEnergyWindow(arraytest.ArrayTestCase): # Scenario 2 def test_wrong_range(self): err_mess = "The first float in EWINDOW" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_energy_window('3.0 -1.5') # Scenario 3 def test_one_float(self): err_mess = "EWINDOW must be specified" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_energy_window('1.0') # Scenario 4 @@ -209,7 +209,7 @@ class TestParseEnergyWindow(arraytest.ArrayTestCase): # Scenario 5 def test_three_floats(self): err_mess = "EWINDOW must be specified" - with 
self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_energy_window('1.5 3.0 2.0') ################################################################################ @@ -246,13 +246,13 @@ class TestParseBandWindow(arraytest.ArrayTestCase): # Scenario 2 def test_wrong_range(self): err_mess = "The first int in BANDS" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_band_window('10 1') # Scenario 3 def test_one_float(self): err_mess = "BANDS must be specified" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_band_window('1') # Scenario 4 @@ -263,7 +263,7 @@ class TestParseBandWindow(arraytest.ArrayTestCase): # Scenario 5 def test_three_ints(self): err_mess = "BANDS must be specified" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.cpars.parse_band_window('1 2 3') ################################################################################ @@ -345,7 +345,7 @@ class TestParseStringDosmesh(arraytest.ArrayTestCase): # Scenario 3 def test_two_numbers(self): err_mess = "DOSMESH must be either" - with self.assertRaisesRegexp(ValueError, err_mess): + with self.assertRaisesRegex(ValueError, err_mess): self.cpars.parse_string_dosmesh('-8.0 101') # Scenario 4 diff --git a/test/plovasp/plocar_io/mytest.py b/test/plovasp/plocar_io/mytest.py index 1490b477..4ce354f0 100644 --- a/test/plovasp/plocar_io/mytest.py +++ b/test/plovasp/plocar_io/mytest.py @@ -43,8 +43,8 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] # diff delta = difflib.unified_diff(lstr1, lstr2) # combine delta's to a string diff --git a/test/plovasp/plocar_io/test_fileio.py b/test/plovasp/plocar_io/test_fileio.py index fc6c2bed..04b6cbf4 100644 --- a/test/plovasp/plocar_io/test_fileio.py +++ b/test/plovasp/plocar_io/test_fileio.py @@ -27,19 +27,19 @@ class TestFileIO(mytest.MyTestCase): # Scenario 1 def test_no_plocar(self): err_mess = "Error opening xPLOCAR" - with self.assertRaisesRegexp(IOError, err_mess): + with self.assertRaisesRegex(IOError, err_mess): read_plocar('xPLOCAR') # Scenario 2 def test_end_of_file(self): err_mess = "End-of-file reading" - with self.assertRaisesRegexp(IOError, err_mess): + with self.assertRaisesRegex(IOError, err_mess): read_plocar('PLOCAR.trunc') # Scenario 3 def test_wrong_prec(self): err_mess = "only 'prec = 4, 8' are supported" - with self.assertRaisesRegexp(ValueError, err_mess): + with self.assertRaisesRegex(ValueError, err_mess): read_plocar('PLOCAR.noprec') # Scenario 4 @@ -52,10 +52,10 @@ class TestFileIO(mytest.MyTestCase): f.write(" nlm =%5i\n"%(nlm)) ion = 1 isp = 1 - for ik in xrange(nk): - for ib in xrange(nb): + for ik in range(nk): + for ib in range(nb): f.write("%5i%5i%5i%5i%10.5f\n"%(ion, isp, ik+1, ib+1, ferw[0, 0, ik, ib])) - for ilm in xrange(nlm): + for ilm in range(nlm): p = plo[0, 0, ik, ib, ilm] f.write("%5i%15.7f%15.7f\n"%(ilm+1, p.real, p.imag)) @@ -75,13 +75,13 @@ class TestFileIO(mytest.MyTestCase): test_file = 'PLOCAR.example.out.test' with open(test_file, 'wt') as f: f.write("pars: 
%s\n"%(pars)) - for ion in xrange(nion): - for isp in xrange(ns): - for ik in xrange(nk): - for ib in xrange(nb): + for ion in range(nion): + for isp in range(ns): + for ik in range(nk): + for ib in range(nb): f.write("%5i%5i%5i%5i %s\n"%(ion+1, isp+1, ik+1, ib+1, ferw[ion, isp, ik, ib])) - for ilm in xrange(nlm): + for ilm in range(nlm): p = plo[ion, isp, ik, ib, ilm] f.write("%5i %s\n"%(ilm+1, p)) diff --git a/test/plovasp/plotools/mytest.py b/test/plovasp/plotools/mytest.py index 38f0689e..4cac121d 100644 --- a/test/plovasp/plotools/mytest.py +++ b/test/plovasp/plotools/mytest.py @@ -43,8 +43,8 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] # diff delta = difflib.unified_diff(lstr1, lstr2) # combine delta's to a string diff --git a/test/plovasp/proj_group/mytest.py b/test/plovasp/proj_group/mytest.py index 9d28fdbe..84f9dc59 100644 --- a/test/plovasp/proj_group/mytest.py +++ b/test/plovasp/proj_group/mytest.py @@ -45,8 +45,8 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] # diff delta = difflib.unified_diff(lstr1, lstr2) # combine delta's to a string diff --git a/test/plovasp/proj_group/test_block_map.py b/test/plovasp/proj_group/test_block_map.py index 27570c0e..b51068db 100644 --- a/test/plovasp/proj_group/test_block_map.py +++ b/test/plovasp/proj_group/test_block_map.py @@ -1,13 +1,13 @@ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' import numpy as np from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup -import mytest +from . import mytest ################################################################################ # @@ -30,19 +30,19 @@ class TestBlockMap(mytest.MyTestCase): nproj = 16 self.mock_plo = np.zeros((nproj, 1, 1, 11), dtype=np.complex128) - self.mock_proj_params = [{} for i in xrange(nproj)] + self.mock_proj_params = [{} for i in range(nproj)] ip = 0 # Mock d-sites - for isite in xrange(2): - for im in xrange(5): + for isite in range(2): + for im in range(5): self.mock_proj_params[ip]['label'] = 'd-orb' self.mock_proj_params[ip]['isite'] = isite + 1 self.mock_proj_params[ip]['l'] = 2 self.mock_proj_params[ip]['m'] = im ip += 1 # Mock p-sites - for isite in xrange(2, 4): - for im in xrange(3): + for isite in range(2, 4): + for im in range(3): self.mock_proj_params[ip]['label'] = 'p-orb' self.mock_proj_params[ip]['isite'] = isite + 1 self.mock_proj_params[ip]['l'] = 1 diff --git a/test/plovasp/proj_group/test_one_site.py b/test/plovasp/proj_group/test_one_site.py index d4307891..af83c885 100644 --- a/test/plovasp/proj_group/test_one_site.py +++ b/test/plovasp/proj_group/test_one_site.py @@ -1,6 +1,6 @@ import os -import rpath +from . 
import rpath _rpath = os.path.dirname(rpath.__file__) + '/' import numpy as np @@ -10,7 +10,7 @@ from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup from pytriqs.archive import HDFArchive -import mytest +from . import mytest ################################################################################ # diff --git a/test/plovasp/proj_group/test_one_site_compl.py b/test/plovasp/proj_group/test_one_site_compl.py index 1d9daa65..76c6f909 100644 --- a/test/plovasp/proj_group/test_one_site_compl.py +++ b/test/plovasp/proj_group/test_one_site_compl.py @@ -1,6 +1,6 @@ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' import numpy as np @@ -10,7 +10,7 @@ from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup from pytriqs.archive import HDFArchive -import mytest +from . import mytest ################################################################################ # @@ -46,7 +46,7 @@ class TestProjectorGroupCompl(mytest.MyTestCase): def test_num_bands(self): self.pars.groups[0]['complement'] = True err_mess = "At each band the same number" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): self.proj_gr = ProjectorGroup(self.pars.groups[0], [self.proj_sh], self.eigvals) def test_compl(self): @@ -66,9 +66,9 @@ class TestProjectorGroupCompl(mytest.MyTestCase): _, ns, nk, _, _ = self.proj_gr.shells[0].proj_win.shape # Note that 'ns' and 'nk' are the same for all shells - for isp in xrange(ns): - for ik in xrange(nk): - print('ik',ik) + for isp in range(ns): + for ik in range(nk): + print(('ik',ik)) bmin = self.proj_gr.ib_win[ik, isp, 0] bmax = self.proj_gr.ib_win[ik, isp, 1]+1 diff --git a/test/plovasp/proj_group/test_select_bands.py b/test/plovasp/proj_group/test_select_bands.py index 10f59ef8..bb9236b6 100644 --- a/test/plovasp/proj_group/test_select_bands.py +++ b/test/plovasp/proj_group/test_select_bands.py @@ -1,6 +1,6 @@ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' import numpy as np @@ -9,7 +9,7 @@ from triqs_dft_tools.converters.plovasp.elstruct import ElectronicStructure from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup -import mytest +from . 
import mytest ################################################################################ # @@ -58,14 +58,14 @@ class TestSelectBands(mytest.MyTestCase): def test_emin_too_large(self): self.proj_gr.emin = 20.0 self.proj_gr.emax = 25.0 - with self.assertRaisesRegexp(Exception, "No bands inside the window"): + with self.assertRaisesRegex(Exception, "No bands inside the window"): ib_win, nb_min, nb_max = self.proj_gr.select_bands(self.eigvals) # Scenario 3 def test_emax_too_small(self): self.proj_gr.emin = -50.0 self.proj_gr.emax = -55.0 - with self.assertRaisesRegexp(Exception, "Energy window does not overlap"): + with self.assertRaisesRegex(Exception, "Energy window does not overlap"): ib_win, nb_min, nb_max = self.proj_gr.select_bands(self.eigvals) diff --git a/test/plovasp/proj_group/test_two_site.py b/test/plovasp/proj_group/test_two_site.py index f3959447..22dc5187 100644 --- a/test/plovasp/proj_group/test_two_site.py +++ b/test/plovasp/proj_group/test_two_site.py @@ -1,6 +1,6 @@ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' import numpy as np @@ -10,7 +10,7 @@ from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup from pytriqs.archive import HDFArchive -import mytest +from . import mytest ################################################################################ # diff --git a/test/plovasp/proj_shell/mytest.py b/test/plovasp/proj_shell/mytest.py index fb0c64e8..4c56f35c 100644 --- a/test/plovasp/proj_shell/mytest.py +++ b/test/plovasp/proj_shell/mytest.py @@ -44,8 +44,8 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] # diff delta = difflib.unified_diff(lstr1, lstr2) # combine delta's to a string diff --git a/test/plovasp/proj_shell/test_projshells.py b/test/plovasp/proj_shell/test_projshells.py index 244821b8..46ce75f2 100644 --- a/test/plovasp/proj_shell/test_projshells.py +++ b/test/plovasp/proj_shell/test_projshells.py @@ -1,6 +1,6 @@ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' import numpy as np @@ -9,7 +9,7 @@ from triqs_dft_tools.converters.plovasp.elstruct import ElectronicStructure from triqs_dft_tools.converters.plovasp.inpconf import ConfigParameters from triqs_dft_tools.converters.plovasp.proj_shell import ProjectorShell from triqs_dft_tools.converters.plovasp.proj_group import ProjectorGroup -import mytest +from . 
import mytest ################################################################################ # @@ -52,14 +52,14 @@ class TestProjectorShell(mytest.MyTestCase): nion, ns, nk, nlm, nbtot = self.proj_sh.proj_win.shape with open(testout, 'wt') as f: f.write("pars: %s\n"%(self.pars.shells[0])) - for ion in xrange(nion): - for isp in xrange(ns): - for ik in xrange(nk): + for ion in range(nion): + for isp in range(ns): + for ik in range(nk): ib1 = self.proj_sh.ib_win[ik, 0, 0] ib2 = self.proj_sh.ib_win[ik, 0, 1] f.write("%i %i\n"%(ib1, ib2)) - for ib in xrange(ib2 - ib1 + 1): - for ilm in xrange(nlm): + for ib in range(ib2 - ib1 + 1): + for ilm in range(nlm): p = self.proj_sh.proj_win[ion, isp, ik, ilm, ib] f.write("%5i %f %f\n"%(ilm+1, p.real, p.imag)) diff --git a/test/plovasp/run_suite.py b/test/plovasp/run_suite.py index ec9a97b6..82e47c10 100644 --- a/test/plovasp/run_suite.py +++ b/test/plovasp/run_suite.py @@ -27,8 +27,8 @@ if __name__ == '__main__': if results.wasSuccessful(): raise SystemExit(0) else: - print "Failed tests:" + print("Failed tests:") for failure in results.failures: - print failure[0].__str__() + print(failure[0].__str__()) raise SystemExit(1) diff --git a/test/plovasp/vaspio/mytest.py b/test/plovasp/vaspio/mytest.py index f92cba6d..7c859089 100644 --- a/test/plovasp/vaspio/mytest.py +++ b/test/plovasp/vaspio/mytest.py @@ -44,9 +44,9 @@ class MyTestCase(unittest.TestCase): # Make a diff # # Remove empty lines - lstr1 = filter(lambda s: s.strip() != '', str1.splitlines(True)) + lstr1 = [s for s in str1.splitlines(True) if s.strip() != ''] lstr1 = [str1.replace(" ","") for str1 in lstr1] # Remove spaces - lstr2 = filter(lambda s: s.strip() != '', str2.splitlines(True)) + lstr2 = [s for s in str2.splitlines(True) if s.strip() != ''] lstr2 = [str2.replace(" ","") for str2 in lstr2] # Remove spaces # diff delta = difflib.unified_diff(lstr1, lstr2) diff --git a/test/plovasp/vaspio/test_doscar.py b/test/plovasp/vaspio/test_doscar.py index 8c43f8f2..efa32143 100644 --- a/test/plovasp/vaspio/test_doscar.py +++ b/test/plovasp/vaspio/test_doscar.py @@ -2,10 +2,10 @@ r""" Tests for class 'Doscar' from module 'vaspio' """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import mytest +from . import mytest import numpy as np from triqs_dft_tools.converters.plovasp.vaspio import Doscar diff --git a/test/plovasp/vaspio/test_eigenval.py b/test/plovasp/vaspio/test_eigenval.py index ec3264e8..675814d1 100644 --- a/test/plovasp/vaspio/test_eigenval.py +++ b/test/plovasp/vaspio/test_eigenval.py @@ -2,10 +2,10 @@ r""" Tests for class 'Eigneval' from module 'vaspio' """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import mytest +from . import mytest import numpy as np from triqs_dft_tools.converters.plovasp.vaspio import Eigenval @@ -55,6 +55,6 @@ class TestEigenval(mytest.MyTestCase): eigenval = Eigenval() err_mess = "EIGENVAL file is incorrect" - with self.assertRaisesRegexp(AssertionError, err_mess): + with self.assertRaisesRegex(AssertionError, err_mess): eigenval.from_file(vasp_dir=_rpath, eig_filename=filename) diff --git a/test/plovasp/vaspio/test_kpoints.py b/test/plovasp/vaspio/test_kpoints.py index f7410c3b..64cebb7c 100644 --- a/test/plovasp/vaspio/test_kpoints.py +++ b/test/plovasp/vaspio/test_kpoints.py @@ -2,10 +2,10 @@ r""" Tests for class 'Ibzkpt' from module 'vaspio' """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import mytest +from . 
import mytest import numpy as np from triqs_dft_tools.converters.plovasp.vaspio import Kpoints diff --git a/test/plovasp/vaspio/test_poscar.py b/test/plovasp/vaspio/test_poscar.py index 3ec48e16..c9c5982d 100644 --- a/test/plovasp/vaspio/test_poscar.py +++ b/test/plovasp/vaspio/test_poscar.py @@ -2,10 +2,10 @@ r""" Tests for class 'Poscar' from module 'vaspio' """ import os -import rpath +from . import rpath _rpath = os.path.dirname(rpath.__file__) + '/' -import mytest +from . import mytest import numpy as np from triqs_dft_tools.converters.plovasp.vaspio import Poscar diff --git a/test/sigma_from_file.py b/test/sigma_from_file.py index ac4a83cd..c1db639a 100644 --- a/test/sigma_from_file.py +++ b/test/sigma_from_file.py @@ -42,7 +42,7 @@ for name, s in Sigma_hdf: # Read self energy from txt files SK = SumkDFTTools(hdf_file = 'SrVO3.h5', use_dft_blocks = True) -a_list = [a for a,al in SK.gf_struct_solver[0].iteritems()] +a_list = [a for a,al in SK.gf_struct_solver[0].items()] g_list = [read_gf_from_txt([['Sigma_' + a + '.dat']], a) for a in a_list] Sigma_txt = BlockGf(name_list = a_list, block_list = g_list, make_copies=False)