Mirror of https://github.com/triqs/dft_tools, synced 2024-12-21 11:53:41 +01:00

Commit 906398894a (parent f803c13285)
Changes to old interface files to comply with new gf_struct.
Minor tidy-up too.
@@ -28,10 +28,10 @@ import string
from math import sqrt

def Read_Fortran_File (filename):
def read_fortran_file (filename):
""" Returns a generator that yields all numbers in the Fortran file as float, one by one"""
import os.path
if not(os.path.exists(filename)) : raise IOError, "File %s does not exists"%filename
if not(os.path.exists(filename)) : raise IOError, "File %s does not exist."%filename
for line in open(filename,'r') :
for x in line.replace('D','E').replace('(',' ').replace(')',' ').replace(',',' ').split() :
yield string.atof(x)
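For orientation, a minimal sketch (not part of the commit) of how this generator is consumed; the file name is made up, and the file is assumed to start with an integer followed by Fortran-style reals such as 1.0D+00:

    R = read_fortran_file('hamilt_example.dat')  # hypothetical input file
    n_k = int(R.next())                          # first number: e.g. how many k-points follow (Python 2 generator API)
    first_value = R.next()                       # 'D' exponents were already mapped to 'E', so float parsing just works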
@@ -40,22 +40,18 @@ def Read_Fortran_File (filename):

class HkConverter:
"""
Conversion from general H(k) file to an hdf5 file, that can be used as input for the SumK_LDA class.
Conversion from general H(k) file to an hdf5 file that can be used as input for the SumK_LDA class.
"""

def __init__(self, hk_file, hdf_file, lda_subgrp = 'SumK_LDA', symm_subgrp = 'SymmCorr', repacking = False):
"""
Init of the class. Variable Filename gives the root of all filenames, e.g. case.ctqmcout, case.h5, and so
Init of the class.
on.
"""

assert type(hk_file)==StringType,"hk_file must be a filename"
self.hdf_file = hdf_file
self.lda_file = hk_file
#self.Symm_file = Filename+'.symqmc'
#self.Parproj_file = Filename+'.parproj'
#self.Symmpar_file = Filename+'.sympar'
#self.Band_file = Filename+'.outband'
self.lda_subgrp = lda_subgrp
self.symm_subgrp = symm_subgrp
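A minimal usage sketch (not from the diff; the file names are placeholders, and it assumes HkConverter exposes the same convert_dmft_input() entry point as the Wien2kConverter shown further down):

    Converter = HkConverter(hk_file = 'case.hk', hdf_file = 'case.h5')
    Converter.convert_dmft_input()   # read case.hk and store the data in case.h5 under the 'SumK_LDA' subgroup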
@@ -72,12 +68,12 @@ class HkConverter:
"""

if not (mpi.is_master_node()): return # do it only on master:
# Read and write only on the master node
if not (mpi.is_master_node()): return
mpi.report("Reading input from %s..."%self.lda_file)

# Read and write only on Master!!!
# R is a generator : each R.Next() will return the next number in the file
R = Read_Fortran_File(self.lda_file)
R = read_fortran_file(self.lda_file)
try:
energy_unit = 1.0 # the energy conversion factor is 1.0, we assume eV in files
n_k = int(R.next()) # read the number of k points
@@ -101,7 +97,6 @@ class HkConverter:

self.inequiv_shells(corr_shells) # determine the number of inequivalent correlated shells, has to be known for further reading...

use_rotations = 0
rot_mat = [numpy.identity(corr_shells[icrsh][3],numpy.complex_) for icrsh in xrange(n_corr_shells)]
rot_mat_time_inv = [0 for i in range(n_corr_shells)]

@@ -109,16 +104,13 @@ class HkConverter:
# Representative representations are read from file
n_reps = [1 for i in range(self.n_inequiv_corr_shells)]
dim_reps = [0 for i in range(self.n_inequiv_corr_shells)]

T = []
for icrsh in range(self.n_inequiv_corr_shells):
n_reps[icrsh] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg
dim_reps[icrsh] = [int(R.next()) for i in range(n_reps[icrsh])] # dimensions of the subsets

# The transformation matrix:
# it is of dimension 2l+1, it is taken to be standard d (as in Wien2k)
T = []
for icrsh in range(self.n_inequiv_corr_shells):
#for ish in xrange(self.N_inequiv_corr_shells):
# The transformation matrix:
# is of dimension 2l+1, it is taken to be standard d (as in Wien2k)
ll = 2*corr_shells[self.invshellmap[icrsh]][2]+1
lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1)
T.append(numpy.zeros([lmax,lmax],numpy.complex_))
@@ -131,27 +123,21 @@ class HkConverter:

# Spin blocks to be read:
n_spin_blocks = SP + 1 - SO # number of spins to read for Norbs and Ham, NOT Projectors
n_spin_blocs = SP + 1 - SO # number of spins to read for Norbs and Ham, NOT Projectors

# define the number of N_Orbitals for all k points: it is the number of total bands and independent of k!
n_orb = sum([ shells[ish][3] for ish in range(n_shells)])
#n_orbitals = [ [n_orb for isp in range(n_spin_blocks)] for ik in xrange(n_k)]
n_orbitals = numpy.ones([n_k,n_spin_blocks],numpy.int) * n_orb
#print N_Orbitals
# define the number of n_orbitals for all k points: it is the number of total bands and independent of k!
n_orb = sum([ shells[ish][3] for ish in range(n_shells) ])
n_orbitals = numpy.ones([n_k,n_spin_blocs],numpy.int) * n_orb

# Initialise the projectors:
#proj_mat = [ [ [numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_)
# for icrsh in range (n_corr_shells)]
# for isp in range(n_spin_blocks)]
# for ik in range(n_k) ]
proj_mat = numpy.zeros([n_k,n_spin_blocks,n_corr_shells,max(numpy.array(corr_shells)[:,3]),max(n_orbitals)],numpy.complex_)
proj_mat = numpy.zeros([n_k,n_spin_blocs,n_corr_shells,max(numpy.array(corr_shells)[:,3]),max(n_orbitals)],numpy.complex_)
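The commented-out nested-list layout above is replaced by a single padded numpy array; a short sketch of the element access before and after (not part of the diff):

    # old nested lists :  proj_mat[ik][isp][icrsh][i,j]
    # new numpy array  :  proj_mat[ik,isp,icrsh,i,j]
    # The last two axes are padded to max(shell dimension) and max(n_orbitals),
    # so entries beyond a given shell or k-point simply remain zero.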
# Read the projectors from the file:
for ik in xrange(n_k):
for icrsh in range(n_corr_shells):
for isp in range(n_spin_blocks):
for isp in range(n_spin_blocs):

# calculate the offset:
offset = 0
@@ -169,9 +155,7 @@ class HkConverter:

# now define the arrays for weights and hopping ...
bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation
#hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_)
# for isp in range(n_spin_blocks)] for ik in xrange(n_k) ]
hopping = numpy.zeros([n_k,n_spin_blocks,max(n_orbitals),max(n_orbitals)],numpy.complex_)
hopping = numpy.zeros([n_k,n_spin_blocs,max(n_orbitals),max(n_orbitals)],numpy.complex_)

if (weights_in_file):
# weights in the file
@@ -181,11 +165,9 @@ class HkConverter:
sm = sum(bz_weights)
bz_weights[:] /= sm

# Grab the H
for ik in xrange(n_k) :
for isp in range(n_spin_blocks):

for isp in range(n_spin_blocs):
for ik in xrange(n_k) :
no = n_orbitals[ik,isp]

if (first_real_part_matrix):
@@ -220,24 +202,22 @@ class HkConverter:

if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate()

#keep some things that we need for reading parproj:
# keep some things that we need for reading parproj:
self.n_shells = n_shells
self.shells = shells
self.n_corr_shells = n_corr_shells
self.corr_shells = corr_shells
self.n_spin_blocks = n_spin_blocks
self.n_spin_blocs = n_spin_blocs
self.n_orbitals = n_orbitals
self.n_k = n_k
self.SO = SO
self.SP = SP
self.energy_unit = energy_unit
except StopIteration : # a more explicit error if the file is corrupted.
raise "SumK_LDA : reading file HMLT_file failed!"
raise "HK Converter : reading file lda_file failed!"

R.close()

#print Proj_Mat[0]

#-----------------------------------------
# Store the input into HDF5:
ar = HDFArchive(self.hdf_file,'a')
@@ -276,7 +256,7 @@ class HkConverter:

def __repack(self):
"""Calls the h5repack routine, in order to reduce the file size of the hdf5 archive.
Should only be used BEFORE the first invokation of HDF_Archive in the program, otherwise
Should only be used BEFORE the first invokation of HDFArchive in the program, otherwise
the hdf5 linking is broken!!!"""

import subprocess
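The body of __repack is not shown in this hunk; as a rough sketch only (assuming the h5repack tool from the HDF5 suite is on the PATH, and with made-up file names), such a helper typically does something like:

    import subprocess
    retcode = subprocess.call(["h5repack", "-i", "case.h5", "-o", "case_repacked.h5"])
    if retcode == 0:
        subprocess.call(["mv", "-f", "case_repacked.h5", "case.h5"])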
@@ -30,7 +30,7 @@ import string

def read_fortran_file (filename):
""" Returns a generator that yields all numbers in the Fortran file as float, one by one"""
import os.path
if not(os.path.exists(filename)) : raise IOError, "File %s does not exists"%filename
if not(os.path.exists(filename)) : raise IOError, "File %s does not exist."%filename
for line in open(filename,'r') :
for x in line.replace('D','E').split() :
yield string.atof(x)
@@ -39,13 +39,12 @@ def read_fortran_file (filename):

class Wien2kConverter:
"""
Conversion from Wien2k output to an hdf5 file, that can be used as input for the SumkLDA class.
Conversion from Wien2k output to an hdf5 file that can be used as input for the SumkLDA class.
"""

def __init__(self, filename, lda_subgrp = 'SumK_LDA', symm_subgrp = 'SymmCorr', repacking = False):
"""
Init of the class. Variable filename gives the root of all filenames, e.g. case.ctqmcout, case.h5, and so
on.
Init of the class. Variable filename gives the root of all filenames, e.g. case.ctqmcout, case.h5, and so on.
"""

assert type(filename)==StringType,"LDA_file must be a filename"
@@ -71,10 +70,10 @@ class Wien2kConverter:
"""

if not (mpi.is_master_node()): return # do it only on master:
# Read and write only on the master node
if not (mpi.is_master_node()): return
mpi.report("Reading input from %s..."%self.lda_file)

# Read and write only on Master!!!
# R is a generator : each R.Next() will return the next number in the file
R = read_fortran_file(self.lda_file)
try:
@@ -91,15 +90,13 @@ class Wien2kConverter:
n_shells = int(R.next()) # number of shells (e.g. Fe d, As p, O p) in the unit cell,
# corresponds to index R in formulas
shells = [ [ int(R.next()) for i in range(4) ] for icrsh in range(n_shells) ] # reads iatom, sort, l, dim
#shells = numpy.array(shells)

n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
# corresponds to index R in formulas
# now read the information about the shells:
corr_shells = [ [ int(R.next()) for i in range(6) ] for icrsh in range(n_corr_shells) ] # reads iatom, sort, l, dim, SO flag, irep

self.inequiv_shells(corr_shells) # determine the number of inequivalent correlated shells, has to be known for further reading...
#corr_shells = numpy.array(corr_shells)

use_rotations = 1
rot_mat = [numpy.identity(corr_shells[icrsh][3],numpy.complex_) for icrsh in xrange(n_corr_shells)]
@@ -120,7 +117,7 @@ class Wien2kConverter:

# Read here the infos for the transformation of the basis:
# Read here the info for the transformation of the basis:
n_reps = [1 for i in range(self.n_inequiv_corr_shells)]
dim_reps = [0 for i in range(self.n_inequiv_corr_shells)]
T = []
@@ -128,10 +125,8 @@ class Wien2kConverter:
n_reps[icrsh] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg
dim_reps[icrsh] = [int(R.next()) for i in range(n_reps[icrsh])] # dimensions of the subsets

# The transformation matrix:
# it is of dimension 2l+1, if no SO, and 2*(2l+1) with SO!!
#T = []
#for ish in xrange(self.n_inequiv_corr_shells):
# The transformation matrix:
# is of dimension 2l+1 without SO, and 2*(2l+1) with SO!
ll = 2*corr_shells[self.invshellmap[icrsh]][2]+1
lmax = ll * (corr_shells[self.invshellmap[icrsh]][4] + 1)
T.append(numpy.zeros([lmax,lmax],numpy.complex_))
@@ -151,21 +146,14 @@ class Wien2kConverter:

# read the list of n_orbitals for all k points
n_orbitals = numpy.zeros([n_k,n_spin_blocs],numpy.int)
#n_orbitals = [ [0 for isp in range(n_spin_blocs)] for ik in xrange(n_k)]
for isp in range(n_spin_blocs):
for ik in xrange(n_k):
#n_orbitals[ik][isp] = int(R.next())
n_orbitals[ik,isp] = int(R.next())
#print n_orbitals

# Initialise the projectors:
#proj_mat = [ [ [numpy.zeros([corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_)
# for icrsh in range (n_corr_shells)]
# for isp in range(n_spin_blocs)]
# for ik in range(n_k) ]
proj_mat = numpy.zeros([n_k,n_spin_blocs,n_corr_shells,max(numpy.array(corr_shells)[:,3]),max(n_orbitals)],numpy.complex_)

# Read the projectors from the file:
for ik in xrange(n_k):
@@ -175,39 +163,34 @@ class Wien2kConverter:
for isp in range(n_spin_blocs):
for i in xrange(no):
for j in xrange(n_orbitals[ik][isp]):
#proj_mat[ik][isp][icrsh][i,j] = R.next()
proj_mat[ik,isp,icrsh,i,j] = R.next()
# now Imag part:
for isp in range(n_spin_blocs):
for i in xrange(no):
for j in xrange(n_orbitals[ik][isp]):
#proj_mat[ik][isp][icrsh][i,j] += 1j * R.next()
proj_mat[ik,isp,icrsh,i,j] += 1j * R.next()

# now define the arrays for weights and hopping ...
bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation
#hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_)
# for isp in range(n_spin_blocs)] for ik in xrange(n_k) ]
hopping = numpy.zeros([n_k,n_spin_blocs,max(n_orbitals),max(n_orbitals)],numpy.complex_)

# weights in the file
for ik in xrange(n_k) : bz_weights[ik] = R.next()

# if the sum over spins is in the weights, take it out again!!
sm = sum(bz_weights)
bz_weights[:] /= sm

# Grab the H
# we use now the convention of a DIAGONAL Hamiltonian!!!!
for isp in range(n_spin_blocs):
for ik in xrange(n_k) :
no = n_orbitals[ik][isp]
no = n_orbitals[ik,isp]
for i in xrange(no):
#hopping[ik][isp][i,i] = R.next() * energy_unit
hopping[ik,isp,i,i] = R.next() * energy_unit

#keep some things that we need for reading parproj:
# keep some things that we need for reading parproj:
self.n_shells = n_shells
self.shells = shells
self.n_corr_shells = n_corr_shells
@@ -219,12 +202,10 @@ class Wien2kConverter:
self.SP = SP
self.energy_unit = energy_unit
except StopIteration : # a more explicit error if the file is corrupted.
raise "SumkLDA : reading file HMLT_file failed!"
raise "Wien2k_converter : reading file lda_file failed!"

R.close()

#print proj_mat[0]

#-----------------------------------------
# Store the input into HDF5:
ar = HDFArchive(self.hdf_file,'a')
@@ -279,25 +260,17 @@ class Wien2kConverter:
for isp in range(self.n_spin_blocs) ]

R = read_fortran_file(self.parproj_file)
#try:

n_parproj = [int(R.next()) for i in range(self.n_shells)]
n_parproj = numpy.array(n_parproj)

# Initialise P, here a double list of matrices:
#proj_mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], self.n_orbitals[ik][isp]], numpy.complex_)
# for ir in range(n_parproj[ish])]
# for ish in range (self.n_shells) ]
# for isp in range(self.n_spin_blocs) ]
# for ik in range(self.n_k) ]

proj_mat_pc = numpy.zeros([self.n_k,self.n_spin_blocs,self.n_shells,max(n_parproj),max(numpy.array(self.shells)[:,3]),max(self.n_orbitals)],numpy.complex_)

rot_mat_all = [numpy.identity(self.shells[ish][3],numpy.complex_) for ish in xrange(self.n_shells)]
rot_mat_all_time_inv = [0 for i in range(self.n_shells)]

for ish in range(self.n_shells):
#print ish
# read first the projectors for this orbital:
for ik in xrange(self.n_k):
for ir in range(n_parproj[ish]):
@@ -337,8 +310,6 @@ class Wien2kConverter:
if (self.SP):
rot_mat_all_time_inv[ish] = int(R.next())

#except StopIteration : # a more explicit error if the file is corrupted.
# raise "Wien2kConverter: reading file for Projectors failed!"
R.close()

#-----------------------------------------
@@ -378,10 +349,6 @@ class Wien2kConverter:
n_orbitals[ik,isp] = int(R.next())

# Initialise the projectors:
#proj_mat = [ [ [numpy.zeros([self.corr_shells[icrsh][3], n_orbitals[ik][isp]], numpy.complex_)
# for icrsh in range (self.n_corr_shells)]
# for isp in range(self.n_spin_blocs)]
# for ik in range(n_k) ]
proj_mat = numpy.zeros([n_k,self.n_spin_blocs,self.n_corr_shells,max(numpy.array(self.corr_shells)[:,3]),max(n_orbitals)],numpy.complex_)

# Read the projectors from the file:
@@ -399,8 +366,6 @@ class Wien2kConverter:
for j in xrange(n_orbitals[ik,isp]):
proj_mat[ik,isp,icrsh,i,j] += 1j * R.next()

#hopping = [ [numpy.zeros([n_orbitals[ik][isp],n_orbitals[ik][isp]],numpy.complex_)
# for isp in range(self.n_spin_blocs)] for ik in xrange(n_k) ]
hopping = numpy.zeros([n_k,self.n_spin_blocs,max(n_orbitals),max(n_orbitals)],numpy.complex_)

# Grab the H
@@ -416,11 +381,6 @@ class Wien2kConverter:
n_parproj = numpy.array(n_parproj)

# Initialise P, here a double list of matrices:
#proj_mat_pc = [ [ [ [numpy.zeros([self.shells[ish][3], n_orbitals[ik][isp]], numpy.complex_)
# for ir in range(n_parproj[ish])]
# for ish in range (self.n_shells) ]
# for isp in range(self.n_spin_blocs) ]
# for ik in range(n_k) ]
proj_mat_pc = numpy.zeros([n_k,self.n_spin_blocs,self.n_shells,max(n_parproj),max(numpy.array(self.shells)[:,3]),max(n_orbitals)],numpy.complex_)

@@ -439,7 +399,7 @@ class Wien2kConverter:
proj_mat_pc[ik,isp,ish,ir,i,j] += 1j * R.next()

except StopIteration : # a more explicit error if the file is corrupted.
raise "SumkLDA : reading file HMLT_file failed!"
raise "Wien2k_converter : reading file band_file failed!"

R.close()
# reading done!
@@ -448,18 +408,11 @@ class Wien2kConverter:
# Store the input into HDF5:
ar = HDFArchive(self.hdf_file,'a')
if not (self.bands_subgrp in ar): ar.create_group(self.bands_subgrp)

# The subgroup containing the data. If it does not exist, it is created.
# If it exists, the data is overwritten!!!
thingstowrite = ['n_k','n_orbitals','proj_mat','hopping','n_parproj','proj_mat_pc']
for it in thingstowrite: exec "ar['%s']['%s'] = %s"%(self.bands_subgrp,it,it)

#ar[self.bands_subgrp]['n_k'] = n_k
#ar[self.bands_subgrp]['n_orbitals'] = n_orbitals
#ar[self.bands_subgrp]['proj_mat'] = proj_mat
#self.proj_mat = proj_mat
#self.n_orbitals = n_orbitals
#self.n_k = n_k
#self.hopping = hopping
del ar
@@ -501,7 +454,7 @@ class Wien2kConverter:
mat[in_s][orb][i,j] += 1j * R.next() # imaginary part

# determine the inequivalent shells:
#SHOULD BE FINALLY REMOVED, PUT IT FOR ALL ORBITALS!!!!!
#SHOULD BE FINALLY REMOVED, PUT IT FOR ALL ORBITALS!!!!! (PS: FIXME?)
#self.inequiv_shells(orbits)
mat_tinv = [numpy.identity(orbits[orb][3],numpy.complex_)
for orb in range(n_orbits)]
@@ -519,7 +472,7 @@ class Wien2kConverter:

except StopIteration : # a more explicit error if the file is corrupted.
raise "Symmetry : reading file failed!"
raise "Wien2k_converter : reading file symm_file failed!"

R.close()
[File diff suppressed because it is too large.]
@@ -24,23 +24,17 @@ from types import *
import numpy
import pytriqs.utility.dichotomy as dichotomy
from pytriqs.gf.local import *
#from pytriqs.applications.impurity_solvers.operators import *
from pytriqs.operators import *
import pytriqs.utility.mpi as mpi
from datetime import datetime

#from pytriqs.applications.dft.symmetry import *
#from pytriqs.applications.dft.sumk_lda import SumkLDA
from symmetry import *
from sumk_lda import SumkLDA

import string


def read_fortran_file (filename):
""" Returns a generator that yields all numbers in the Fortran file as float, one by one"""
import os.path
if not(os.path.exists(filename)) : raise IOError, "File %s does not exists"%filename
if not(os.path.exists(filename)) : raise IOError, "File %s does not exist."%filename
for line in open(filename,'r') :
for x in line.replace('D','E').split() :
yield string.atof(x)
@ -56,12 +50,12 @@ class SumkLDATools(SumkLDA):
|
||||
self.Gupf_refreq = None
|
||||
SumkLDA.__init__(self,hdf_file=hdf_file,mu=mu,h_field=h_field,use_lda_blocks=use_lda_blocks,lda_data=lda_data,
|
||||
symm_corr_data=symm_corr_data,par_proj_data=par_proj_data,symm_par_data=symm_par_data,
|
||||
bands_data=bands_data)
|
||||
|
||||
bands_data=bands_data)
|
||||
|
||||
|
||||
def downfold_pc(self,ik,ir,ish,sig,gf_to_downfold,gf_inp):
|
||||
"""Downfolding a block of the Greens function"""
|
||||
|
||||
|
||||
gf_downfolded = gf_inp.copy()
|
||||
isp = self.names_to_ind[self.SO][sig] # get spin index for proj. matrices
|
||||
dim = self.shells[ish][3]
|
||||
@ -87,15 +81,15 @@ class SumkLDATools(SumkLDA):
|
||||
gf_rotated.from_L_G_R(self.rot_mat_all[ish].conjugate(),gf_rotated,self.rot_mat_all[ish].transpose())
|
||||
else:
|
||||
gf_rotated.from_L_G_R(self.rot_mat_all[ish],gf_rotated,self.rot_mat_all[ish].conjugate().transpose())
|
||||
|
||||
|
||||
elif (direction=='toLocal'):
|
||||
if ((self.rot_mat_all_time_inv[ish]==1)and(self.SO)):
|
||||
gf_rotated <<= gf_rotated.transpose()
|
||||
gf_rotated.from_L_G_R(self.rot_mat_all[ish].transpose(),gf_rotated,self.rot_mat_all[ish].conjugate())
|
||||
else:
|
||||
gf_rotated.from_L_G_R(self.rot_mat_all[ish].conjugate().transpose(),gf_rotated,self.rot_mat_all[ish])
|
||||
|
||||
|
||||
|
||||
|
||||
return gf_rotated
|
||||
|
||||
|
||||
@ -107,7 +101,7 @@ class SumkLDATools(SumkLDA):
|
||||
bln = self.block_names[self.SO]
|
||||
|
||||
if (not hasattr(self,"Sigma_imp")): with_Sigma=False
|
||||
if (with_Sigma):
|
||||
if (with_Sigma):
|
||||
assert self.Sigma_imp[0].note == 'ReFreq', "Real frequency Sigma needed for lattice_gf_realfreq!"
|
||||
stmp = self.add_dc()
|
||||
else:
|
||||
@ -139,7 +133,7 @@ class SumkLDATools(SumkLDA):
|
||||
glist = lambda : [ GfReFreq(indices = al, window=(mesh[0],mesh[1]),n_points=mesh[2]) for a,al in gf_struct]
|
||||
self.Gupf_refreq = BlockGf(name_list = a_list, block_list = glist(),make_copies=False)
|
||||
self.Gupf_refreq.zero()
|
||||
|
||||
|
||||
idmat = [numpy.identity(self.n_orbitals[ik,ntoi[bl]],numpy.complex_) for bl in bln]
|
||||
|
||||
self.Gupf_refreq <<= Omega + 1j*broadening
|
||||
@ -163,7 +157,7 @@ class SumkLDATools(SumkLDA):
|
||||
|
||||
|
||||
def check_input_dos(self, om_min, om_max, n_om, beta=10, broadening=0.01):
|
||||
|
||||
|
||||
|
||||
delta_om = (om_max-om_min)/(n_om-1)
|
||||
om_mesh = numpy.zeros([n_om],numpy.float_)
|
||||
@ -189,59 +183,59 @@ class SumkLDATools(SumkLDA):
|
||||
glist = lambda : [ GfReFreq(indices = al, window = (om_min,om_max), n_points = n_om) for a,al in self.gf_struct_corr[icrsh]]
|
||||
Gloc.append(BlockGf(name_list = b_list, block_list = glist(),make_copies=False))
|
||||
for icrsh in xrange(self.n_corr_shells): Gloc[icrsh].zero() # initialize to zero
|
||||
|
||||
|
||||
for ik in xrange(self.n_k):
|
||||
|
||||
Gupf=self.lattice_gf_realfreq(ik=ik,mu=self.chemical_potential,broadening=broadening,mesh=(om_min,om_max,n_om),with_Sigma=False)
|
||||
Gupf *= self.bz_weights[ik]
|
||||
|
||||
# non-projected DOS
|
||||
for iom in range(n_om):
|
||||
for sig,gf in Gupf:
|
||||
for iom in range(n_om):
|
||||
for sig,gf in Gupf:
|
||||
asd = gf.data[iom,:,:].imag.trace()/(-3.1415926535)
|
||||
DOS[sig][iom] += asd
|
||||
|
||||
|
||||
for icrsh in xrange(self.n_corr_shells):
|
||||
tmp = Gloc[icrsh].copy()
|
||||
for sig,gf in tmp: tmp[sig] <<= self.downfold(ik,icrsh,sig,Gupf[sig],gf) # downfolding G
|
||||
Gloc[icrsh] += tmp
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if (self.symm_op!=0): Gloc = self.Symm_corr.symmetrize(Gloc)
|
||||
|
||||
if (self.use_rotations):
|
||||
for icrsh in xrange(self.n_corr_shells):
|
||||
for sig,gf in Gloc[icrsh]: Gloc[icrsh][sig] <<= self.rotloc(icrsh,gf,direction='toLocal')
|
||||
|
||||
|
||||
# Gloc can now also be used to look at orbitally resolved quantities
|
||||
for ish in range(self.n_inequiv_corr_shells):
|
||||
for sig,gf in Gloc[self.invshellmap[ish]]: # loop over spins
|
||||
for iom in range(n_om): DOSproj[ish][sig][iom] += gf.data[iom,:,:].imag.trace()/(-3.1415926535)
|
||||
for iom in range(n_om): DOSproj[ish][sig][iom] += gf.data[iom,:,:].imag.trace()/(-3.1415926535)
|
||||
|
||||
DOSproj_orb[ish][sig][:,:,:] += gf.data[:,:,:].imag/(-3.1415926535)
|
||||
|
||||
|
||||
# output:
|
||||
if (mpi.is_master_node()):
|
||||
for bn in self.block_names[self.SO]:
|
||||
f=open('DOS%s.dat'%bn, 'w')
|
||||
for i in range(n_om): f.write("%s %s\n"%(om_mesh[i],DOS[bn][i]))
|
||||
f.close()
|
||||
f.close()
|
||||
|
||||
for ish in range(self.n_inequiv_corr_shells):
|
||||
f=open('DOS%s_proj%s.dat'%(bn,ish),'w')
|
||||
for i in range(n_om): f.write("%s %s\n"%(om_mesh[i],DOSproj[ish][bn][i]))
|
||||
f.close()
|
||||
|
||||
f.close()
|
||||
|
||||
for i in range(self.corr_shells[self.invshellmap[ish]][3]):
|
||||
for j in range(i,self.corr_shells[self.invshellmap[ish]][3]):
|
||||
Fname = 'DOS'+bn+'_proj'+str(ish)+'_'+str(i)+'_'+str(j)+'.dat'
|
||||
f=open(Fname,'w')
|
||||
for iom in range(n_om): f.write("%s %s\n"%(om_mesh[iom],DOSproj_orb[ish][bn][iom,i,j]))
|
||||
f.close()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def read_par_proj_input_from_hdf(self):
|
||||
"""
|
||||
@ -268,7 +262,7 @@ class SumkLDATools(SumkLDA):
|
||||
mu = self.chemical_potential
|
||||
|
||||
gf_struct_proj = [ [ (al, range(self.shells[i][3])) for al in self.block_names[self.SO] ] for i in xrange(self.n_shells) ]
|
||||
Gproj = [BlockGf(name_block_generator = [ (a,GfReFreq(indices = al, mesh = self.Sigma_imp[0].mesh)) for a,al in gf_struct_proj[ish] ], make_copies = False )
|
||||
Gproj = [BlockGf(name_block_generator = [ (a,GfReFreq(indices = al, mesh = self.Sigma_imp[0].mesh)) for a,al in gf_struct_proj[ish] ], make_copies = False )
|
||||
for ish in xrange(self.n_shells)]
|
||||
for ish in range(self.n_shells): Gproj[ish].zero()
|
||||
|
||||
@ -295,32 +289,32 @@ class SumkLDATools(SumkLDA):
|
||||
S *= self.bz_weights[ik]
|
||||
|
||||
# non-projected DOS
|
||||
for iom in range(n_om):
|
||||
for iom in range(n_om):
|
||||
for sig,gf in S: DOS[sig][iom] += gf.data[iom,:,:].imag.trace()/(-3.1415926535)
|
||||
|
||||
|
||||
#projected DOS:
|
||||
for ish in xrange(self.n_shells):
|
||||
tmp = Gproj[ish].copy()
|
||||
for ir in xrange(self.n_parproj[ish]):
|
||||
for sig,gf in tmp: tmp[sig] <<= self.downfold_pc(ik,ir,ish,sig,S[sig],gf)
|
||||
Gproj[ish] += tmp
|
||||
|
||||
|
||||
# collect data from mpi:
|
||||
for sig in DOS:
|
||||
DOS[sig] = mpi.all_reduce(mpi.world,DOS[sig],lambda x,y : x+y)
|
||||
for ish in xrange(self.n_shells):
|
||||
Gproj[ish] <<= mpi.all_reduce(mpi.world,Gproj[ish],lambda x,y : x+y)
|
||||
mpi.barrier()
|
||||
|
||||
mpi.barrier()
|
||||
|
||||
if (self.symm_op!=0): Gproj = self.Symm_par.symmetrize(Gproj)
|
||||
|
||||
# rotation to local coord. system:
|
||||
if (self.use_rotations):
|
||||
for ish in xrange(self.n_shells):
|
||||
for sig,gf in Gproj[ish]: Gproj[ish][sig] <<= self.rotloc_all(ish,gf,direction='toLocal')
|
||||
|
||||
|
||||
for ish in range(self.n_shells):
|
||||
for sig,gf in Gproj[ish]:
|
||||
for sig,gf in Gproj[ish]:
|
||||
for iom in range(n_om): DOSproj[ish][sig][iom] += gf.data[iom,:,:].imag.trace()/(-3.1415926535)
|
||||
DOSproj_orb[ish][sig][:,:,:] += gf.data[:,:,:].imag / (-3.1415926535)
|
||||
|
||||
@ -330,14 +324,14 @@ class SumkLDATools(SumkLDA):
|
||||
for bn in self.block_names[self.SO]:
|
||||
f=open('./DOScorr%s.dat'%bn, 'w')
|
||||
for i in range(n_om): f.write("%s %s\n"%(Msh[i],DOS[bn][i]))
|
||||
f.close()
|
||||
f.close()
|
||||
|
||||
# partial
|
||||
for ish in range(self.n_shells):
|
||||
f=open('DOScorr%s_proj%s.dat'%(bn,ish),'w')
|
||||
for i in range(n_om): f.write("%s %s\n"%(Msh[i],DOSproj[ish][bn][i]))
|
||||
f.close()
|
||||
|
||||
|
||||
for i in range(self.shells[ish][3]):
|
||||
for j in range(i,self.shells[ish][3]):
|
||||
Fname = './DOScorr'+bn+'_proj'+str(ish)+'_'+str(i)+'_'+str(j)+'.dat'
|
||||
@ -349,7 +343,7 @@ class SumkLDATools(SumkLDA):
|
||||
|
||||
|
||||
def spaghettis(self,broadening,shift=0.0,plot_range=None, ishell=None, invert_Akw=False, fermi_surface=False):
|
||||
""" Calculates the correlated band structure with a real-frequency self energy.
|
||||
""" Calculates the correlated band structure with a real-frequency self energy.
|
||||
ATTENTION: Many things from the original input file are are overwritten!!!"""
|
||||
|
||||
assert hasattr(self,"Sigma_imp"), "Set Sigma First!!"
|
||||
@ -358,13 +352,13 @@ class SumkLDATools(SumkLDA):
|
||||
if not retval: return retval
|
||||
|
||||
if fermi_surface: ishell=None
|
||||
|
||||
|
||||
# print hamiltonian for checks:
|
||||
if ((self.SP==1)and(self.SO==0)):
|
||||
f1=open('hamup.dat','w')
|
||||
f2=open('hamdn.dat','w')
|
||||
|
||||
for ik in xrange(self.n_k):
|
||||
|
||||
for ik in xrange(self.n_k):
|
||||
for i in xrange(self.n_orbitals[ik,0]):
|
||||
f1.write('%s %s\n'%(ik,self.hopping[ik,0,i,i].real))
|
||||
for i in xrange(self.n_orbitals[ik,1]):
|
||||
@ -381,7 +375,7 @@ class SumkLDATools(SumkLDA):
|
||||
f.write('\n')
|
||||
f.close()
|
||||
|
||||
|
||||
|
||||
#=========================================
|
||||
# calculate A(k,w):
|
||||
|
||||
@ -419,17 +413,17 @@ class SumkLDATools(SumkLDA):
|
||||
|
||||
for ik in xrange(self.n_k):
|
||||
|
||||
S = self.lattice_gf_realfreq(ik=ik,mu=mu,broadening=broadening)
|
||||
S = self.lattice_gf_realfreq(ik=ik,mu=mu,broadening=broadening)
|
||||
if (ishell is None):
|
||||
# non-projected A(k,w)
|
||||
for iom in range(n_om):
|
||||
for iom in range(n_om):
|
||||
if (M[iom]>om_minplot) and (M[iom]<om_maxplot):
|
||||
if fermi_surface:
|
||||
for sig,gf in S: Akw[sig][ik,0] += gf.data[iom,:,:].imag.trace()/(-3.1415926535) * (M[1]-M[0])
|
||||
else:
|
||||
for sig,gf in S: Akw[sig][ik,iom] += gf.data[iom,:,:].imag.trace()/(-3.1415926535)
|
||||
Akw[sig][ik,iom] += ik*shift # shift Akw for plotting in xmgrace
|
||||
|
||||
|
||||
|
||||
else:
|
||||
# projected A(k,w):
|
||||
@ -438,26 +432,26 @@ class SumkLDATools(SumkLDA):
|
||||
for ir in xrange(self.n_parproj[ishell]):
|
||||
for sig,gf in tmp: tmp[sig] <<= self.downfold_pc(ik,ir,ishell,sig,S[sig],gf)
|
||||
Gproj += tmp
|
||||
|
||||
|
||||
# TO BE FIXED:
|
||||
# rotate to local frame
|
||||
#if (self.use_rotations):
|
||||
# for sig,gf in Gproj: Gproj[sig] <<= self.rotloc(0,gf,direction='toLocal')
|
||||
|
||||
for iom in range(n_om):
|
||||
for iom in range(n_om):
|
||||
if (M[iom]>om_minplot) and (M[iom]<om_maxplot):
|
||||
for ish in range(self.shells[ishell][3]):
|
||||
for ibn in bln:
|
||||
Akw[ibn][ish,ik,iom] = Gproj[ibn].data[iom,ish,ish].imag/(-3.1415926535)
|
||||
|
||||
|
||||
|
||||
|
||||
# END k-LOOP
|
||||
if (mpi.is_master_node()):
|
||||
if (ishell is None):
|
||||
|
||||
|
||||
for ibn in bln:
|
||||
# loop over GF blocs:
|
||||
|
||||
|
||||
if (invert_Akw):
|
||||
maxAkw=Akw[ibn].max()
|
||||
minAkw=Akw[ibn].min()
|
||||
@ -472,10 +466,10 @@ class SumkLDATools(SumkLDA):
|
||||
for ik in range(self.n_k):
|
||||
if fermi_surface:
|
||||
if (invert_Akw):
|
||||
Akw[ibn][ik,0] = 1.0/(minAkw-maxAkw)*(Akw[ibn][ik,0] - maxAkw)
|
||||
Akw[ibn][ik,0] = 1.0/(minAkw-maxAkw)*(Akw[ibn][ik,0] - maxAkw)
|
||||
f.write('%s %s\n'%(ik,Akw[ibn][ik,0]))
|
||||
else:
|
||||
for iom in range(n_om):
|
||||
for iom in range(n_om):
|
||||
if (M[iom]>om_minplot) and (M[iom]<om_maxplot):
|
||||
if (invert_Akw):
|
||||
Akw[ibn][ik,iom] = 1.0/(minAkw-maxAkw)*(Akw[ibn][ik,iom] - maxAkw)
|
||||
@ -485,21 +479,21 @@ class SumkLDATools(SumkLDA):
|
||||
f.write('%s %s %s\n'%(ik,M[iom],Akw[ibn][ik,iom]))
|
||||
|
||||
f.write('\n')
|
||||
|
||||
|
||||
f.close()
|
||||
|
||||
else:
|
||||
for ibn in bln:
|
||||
for ish in range(self.shells[ishell][3]):
|
||||
|
||||
|
||||
if (invert_Akw):
|
||||
maxAkw=Akw[ibn][ish,:,:].max()
|
||||
minAkw=Akw[ibn][ish,:,:].min()
|
||||
|
||||
f=open('Akw_'+ibn+'_proj'+str(ish)+'.dat','w')
|
||||
f=open('Akw_'+ibn+'_proj'+str(ish)+'.dat','w')
|
||||
|
||||
for ik in range(self.n_k):
|
||||
for iom in range(n_om):
|
||||
for iom in range(n_om):
|
||||
if (M[iom]>om_minplot) and (M[iom]<om_maxplot):
|
||||
if (invert_Akw):
|
||||
Akw[ibn][ish,ik,iom] = 1.0/(minAkw-maxAkw)*(Akw[ibn][ish,ik,iom] - maxAkw)
|
||||
@@ -509,20 +503,22 @@ class SumkLDATools:
f.write('%s %s %s\n'%(ik,M[iom],Akw[ibn][ish,ik,iom]))

f.write('\n')

f.close()

def constr_Sigma_real_axis(self, filename, hdf=True, hdf_dataset='SigmaReFreq',n_om=0,orb=0, tol_mesh=1e-6):
"""Uses Data from files to construct Sigma (or GF) on the real axis."""

if not hdf:
# read sigma from text files
#first get the mesh out of one of the files:
if (len(self.gf_struct_solver[orb][0][1])==1):
Fname = filename+'_'+self.gf_struct_solver[orb][0][0]+'.dat'
if not hdf: # then read sigma from text files

# first get the mesh out of any one of the files:
bl = self.gf_struct_solver[orb].items()[0][0] # block name
ol = self.gf_struct_solver[orb].items()[0][1] # list of orbital indices
if (len(ol)==1): # if blocks are of size one
Fname = filename+'_'+bl+'.dat'
else:
Fname = filename+'_'+self.gf_struct_solver[orb][0][0]+'/'+str(self.gf_struct_solver[orb][0][1][0])+'_'+str(self.gf_struct_solver[orb][0][1][0])+'.dat'
Fname = filename+'_'+bl+'/'+str(ol[0])+'_'+str(ol[0])+'.dat'
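This hunk shows the data-structure change the commit adapts to: gf_struct_solver used to be a list of (block name, index list) pairs and is now a dict keyed by block name. A sketch with made-up values (not from the diff):

    # old: self.gf_struct_solver[orb] == [('up', [0, 1, 2]), ('down', [0, 1, 2])]
    #      block = self.gf_struct_solver[orb][0][0]          # 'up'
    #      inds  = self.gf_struct_solver[orb][0][1]          # [0, 1, 2]
    # new: self.gf_struct_solver[orb] == {'up': [0, 1, 2], 'down': [0, 1, 2]}
    #      block, inds = self.gf_struct_solver[orb].items()[0]   # Python 2 dict access; ordering not guaranteed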
R = read_fortran_file(Fname)
mesh = numpy.zeros([n_om],numpy.float_)
@@ -542,12 +538,11 @@ class SumkLDATools:
assert abs(i*bin+mesh[0]-mesh[i]) < tol_mesh, 'constr_Sigma_ME: real-axis mesh is non-uniform!'

# construct Sigma
a_list = [a for a,al in self.gf_struct_solver[orb]]
glist = lambda : [ GfReFreq(indices = al, window=(mesh[0],mesh[n_om-1]),n_points=n_om) for a,al in self.gf_struct_solver[orb]]
a_list = [a for a,al in self.gf_struct_solver[orb].iteritems()]
glist = lambda : [ GfReFreq(indices = al, window=(mesh[0],mesh[n_om-1]),n_points=n_om) for a,al in self.gf_struct_solver[orb].iteritems()]
SigmaME = BlockGf(name_list = a_list, block_list = glist(),make_copies=False)

#read Sigma

for i,g in SigmaME:
mesh=[w for w in g.mesh]
for iL in g.indices:
@@ -568,9 +563,8 @@ class SumkLDATools:
R.close()

else:
else: # read sigma from hdf

# read sigma from hdf
omega_min=0.0
omega_max=0.0
n_om=0
@@ -588,8 +582,8 @@ class SumkLDATools:
mpi.barrier()
# construct Sigma on other nodes
if (not mpi.is_master_node()):
a_list = [a for a,al in self.gf_struct_solver[orb]]
glist = lambda : [ GfReFreq(indices = al, window=(omega_min,omega_max),n_points=n_om) for a,al in self.gf_struct_solver[orb]]
a_list = [a for a,al in self.gf_struct_solver[orb].iteritems()]
glist = lambda : [ GfReFreq(indices = al, window=(omega_min,omega_max),n_points=n_om) for a,al in self.gf_struct_solver[orb].iteritems()]
SigmaME = BlockGf(name_list = a_list, block_list = glist(),make_copies=False)
# pass SigmaME to other nodes
SigmaME = mpi.bcast(SigmaME)
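This is the usual master/slave idiom: the master reads the self energy, the other nodes build an empty object of the right shape, and the broadcast leaves every node with the same data. A generic sketch of the pattern with the same pytriqs mpi module (load_input is a hypothetical reader, not from the diff):

    import pytriqs.utility.mpi as mpi
    if mpi.is_master_node():
        data = load_input()   # expensive read, done once on the master
    else:
        data = None           # or an empty object of matching structure, as done above
    data = mpi.bcast(data)    # afterwards every node holds the master's copy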
|
||||
@ -599,23 +593,23 @@ class SumkLDATools(SumkLDA):
|
||||
|
||||
return SigmaME
|
||||
|
||||
|
||||
|
||||
|
||||
def partial_charges(self,beta=40):
|
||||
"""Calculates the orbitally-resolved density matrix for all the orbitals considered in the input.
|
||||
The theta-projectors are used, hence case.parproj data is necessary"""
|
||||
|
||||
|
||||
|
||||
#thingstoread = ['Dens_Mat_below','N_parproj','Proj_Mat_pc','rotmat_all']
|
||||
#retval = self.read_input_from_HDF(SubGrp=self.par_proj_data,thingstoread=thingstoread)
|
||||
retval = self.read_par_proj_input_from_hdf()
|
||||
if not retval: return retval
|
||||
if self.symm_op: self.Symm_par = Symmetry(self.hdf_file,subgroup=self.symm_par_data)
|
||||
|
||||
|
||||
# Density matrix in the window
|
||||
bln = self.block_names[self.SO]
|
||||
ntoi = self.names_to_ind[self.SO]
|
||||
self.dens_mat_window = [ [numpy.zeros([self.shells[ish][3],self.shells[ish][3]],numpy.complex_) for ish in range(self.n_shells)]
|
||||
self.dens_mat_window = [ [numpy.zeros([self.shells[ish][3],self.shells[ish][3]],numpy.complex_) for ish in range(self.n_shells)]
|
||||
for isp in range(len(bln)) ] # init the density matrix
|
||||
|
||||
mu = self.chemical_potential
|
||||
@ -633,7 +627,7 @@ class SumkLDATools(SumkLDA):
|
||||
ikarray=numpy.array(range(self.n_k))
|
||||
#print mpi.rank, mpi.slice_array(ikarray)
|
||||
#print "K-Sum starts on node",mpi.rank," at ",datetime.now()
|
||||
|
||||
|
||||
for ik in mpi.slice_array(ikarray):
|
||||
#print mpi.rank, ik, datetime.now()
|
||||
S = self.lattice_gf_matsubara(ik=ik,mu=mu,beta=beta)
|
||||
@ -644,7 +638,7 @@ class SumkLDATools(SumkLDA):
|
||||
for ir in xrange(self.n_parproj[ish]):
|
||||
for sig,gf in tmp: tmp[sig] <<= self.downfold_pc(ik,ir,ish,sig,S[sig],gf)
|
||||
Gproj[ish] += tmp
|
||||
|
||||
|
||||
#print "K-Sum done on node",mpi.rank," at ",datetime.now()
|
||||
#collect data from mpi:
|
||||
for ish in xrange(self.n_shells):
|
||||
@ -656,7 +650,7 @@ class SumkLDATools(SumkLDA):
|
||||
# Symmetrisation:
|
||||
if (self.symm_op!=0): Gproj = self.Symm_par.symmetrize(Gproj)
|
||||
#print "Symmetrisation done on node",mpi.rank," at ",datetime.now()
|
||||
|
||||
|
||||
for ish in xrange(self.n_shells):
|
||||
|
||||
# Rotation to local:
|
||||
@ -667,11 +661,9 @@ class SumkLDATools(SumkLDA):
|
||||
for sig,gf in Gproj[ish]: #dmg.append(Gproj[ish].density()[sig])
|
||||
self.dens_mat_window[isp][ish] = Gproj[ish].density()[sig]
|
||||
isp+=1
|
||||
|
||||
|
||||
# add Density matrices to get the total:
|
||||
dens_mat = [ [ self.dens_mat_below[ntoi[bln[isp]]][ish]+self.dens_mat_window[isp][ish] for ish in range(self.n_shells)]
|
||||
for isp in range(len(bln)) ]
|
||||
|
||||
return dens_mat
|
||||
|
||||
|
||||
|
@@ -60,24 +60,24 @@ class Symmetry:

#broadcasting
for it in thingstoread: exec "self.%s = mpi.bcast(self.%s)"%(it,it)

# now define the mapping of orbitals:
# self.map[iorb]=jorb gives the permutation of the orbitals as given in the list, when the
# self.map[iorb]=jorb gives the permutation of the orbitals as given in the list, when the
# permutation of the atoms is done:
self.n_orbits = len(self.orbits)

self.map = [ [0 for iorb in range(self.n_orbits)] for in_s in range(self.n_s) ]
for in_s in range(self.n_s):
for iorb in range(self.n_orbits):

srch = copy.deepcopy(self.orbits[iorb])
srch[0] = self.perm[in_s][self.orbits[iorb][0]-1]
self.map[in_s][iorb] = self.orbits.index(srch)

def symmetrize(self,obj):

assert isinstance(obj,list),"obj has to be a list of objects!"
assert len(obj)==self.n_orbits,"obj has to be a list of the same length as defined in the init"

@@ -88,14 +88,14 @@ class Symmetry:
# if not a BlockGf, we assume it is a matrix (density matrix), has to be complex since self.mat is complex!
#symm_obj = [ numpy.zeros([self.orbits[iorb][3],self.orbits[iorb][3]],numpy.complex_) for iorb in range(self.n_orbits) ]
symm_obj = [ copy.deepcopy(obj[i]) for i in range(len(obj)) ]

for iorb in range(self.n_orbits):
if (type(symm_obj[iorb])==DictType):
for ii in symm_obj[iorb]: symm_obj[iorb][ii] *= 0.0
else:
symm_obj[iorb] *= 0.0

for in_s in range(self.n_s):

for iorb in range(self.n_orbits):

@@ -104,13 +104,9 @@ class Symmetry:
dim = self.orbits[iorb][3]
jorb = self.map[in_s][iorb]

if (isinstance(obj[0],BlockGf)):

#if l==0:
# symm_obj[jorb] += obj[iorb]
#else:

tmp = obj[iorb].copy()
if (self.time_inv[in_s]): tmp <<= tmp.transpose()
for sig,gf in tmp: tmp[sig].from_L_G_R(self.mat[in_s][iorb],tmp[sig],self.mat[in_s][iorb].conjugate().transpose())

@@ -122,9 +118,6 @@ class Symmetry:
if (type(obj[iorb])==DictType):

for ii in obj[iorb]:
#if (l==0):
# symm_obj[jorb][ii] += obj[iorb][ii]/self.n_s
#else:
if (self.time_inv[in_s]==0):
symm_obj[jorb][ii] += numpy.dot(numpy.dot(self.mat[in_s][iorb],obj[iorb][ii]),
self.mat[in_s][iorb].conjugate().transpose()) / self.n_s
@@ -132,20 +125,17 @@ class Symmetry:
symm_obj[jorb][ii] += numpy.dot(numpy.dot(self.mat[in_s][iorb],obj[iorb][ii].conjugate()),
self.mat[in_s][iorb].conjugate().transpose()) / self.n_s

else:
#if (l==0):
# symm_obj[jorb] += obj[iorb]/self.n_s
#else:
if (self.time_inv[in_s]==0):
symm_obj[jorb] += numpy.dot(numpy.dot(self.mat[in_s][iorb],obj[iorb]),self.mat[in_s][iorb].conjugate().transpose()) / self.n_s
else:
symm_obj[jorb] += numpy.dot(numpy.dot(self.mat[in_s][iorb],obj[iorb].conjugate()),
self.mat[in_s][iorb].conjugate().transpose()) / self.n_s
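Condensed into one helper, each term of the sum above applies the symmetry matrix from both sides and divides by the number of operations (a sketch, not part of the diff; mat stands for self.mat[in_s][iorb], block for the matrix being symmetrised):

    import numpy

    def one_symmetry_term(mat, block, time_inv, n_s):
        """One term of the symmetrisation sum, as a plain-numpy sketch."""
        x = block.conjugate() if time_inv else block     # time inversion acts as complex conjugation
        return numpy.dot(numpy.dot(mat, x), mat.conjugate().transpose()) / n_s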
# This does not what it is supposed to do, check how this should work:

# This does not what it is supposed to do, check how this should work:
# if ((self.SO==0) and (self.SP==0)):
# # add time inv:
#mpi.report("Add time inversion")
@@ -156,7 +146,7 @@ class Symmetry:
# for sig,gf in tmp: tmp[sig].from_L_G_R(self.mat_tinv[iorb],tmp[sig],self.mat_tinv[iorb].transpose().conjugate())
# symm_obj[iorb] += tmp
# symm_obj[iorb] /= 2.0
#
#
# else:
# if (type(symm_obj[iorb])==DictType):
# for ii in symm_obj[iorb]:
@@ -167,10 +157,10 @@ class Symmetry:
# symm_obj[iorb] += numpy.dot(numpy.dot(self.mat_tinv[iorb],symm_obj[iorb].conjugate()),
# self.mat_tinv[iorb].transpose().conjugate())
# symm_obj[iorb] /= 2.0

return symm_obj
@@ -8,7 +8,7 @@ import copy
import pytriqs.utility.mpi as mpi

class TransBasis:
'''Computates rotations into a new basis, in order to make certain quantities diagonal.'''
'''Computates rotations into a new basis in order to make certain quantities diagonal.'''

def __init__(self, SK=None, hdf_datafile=None):
@@ -19,18 +19,18 @@ class TransBasis:
if (hdf_datafile==None):
mpi.report("Give SK instance or HDF filename!")
return 0

Converter = Wien2kConverter(filename=hdf_datafile,repacking=False)
Converter.convert_dmft_input()
del Converter

self.SK = SumkLDA(hdf_file=hdf_datafile+'.h5',use_lda_blocks=False)
else:
self.SK = SK

self.T = copy.deepcopy(self.SK.T[0])
self.w = numpy.identity(SK.corr_shells[0][3])
def __call__(self, prop_to_be_diagonal = 'eal'):
@@ -49,10 +49,10 @@ class TransBasis:

# now calculate new Transformation matrix
self.T = numpy.dot(self.T.transpose().conjugate(),self.w).conjugate().transpose()

#return numpy.dot(self.w.transpose().conjugate(),numpy.dot(eal['up'],self.w))

else:
self.eig,self.w = numpy.linalg.eigh(eal['ud'])
@@ -60,17 +60,17 @@ class TransBasis:
# now calculate new Transformation matrix
self.T = numpy.dot(self.T.transpose().conjugate(),self.w).conjugate().transpose()

#MPI.report("SO not implemented yet!")
#return 0

# measure for the 'unity' of the transformation:
wsqr = sum(abs(self.w.diagonal())**2)/self.w.diagonal().size
return wsqr
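A self-contained sketch of the construction performed above (the 'eal' block and matrix size are made up; the names mirror the code but are plain local variables):

    import numpy

    eal = numpy.diag([0.1, 0.1, 0.2, 0.3, 0.3])              # made-up effective atomic levels
    T_old = numpy.identity(5, numpy.complex_)                 # previous transformation matrix
    eig, w = numpy.linalg.eigh(eal)                           # eigenbasis that diagonalises eal
    T_new = numpy.dot(T_old.transpose().conjugate(), w).conjugate().transpose()
    wsqr = sum(abs(w.diagonal())**2) / w.diagonal().size      # 1.0 when w is exactly the identity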
def rotate_gf(self,gf_to_rot):
'''rotates a given GF into the new basis'''
'''Rotates a given GF into the new basis.'''

# build a full GF
gfrotated = BlockGf( name_block_generator = [ (a,GfImFreq(indices = al, mesh = gf_to_rot.mesh)) for a,al in self.SK.gf_struct_corr[0] ], make_copies = False)
@@ -78,12 +78,11 @@ class TransBasis:

# transform the CTQMC blocks to the full matrix:
s = self.SK.shellmap[0] # s is the index of the inequivalent shell corresponding to icrsh
for ibl in range(len(self.SK.gf_struct_solver[s])):
for i in range(len(self.SK.gf_struct_solver[s][ibl][1])):
for j in range(len(self.SK.gf_struct_solver[s][ibl][1])):
bl = self.SK.gf_struct_solver[s][ibl][0]
ind1 = self.SK.gf_struct_solver[s][ibl][1][i]
ind2 = self.SK.gf_struct_solver[s][ibl][1][j]
for bl, orblist in self.gf_struct_solver[s].iteritems():
for i in range(len(orblist)):
for j in range(len(orblist)):
ind1 = orblist[i]
ind2 = orblist[j]
gfrotated[self.SK.map_inv[s][bl]][ind1,ind2] <<= gf_to_rot[bl][ind1,ind2]

# Rotate using the matrix w
@@ -92,27 +91,26 @@ class TransBasis:

gfreturn = gf_to_rot.copy()
# Put back into CTQMC basis:
for ibl in range(len(self.SK.gf_struct_solver[0])):
for i in range(len(self.SK.gf_struct_solver[0][ibl][1])):
for j in range(len(self.SK.gf_struct_solver[0][ibl][1])):
bl = self.SK.gf_struct_solver[0][ibl][0]
ind1 = self.SK.gf_struct_solver[0][ibl][1][i]
ind2 = self.SK.gf_struct_solver[0][ibl][1][j]
for bl, orblist in self.gf_struct_solver[s].iteritems():
for i in range(len(orblist)):
for j in range(len(orblist)):
ind1 = orblist[i]
ind2 = orblist[j]
gfreturn[bl][ind1,ind2] <<= gfrotated[self.SK.map_inv[0][bl]][ind1,ind2]

return gfreturn
def write_trans_file(self, filename):
'''writes the new transformation into a file, readable for dmftproj.'''
'''Writes the new transformation into a file readable by dmftproj.'''

f=open(filename,'w')

Tnew = self.T.conjugate()
N = self.SK.corr_shells[0][3]

if (self.SK.SO==0):

for i in range(N):
st = ''
for k in range(N):
@@ -120,14 +118,14 @@ class TransBasis:
st += " %9.6f"%(Tnew[i,k].imag)
for k in range(2*N):
st += " 0.0"

if (i<(N-1)):
f.write("%s\n"%(st))
else:
st1=st.replace(' ','*',1)
f.write("%s\n"%(st1))

for i in range(N):
st = ''
for k in range(2*N):
@@ -135,7 +133,7 @@ class TransBasis:
for k in range(N):
st += " %9.6f"%(Tnew[i,k].real)
st += " %9.6f"%(Tnew[i,k].imag)

if (i<(N-1)):
f.write("%s\n"%(st))
else:
@@ -149,15 +147,12 @@ class TransBasis:
for k in range(N):
st += " %9.6f"%(Tnew[i,k].real)
st += " %9.6f"%(Tnew[i,k].imag)

if (i<(N-1)):
f.write("%s\n"%(st))
else:
st1=st.replace(' ','*',1)
f.write("%s\n"%(st1))
#MPI.report("SO not implemented!")

f.close()