Mirror of https://github.com/triqs/dft_tools, synced 2024-12-21 11:53:41 +01:00

Commit 390e8564b7 (parent 841f840df5)

Minor clean up, pep-ified to allow doc compilation to run smoothly
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,11 +18,12 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from sumk_dft import SumkDFT
 from symmetry import Symmetry
 from sumk_dft_tools import SumkDFTTools
 from converters import *

-__all__=['SumkDFT','Symmetry','SumkDFTTools','Wien2kConverter','HkConverter']
+__all__ = ['SumkDFT', 'Symmetry', 'SumkDFTTools',
+           'Wien2kConverter', 'HkConverter']
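Aside, not part of the commit: a minimal sketch of what the re-exported names above give a user script. The install path pytriqs.applications.dft is an assumption (it does not appear in this diff); only the names come from the __all__ list.

    # Hypothetical usage; the package path is assumed, the names are from __all__ above.
    from pytriqs.applications.dft import SumkDFT, Symmetry, SumkDFTTools
    from pytriqs.applications.dft.converters import Wien2kConverter, HkConverter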
@@ -3,8 +3,8 @@ import sys
 import subprocess

 if len(sys.argv) < 2:
     print "Usage: python clear_h5_output.py archive"
     sys.exit()

 print """
 This script is to remove any SumkDFT generated output from the h5 archive
@@ -13,13 +13,14 @@ and to restore it to the original post-converter state.

 filename = sys.argv[1]
 A = h5py.File(filename)
-for group in ['dmft_output','user_data']:
-    if group in A: del(A[group])
+for group in ['dmft_output', 'user_data']:
+    if group in A:
+        del(A[group])
 A.close()

 # Repack to reclaim disk space
-retcode = subprocess.call(["h5repack","-i%s"%filename, "-otemphgfrt.h5"])
+retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"])
 if retcode != 0:
     print "h5repack failed!"
 else:
-    subprocess.call(["mv","-f","temphgfrt.h5","%s"%filename])
+    subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename])
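For context (not part of the commit): a minimal sketch of the clean-up this script performs, done by hand with h5py; 'my_archive.h5' is a placeholder name, not a file from this repository.

    # Drop the SumkDFT output groups from an archive, keeping the converter data.
    import h5py

    with h5py.File('my_archive.h5', 'a') as ar:   # placeholder archive name
        for group in ['dmft_output', 'user_data']:
            if group in ar:
                del ar[group]
    # The script then shells out to h5repack, as shown above, to reclaim the freed space.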
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,12 +18,10 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from wien2k_converter import Wien2kConverter
 from hk_converter import HkConverter
 from wannier90_converter import Wannier90Converter

-__all__ =['Wien2kConverter','HkConverter','Wannier90Converter']
-
-
+__all__ = ['Wien2kConverter', 'HkConverter', 'Wannier90Converter']
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,16 +18,17 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################
 from pytriqs.cmake_info import hdf5_command_path
 import pytriqs.utility.mpi as mpi

+
 class ConverterTools:

     def __init__(self):
         pass

-    def read_fortran_file(self,filename,to_replace):
+    def read_fortran_file(self, filename, to_replace):
         """
         Returns a generator that yields all numbers in the Fortran file as float, with possible replacements.

@@ -37,7 +38,7 @@ class ConverterTools:
             Name of Fortran-produced file.
         to_replace : dict of str:str
             Dictionary defining old_char:new_char.

         Yields
         ------
         string
@@ -46,11 +47,13 @@
         """
         import os.path
         import string
-        if not(os.path.exists(filename)) : raise IOError, "File %s does not exist."%filename
-        for line in open(filename,'r') :
-            for old,new in to_replace.iteritems(): line = line.replace(old,new)
-            for x in line.split(): yield string.atof(x)
+        if not(os.path.exists(filename)):
+            raise IOError, "File %s does not exist." % filename
+        for line in open(filename, 'r'):
+            for old, new in to_replace.iteritems():
+                line = line.replace(old, new)
+            for x in line.split():
+                yield string.atof(x)

     def repack(self):
         """
@@ -65,17 +68,18 @@ class ConverterTools:

         import subprocess

-        if not (mpi.is_master_node()): return
-        mpi.report("Repacking the file %s"%self.hdf_file)
+        if not (mpi.is_master_node()):
+            return
+        mpi.report("Repacking the file %s" % self.hdf_file)

-        retcode = subprocess.call([hdf5_command_path+"/h5repack","-i%s"%self.hdf_file,"-otemphgfrt.h5"])
+        retcode = subprocess.call(
+            [hdf5_command_path + "/h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
         if retcode != 0:
             mpi.report("h5repack failed!")
         else:
-            subprocess.call(["mv","-f","temphgfrt.h5","%s"%self.hdf_file])
+            subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])


-    def det_shell_equivalence(self,corr_shells):
+    def det_shell_equivalence(self, corr_shells):
         """
         Determine the equivalence of correlated shells.

@@ -83,7 +87,7 @@ class ConverterTools:
         ----------
         corr_shells : list of dicts
             See documentation of necessary hdf5 elements.

         Returns
         -------
         n_inequiv_shells : integer
@@ -105,19 +109,19 @@ class ConverterTools:
         n_inequiv_shells = 1

         if len(corr_shells) > 1:
-            inequiv_sort = [ corr_shells[0]['sort'] ]
-            inequiv_l = [ corr_shells[0]['l'] ]
-            for i in range(len(corr_shells)-1):
+            inequiv_sort = [corr_shells[0]['sort']]
+            inequiv_l = [corr_shells[0]['l']]
+            for i in range(len(corr_shells) - 1):
                 is_equiv = False
                 for j in range(n_inequiv_shells):
-                    if (inequiv_sort[j]==corr_shells[i+1]['sort']) and (inequiv_l[j]==corr_shells[i+1]['l']):
+                    if (inequiv_sort[j] == corr_shells[i + 1]['sort']) and (inequiv_l[j] == corr_shells[i + 1]['l']):
                         is_equiv = True
-                        corr_to_inequiv[i+1] = j
-                if is_equiv==False:
-                    corr_to_inequiv[i+1] = n_inequiv_shells
+                        corr_to_inequiv[i + 1] = j
+                if is_equiv == False:
+                    corr_to_inequiv[i + 1] = n_inequiv_shells
                     n_inequiv_shells += 1
-                    inequiv_sort.append( corr_shells[i+1]['sort'] )
-                    inequiv_l.append( corr_shells[i+1]['l'] )
-                    inequiv_to_corr.append( i+1 )
+                    inequiv_sort.append(corr_shells[i + 1]['sort'])
+                    inequiv_l.append(corr_shells[i + 1]['l'])
+                    inequiv_to_corr.append(i + 1)

         return n_inequiv_shells, corr_to_inequiv, inequiv_to_corr
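Aside (not part of the commit): a minimal sketch of how det_shell_equivalence behaves, assuming a working TRIQS/dft_tools installation so that converter_tools imports; the two-shell input below is made up purely for illustration.

    from converter_tools import ConverterTools

    # Two correlated shells on different atoms but with the same 'sort' and 'l':
    corr_shells = [{'atom': 1, 'sort': 1, 'l': 2, 'dim': 5, 'SO': 0, 'irep': 0},
                   {'atom': 2, 'sort': 1, 'l': 2, 'dim': 5, 'SO': 0, 'irep': 0}]
    n_ineq, corr_to_inequiv, inequiv_to_corr = ConverterTools().det_shell_equivalence(corr_shells)
    # Expected: a single inequivalent shell with both shells mapped onto it
    # (n_ineq == 1, corr_to_inequiv == [0, 0], inequiv_to_corr == [0]).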
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,7 +18,7 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from types import *
 import numpy
@@ -27,12 +27,13 @@ import pytriqs.utility.mpi as mpi
 from math import sqrt
 from converter_tools import *

+
 class HkConverter(ConverterTools):
     """
     Conversion from general H(k) file to an hdf5 file that can be used as input for the SumKDFT class.
     """

-    def __init__(self, filename, hdf_filename = None, dft_subgrp = 'dft_input', symmcorr_subgrp = 'dft_symmcorr_input', repacking = False):
+    def __init__(self, filename, hdf_filename=None, dft_subgrp='dft_input', symmcorr_subgrp='dft_symmcorr_input', repacking=False):
         """
         Initialise the class.

@@ -49,24 +50,25 @@ class HkConverter(ConverterTools):
             The group is actually empty; it is just included for compatibility.
         repacking : boolean, optional
             Does the hdf5 archive need to be repacked to save space?

         """

-        assert type(filename)==StringType,"HkConverter: filename must be a filename."
-        if hdf_filename is None: hdf_filename = filename+'.h5'
+        assert type(
+            filename) == StringType, "HkConverter: filename must be a filename."
+        if hdf_filename is None:
+            hdf_filename = filename + '.h5'
         self.hdf_file = hdf_filename
         self.dft_file = filename
         self.dft_subgrp = dft_subgrp
         self.symmcorr_subgrp = symmcorr_subgrp
-        self.fortran_to_replace = {'D':'E', '(':' ', ')':' ', ',':' '}
+        self.fortran_to_replace = {'D': 'E', '(': ' ', ')': ' ', ',': ' '}

         # Checks if h5 file is there and repacks it if wanted:
         import os.path
         if (os.path.exists(self.hdf_file) and repacking):
             ConverterTools.repack(self)

-    def convert_dft_input(self, first_real_part_matrix = True, only_upper_triangle = False, weights_in_file = False):
+    def convert_dft_input(self, first_real_part_matrix=True, only_upper_triangle=False, weights_in_file=False):
         """
         Reads the appropriate files and stores the data for the dft_subgrp in the hdf5 archive.

@@ -80,71 +82,97 @@ class HkConverter(ConverterTools):
             Are the k-point weights to be read in?

         """

         # Read and write only on the master node
-        if not (mpi.is_master_node()): return
-        mpi.report("Reading input from %s..."%self.dft_file)
+        if not (mpi.is_master_node()):
+            return
+        mpi.report("Reading input from %s..." % self.dft_file)

-        # R is a generator : each R.Next() will return the next number in the file
-        R = ConverterTools.read_fortran_file(self,self.dft_file,self.fortran_to_replace)
+        # R is a generator : each R.Next() will return the next number in the
+        # file
+        R = ConverterTools.read_fortran_file(
+            self, self.dft_file, self.fortran_to_replace)
         try:
-            energy_unit = 1.0 # the energy conversion factor is 1.0, we assume eV in files
-            n_k = int(R.next()) # read the number of k points
+            # the energy conversion factor is 1.0, we assume eV in files
+            energy_unit = 1.0
+            # read the number of k points
+            n_k = int(R.next())
             k_dep_projection = 0
             SP = 0  # no spin-polarision
             SO = 0  # no spin-orbit
-            charge_below = 0.0 # total charge below energy window is set to 0
-            density_required = R.next() # density required, for setting the chemical potential
+            # total charge below energy window is set to 0
+            charge_below = 0.0
+            # density required, for setting the chemical potential
+            density_required = R.next()
             symm_op = 0  # No symmetry groups for the k-sum

-            # the information on the non-correlated shells is needed for defining dimension of matrices:
-            n_shells = int(R.next()) # number of shells considered in the Wanniers
+            # the information on the non-correlated shells is needed for
+            # defining dimension of matrices:
+            # number of shells considered in the Wanniers
+            n_shells = int(R.next())
             # corresponds to index R in formulas
             # now read the information about the shells (atom, sort, l, dim):
             shell_entries = ['atom', 'sort', 'l', 'dim']
-            shells = [ {name: int(val) for name, val in zip(shell_entries, R)} for ish in range(n_shells) ]
+            shells = [{name: int(val) for name, val in zip(
+                shell_entries, R)} for ish in range(n_shells)]

-            n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            n_corr_shells = int(R.next())
             # corresponds to index R in formulas
-            # now read the information about the shells (atom, sort, l, dim, SO flag, irep):
+            # now read the information about the shells (atom, sort, l, dim, SO
+            # flag, irep):
             corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
-            corr_shells = [ {name: int(val) for name, val in zip(corr_shell_entries, R)} for icrsh in range(n_corr_shells) ]
+            corr_shells = [{name: int(val) for name, val in zip(
+                corr_shell_entries, R)} for icrsh in range(n_corr_shells)]

-            # determine the number of inequivalent correlated shells and maps, needed for further reading
-            [n_inequiv_shells, corr_to_inequiv, inequiv_to_corr] = ConverterTools.det_shell_equivalence(self,corr_shells)
+            # determine the number of inequivalent correlated shells and maps,
+            # needed for further reading
+            [n_inequiv_shells, corr_to_inequiv,
+                inequiv_to_corr] = ConverterTools.det_shell_equivalence(self, corr_shells)

             use_rotations = 0
-            rot_mat = [numpy.identity(corr_shells[icrsh]['dim'],numpy.complex_) for icrsh in range(n_corr_shells)]
+            rot_mat = [numpy.identity(
+                corr_shells[icrsh]['dim'], numpy.complex_) for icrsh in range(n_corr_shells)]
             rot_mat_time_inv = [0 for i in range(n_corr_shells)]

             # Representative representations are read from file
             n_reps = [1 for i in range(n_inequiv_shells)]
             dim_reps = [0 for i in range(n_inequiv_shells)]
             T = []
             for ish in range(n_inequiv_shells):
-                n_reps[ish] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg
-                dim_reps[ish] = [int(R.next()) for i in range(n_reps[ish])] # dimensions of the subsets
+                # number of representatives ("subsets"), e.g. t2g and eg
+                n_reps[ish] = int(R.next())
+                dim_reps[ish] = [int(R.next()) for i in range(
+                    n_reps[ish])]   # dimensions of the subsets

                 # The transformation matrix:
-                # is of dimension 2l+1, it is taken to be standard d (as in Wien2k)
-                ll = 2*corr_shells[inequiv_to_corr[ish]]['l']+1
+                # is of dimension 2l+1, it is taken to be standard d (as in
+                # Wien2k)
+                ll = 2 * corr_shells[inequiv_to_corr[ish]]['l'] + 1
                 lmax = ll * (corr_shells[inequiv_to_corr[ish]]['SO'] + 1)
-                T.append(numpy.zeros([lmax,lmax],numpy.complex_))
+                T.append(numpy.zeros([lmax, lmax], numpy.complex_))

                 T[ish] = numpy.array([[0.0, 0.0, 1.0, 0.0, 0.0],
-                                      [1.0/sqrt(2.0), 0.0, 0.0, 0.0, 1.0/sqrt(2.0)],
-                                      [-1.0/sqrt(2.0), 0.0, 0.0, 0.0, 1.0/sqrt(2.0)],
-                                      [0.0, 1.0/sqrt(2.0), 0.0, -1.0/sqrt(2.0), 0.0],
-                                      [0.0, 1.0/sqrt(2.0), 0.0, 1.0/sqrt(2.0), 0.0]])
+                                      [1.0 / sqrt(2.0), 0.0, 0.0,
+                                       0.0, 1.0 / sqrt(2.0)],
+                                      [-1.0 / sqrt(2.0), 0.0, 0.0,
+                                       0.0, 1.0 / sqrt(2.0)],
+                                      [0.0, 1.0 /
+                                       sqrt(2.0), 0.0, -1.0 / sqrt(2.0), 0.0],
+                                      [0.0, 1.0 / sqrt(2.0), 0.0, 1.0 / sqrt(2.0), 0.0]])

             # Spin blocks to be read:
-            n_spin_blocs = SP + 1 - SO # number of spins to read for Norbs and Ham, NOT Projectors
-            # define the number of n_orbitals for all k points: it is the number of total bands and independent of k!
-            n_orbitals = numpy.ones([n_k,n_spin_blocs],numpy.int) * sum([ sh['dim'] for sh in shells ])
+            # number of spins to read for Norbs and Ham, NOT Projectors
+            n_spin_blocs = SP + 1 - SO
+            # define the number of n_orbitals for all k points: it is the
+            # number of total bands and independent of k!
+            n_orbitals = numpy.ones(
+                [n_k, n_spin_blocs], numpy.int) * sum([sh['dim'] for sh in shells])

             # Initialise the projectors:
-            proj_mat = numpy.zeros([n_k,n_spin_blocs,n_corr_shells,max([crsh['dim'] for crsh in corr_shells]),max(n_orbitals)],numpy.complex_)
+            proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max(
+                [crsh['dim'] for crsh in corr_shells]), max(n_orbitals)], numpy.complex_)

             # Read the projectors from the file:
             for ik in range(n_k):
@@ -155,76 +183,90 @@ class HkConverter(ConverterTools):
                         offset = 0
                         n_orb = 0
                         for ish in range(n_shells):
-                            if (n_orb==0):
-                                if (shells[ish]['atom']==corr_shells[icrsh]['atom']) and (shells[ish]['sort']==corr_shells[icrsh]['sort']):
+                            if (n_orb == 0):
+                                if (shells[ish]['atom'] == corr_shells[icrsh]['atom']) and (shells[ish]['sort'] == corr_shells[icrsh]['sort']):
                                     n_orb = corr_shells[icrsh]['dim']
                                 else:
                                     offset += shells[ish]['dim']

-                        proj_mat[ik,isp,icrsh,0:n_orb,offset:offset+n_orb] = numpy.identity(n_orb)
+                        proj_mat[ik, isp, icrsh, 0:n_orb,
+                                 offset:offset + n_orb] = numpy.identity(n_orb)

             # now define the arrays for weights and hopping ...
-            bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation
-            hopping = numpy.zeros([n_k,n_spin_blocs,max(n_orbitals),max(n_orbitals)],numpy.complex_)
+            # w(k_index), default normalisation
+            bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)
+            hopping = numpy.zeros([n_k, n_spin_blocs, max(
+                n_orbitals), max(n_orbitals)], numpy.complex_)

             if (weights_in_file):
                 # weights in the file
-                for ik in range(n_k) : bz_weights[ik] = R.next()
+                for ik in range(n_k):
+                    bz_weights[ik] = R.next()

                 # if the sum over spins is in the weights, take it out again!!
                 sm = sum(bz_weights)
                 bz_weights[:] /= sm

             # Grab the H
             for isp in range(n_spin_blocs):
-                for ik in range(n_k) :
-                    n_orb = n_orbitals[ik,isp]
+                for ik in range(n_k):
+                    n_orb = n_orbitals[ik, isp]

-                    if (first_real_part_matrix): # first read all real components for given k, then read imaginary parts
+                    # first read all real components for given k, then read
+                    # imaginary parts
+                    if (first_real_part_matrix):

                         for i in range(n_orb):
                             if (only_upper_triangle):
                                 istart = i
                             else:
                                 istart = 0
-                            for j in range(istart,n_orb):
-                                hopping[ik,isp,i,j] = R.next()
+                            for j in range(istart, n_orb):
+                                hopping[ik, isp, i, j] = R.next()

                         for i in range(n_orb):
                             if (only_upper_triangle):
                                 istart = i
                             else:
                                 istart = 0
-                            for j in range(istart,n_orb):
-                                hopping[ik,isp,i,j] += R.next() * 1j
-                                if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate()
+                            for j in range(istart, n_orb):
+                                hopping[ik, isp, i, j] += R.next() * 1j
+                                if ((only_upper_triangle)and(i != j)):
+                                    hopping[ik, isp, j, i] = hopping[
+                                        ik, isp, i, j].conjugate()

-                    else: # read (real,im) tuple
+                    else:  # read (real,im) tuple

                         for i in range(n_orb):
                             if (only_upper_triangle):
                                 istart = i
                             else:
                                 istart = 0
-                            for j in range(istart,n_orb):
-                                hopping[ik,isp,i,j] = R.next()
-                                hopping[ik,isp,i,j] += R.next() * 1j
+                            for j in range(istart, n_orb):
+                                hopping[ik, isp, i, j] = R.next()
+                                hopping[ik, isp, i, j] += R.next() * 1j

-                                if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate()
+                                if ((only_upper_triangle)and(i != j)):
+                                    hopping[ik, isp, j, i] = hopping[
+                                        ik, isp, i, j].conjugate()
             # keep some things that we need for reading parproj:
-            things_to_set = ['n_shells','shells','n_corr_shells','corr_shells','n_spin_blocs','n_orbitals','n_k','SO','SP','energy_unit']
-            for it in things_to_set: setattr(self,it,locals()[it])
-        except StopIteration : # a more explicit error if the file is corrupted.
+            things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells',
                             'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit']
+            for it in things_to_set:
+                setattr(self, it, locals()[it])
+        except StopIteration:  # a more explicit error if the file is corrupted.
             raise "HK Converter : reading file dft_file failed!"

         R.close()

         # Save to the HDF5:
-        ar = HDFArchive(self.hdf_file,'a')
-        if not (self.dft_subgrp in ar): ar.create_group(self.dft_subgrp)
-        things_to_save = ['energy_unit','n_k','k_dep_projection','SP','SO','charge_below','density_required',
-                          'symm_op','n_shells','shells','n_corr_shells','corr_shells','use_rotations','rot_mat',
-                          'rot_mat_time_inv','n_reps','dim_reps','T','n_orbitals','proj_mat','bz_weights','hopping',
+        ar = HDFArchive(self.hdf_file, 'a')
+        if not (self.dft_subgrp in ar):
+            ar.create_group(self.dft_subgrp)
+        things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
+                          'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
+                          'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
                           'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
-        for it in things_to_save: ar[self.dft_subgrp][it] = locals()[it]
+        for it in things_to_save:
+            ar[self.dft_subgrp][it] = locals()[it]
         del ar
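Aside (not part of the commit): a minimal driver for the converter above; 'hamiltonian.hk' is a placeholder file name and a working TRIQS/dft_tools installation is assumed.

    from hk_converter import HkConverter

    conv = HkConverter(filename='hamiltonian.hk')    # the archive defaults to 'hamiltonian.hk.h5'
    conv.convert_dft_input(first_real_part_matrix=True,
                           only_upper_triangle=False,
                           weights_in_file=False)    # reads H(k) and fills the dft_input group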
@@ -91,7 +91,8 @@ class Wannier90Converter(ConverterTools):
         self.dft_subgrp = dft_subgrp
         self.symmcorr_subgrp = symmcorr_subgrp
         self.fortran_to_replace = {'D': 'E'}
-        # threshold below which matrix elements from wannier90 should be considered equal
+        # threshold below which matrix elements from wannier90 should be
+        # considered equal
         self._w90zero = 2.e-6

         # Checks if h5 file is there and repacks it if wanted:
@@ -114,12 +115,14 @@ class Wannier90Converter(ConverterTools):
             return
         mpi.report("Reading input from %s..." % self.inp_file)

-        # R is a generator : each R.Next() will return the next number in the file
+        # R is a generator : each R.Next() will return the next number in the
+        # file
         R = ConverterTools.read_fortran_file(
             self, self.inp_file, self.fortran_to_replace)
         shell_entries = ['atom', 'sort', 'l', 'dim']
         corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
-        # First, let's read the input file with the parameters needed for the conversion
+        # First, let's read the input file with the parameters needed for the
+        # conversion
         try:
             # read k - point mesh generation option
             kmesh_mode = int(R.next())
@@ -135,7 +138,8 @@ class Wannier90Converter(ConverterTools):
             # and the data will be copied from corr_shells into shells (see below)
             # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
             n_corr_shells = int(R.next())
-            # now read the information about the correlated shells (atom, sort, l, dim, SO flag, irep):
+            # now read the information about the correlated shells (atom, sort,
+            # l, dim, SO flag, irep):
             corr_shells = [{name: int(val) for name, val in zip(
                 corr_shell_entries, R)} for icrsh in range(n_corr_shells)]
         except StopIteration:  # a more explicit error if the file is corrupted.
@@ -147,7 +151,7 @@ class Wannier90Converter(ConverterTools):
         # Set or derive some quantities
         # Wannier90 does not use symmetries to reduce the k-points
         # the following might change in future versions
         symm_op = 0
         # copy corr_shells into shells (see above)
         n_shells = n_corr_shells
         shells = []
@@ -166,7 +170,8 @@ class Wannier90Converter(ConverterTools):
         mpi.report(
             "Total number of WFs expected in the correlated shells: %d" % dim_corr_shells)

-        # determine the number of inequivalent correlated shells and maps, needed for further processing
+        # determine the number of inequivalent correlated shells and maps,
+        # needed for further processing
         n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(
             self, corr_shells)
         mpi.report("Number of inequivalent shells: %d" % n_inequiv_shells)
@@ -176,7 +181,8 @@ class Wannier90Converter(ConverterTools):
         mpi.report("Mapping: " + format(shells_map))

         # build the k-point mesh, if its size was given on input (kmesh_mode >= 0),
-        # otherwise it is built according to the data in the hr file (see below)
+        # otherwise it is built according to the data in the hr file (see
+        # below)
         if kmesh_mode >= 0:
             n_k, k_mesh, bz_weights = self.kmesh_build(nki, kmesh_mode)
             self.n_k = n_k
@@ -197,7 +203,8 @@ class Wannier90Converter(ConverterTools):
         # TODO: generalise to SP=1 (only partially done)
         rot_mat_time_inv = [0 for i in range(n_corr_shells)]

-        # Second, let's read the file containing the Hamiltonian in WF basis produced by Wannier90
+        # Second, let's read the file containing the Hamiltonian in WF basis
+        # produced by Wannier90
         for isp in range(n_spin):
             # begin loop on isp

@@ -212,20 +219,24 @@ class Wannier90Converter(ConverterTools):
             mpi.report(
                 "The Hamiltonian in MLWF basis is extracted from %s ..." % hr_file)
             nr, rvec, rdeg, nw, hamr = self.read_wannier90hr(hr_file)
-            # number of R vectors, their indices, their degeneracy, number of WFs, H(R)
+            # number of R vectors, their indices, their degeneracy, number of
+            # WFs, H(R)
             mpi.report("... done: %d R vectors, %d WFs found" % (nr, nw))

             if isp == 0:
-                # set or check some quantities that must be the same for both spins
+                # set or check some quantities that must be the same for both
+                # spins
                 self.nrpt = nr

                 # k-point grid: (if not defined before)
                 if kmesh_mode == -1:
-                    # the size of the k-point mesh is determined from the largest R vector
+                    # the size of the k-point mesh is determined from the
+                    # largest R vector
                     nki = [2 * rvec[:, idir].max() + 1 for idir in range(3)]
                     # it will be the same as in the win only when nki is odd, because of the
                     # wannier90 convention: if we have nki k-points along the i-th direction,
-                    # then we should get 2*(nki/2)+nki%2 R points along that direction
+                    # then we should get 2*(nki/2)+nki%2 R points along that
+                    # direction
                     n_k, k_mesh, bz_weights = self.kmesh_build(nki)
                     self.n_k = n_k
                     self.k_mesh = k_mesh
@@ -237,33 +248,41 @@ class Wannier90Converter(ConverterTools):
                 self.nwfs = nw
                 # check that the total number of WFs makes sense
                 if self.nwfs < dim_corr_shells:
-                    mpi.report("ERROR: number of WFs in the file smaller than number of correlated orbitals!")
+                    mpi.report(
+                        "ERROR: number of WFs in the file smaller than number of correlated orbitals!")
                 elif self.nwfs > dim_corr_shells:
-                    # NOTE: correlated shells must appear before uncorrelated ones inside the file
+                    # NOTE: correlated shells must appear before uncorrelated
+                    # ones inside the file
                     mpi.report("Number of WFs larger than correlated orbitals:\n" +
                                "WFs from %d to %d treated as uncorrelated" % (dim_corr_shells + 1, self.nwfs))
                 else:
-                    mpi.report("Number of WFs equal to number of correlated orbitals")
+                    mpi.report(
+                        "Number of WFs equal to number of correlated orbitals")

-                # we assume spin up and spin down always have same total number of WFs
+                # we assume spin up and spin down always have same total number
+                # of WFs
                 n_orbitals = numpy.ones(
                     [self.n_k, n_spin], numpy.int) * self.nwfs

             else:
                 # consistency check between the _up and _down file contents
                 if nr != self.nrpt:
-                    mpi.report("Different number of R vectors for spin-up/spin-down!")
+                    mpi.report(
+                        "Different number of R vectors for spin-up/spin-down!")
                 if nw != self.nwfs:
-                    mpi.report("Different number of WFs for spin-up/spin-down!")
+                    mpi.report(
+                        "Different number of WFs for spin-up/spin-down!")

             hamr_full.append(hamr)
             # FIXME: when do we actually need deepcopy()?
             # hamr_full.append(deepcopy(hamr))

             for ir in range(nr):
-                # checks if the Hamiltonian is real (it should, if wannierisation worked fine)
+                # checks if the Hamiltonian is real (it should, if
+                # wannierisation worked fine)
                 if numpy.abs((hamr[ir].imag.max()).max()) > self._w90zero:
-                    mpi.report("H(R) has large complex components at R %d" % ir)
+                    mpi.report(
+                        "H(R) has large complex components at R %d" % ir)
                 # copy the R=0 block corresponding to the correlated shells
                 # into another variable (needed later for finding rot_mat)
                 if rvec[ir, 0] == 0 and rvec[ir, 1] == 0 and rvec[ir, 2] == 0:
@@ -273,17 +292,22 @@ class Wannier90Converter(ConverterTools):
             if not numpy.allclose(ham_corr0.transpose().conjugate(), ham_corr0, atol=self._w90zero, rtol=1.e-9):
                 raise ValueError("H(R=0) matrix is not Hermitian!")

-            # find rot_mat symmetries by diagonalising the on-site Hamiltonian of the first spin
+            # find rot_mat symmetries by diagonalising the on-site Hamiltonian
+            # of the first spin
             if isp == 0:
-                use_rotations, rot_mat = self.find_rot_mat(n_corr_shells, corr_shells, shells_map, ham_corr0)
+                use_rotations, rot_mat = self.find_rot_mat(
+                    n_corr_shells, corr_shells, shells_map, ham_corr0)
             else:
                 # consistency check
-                use_rotations_, rot_mat_ = self.find_rot_mat(n_corr_shells, corr_shells, shells_map, ham_corr0)
+                use_rotations_, rot_mat_ = self.find_rot_mat(
+                    n_corr_shells, corr_shells, shells_map, ham_corr0)
                 if (use_rotations and not use_rotations_):
-                    mpi.report("Rotations cannot be used for spin component n. %d" % isp)
+                    mpi.report(
+                        "Rotations cannot be used for spin component n. %d" % isp)
                 for icrsh in range(n_corr_shells):
                     if not numpy.allclose(rot_mat_[icrsh], rot_mat[icrsh], atol=self._w90zero, rtol=1.e-15):
-                        mpi.report("Rotations for spin component n. %d do not match!" % isp)
+                        mpi.report(
+                            "Rotations for spin component n. %d do not match!" % isp)
         # end loop on isp

         mpi.report("The k-point grid has dimensions: %d, %d, %d" % tuple(nki))
@@ -292,11 +316,14 @@ class Wannier90Converter(ConverterTools):
             bz_weights = 0.5 * bz_weights

         # Third, compute the hoppings in reciprocal space
-        hopping = numpy.zeros([self.n_k, n_spin, numpy.max(n_orbitals), numpy.max(n_orbitals)], numpy.complex_)
+        hopping = numpy.zeros([self.n_k, n_spin, numpy.max(
+            n_orbitals), numpy.max(n_orbitals)], numpy.complex_)
         for isp in range(n_spin):
-            # make Fourier transform H(R) -> H(k) : it can be done one spin at a time
+            # make Fourier transform H(R) -> H(k) : it can be done one spin at
+            # a time
             hamk = self.fourier_ham(self.nwfs, hamr_full[isp])
-            # copy the H(k) in the right place of hoppings... is there a better way to do this??
+            # copy the H(k) in the right place of hoppings... is there a better
+            # way to do this??
             for ik in range(self.n_k):
                 #hopping[ik,isp,:,:] = deepcopy(hamk[ik][:,:])*energy_unit
                 hopping[ik, isp, :, :] = hamk[ik][:, :] * energy_unit
@@ -309,7 +336,8 @@ class Wannier90Converter(ConverterTools):
         # Projectors simply consist in identity matrix blocks selecting those MLWFs that
         # correspond to the specific correlated shell indexed by icrsh.
         # NOTE: we assume that the correlated orbitals appear at the beginning of the H(R)
-        # file and that the ordering of MLWFs matches the corr_shell info from the input.
+        # file and that the ordering of MLWFs matches the corr_shell info from
+        # the input.
         for icrsh in range(n_corr_shells):
             norb = corr_shells[icrsh]['dim']
             proj_mat[:, :, icrsh, 0:norb, iorb:iorb +
@@ -320,7 +348,8 @@ class Wannier90Converter(ConverterTools):
         ar = HDFArchive(self.hdf_file, 'a')
         if not (self.dft_subgrp in ar):
             ar.create_group(self.dft_subgrp)
-        # The subgroup containing the data. If it does not exist, it is created. If it exists, the data is overwritten!
+        # The subgroup containing the data. If it does not exist, it is
+        # created. If it exists, the data is overwritten!
         things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
                           'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
                           'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
@@ -373,7 +402,8 @@ class Wannier90Converter(ConverterTools):
         except ValueError:
             mpi.report("Could not read number of WFs or R vectors")

-        # allocate arrays to save the R vector indexes and degeneracies and the Hamiltonian
+        # allocate arrays to save the R vector indexes and degeneracies and the
+        # Hamiltonian
         rvec_idx = numpy.zeros((nrpt, 3), dtype=int)
         rvec_deg = numpy.zeros(nrpt, dtype=int)
         h_of_r = [numpy.zeros((num_wf, num_wf), dtype=numpy.complex_)
@@ -383,7 +413,8 @@ class Wannier90Converter(ConverterTools):
         currpos = 2
         try:
             ir = 0
-            # read the degeneracy of the R vectors (needed for the Fourier transform)
+            # read the degeneracy of the R vectors (needed for the Fourier
+            # transform)
             while ir < nrpt:
                 currpos += 1
                 for x in hr_data[currpos].split():
@@ -540,7 +571,8 @@ class Wannier90Converter(ConverterTools):
         kmesh = numpy.zeros((nkpt, 3), dtype=float)
         ii = 0
         for ix, iy, iz in product(range(msize[0]), range(msize[1]), range(msize[2])):
-            kmesh[ii, :] = [float(ix) / msize[0], float(iy) / msize[1], float(iz) / msize[2]]
+            kmesh[ii, :] = [float(ix) / msize[0], float(iy) /
+                            msize[1], float(iz) / msize[2]]
            ii += 1
         # weight is equal for all k-points because wannier90 uses uniform grid on whole BZ
         # (normalization is always 1 and takes into account spin degeneracy)
@@ -568,11 +600,13 @@ class Wannier90Converter(ConverterTools):
         """

         twopi = 2 * numpy.pi
-        h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_) for ik in range(self.n_k)]
+        h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_)
+                  for ik in range(self.n_k)]
         ridx = numpy.array(range(self.nrpt))
         for ik, ir in product(range(self.n_k), ridx):
             rdotk = twopi * numpy.dot(self.k_mesh[ik], self.rvec[ir])
-            factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / float(self.rdeg[ir])
+            factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / \
+                float(self.rdeg[ir])
             h_of_k[ik][:, :] += factor * h_of_r[ir][:, :]

         return h_of_k
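For context (not part of the commit): a self-contained sketch of the Fourier sum implemented in fourier_ham above, H(k) = sum_R exp(2*pi*i k.R) H(R) / deg(R); the one-band H(R) below is made up purely for illustration.

    import math
    import numpy
    from itertools import product

    rvec = numpy.array([[0, 0, 0], [1, 0, 0], [-1, 0, 0]])   # toy R vectors
    rdeg = numpy.array([1, 1, 1])                            # their degeneracies
    h_of_r = [numpy.array([[0.0 + 0j]]),                     # on-site term
              numpy.array([[-1.0 + 0j]]),                    # nearest-neighbour hopping
              numpy.array([[-1.0 + 0j]])]
    k_mesh = numpy.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])

    h_of_k = [numpy.zeros((1, 1), dtype=numpy.complex_) for ik in range(len(k_mesh))]
    for ik, ir in product(range(len(k_mesh)), range(len(rvec))):
        rdotk = 2 * numpy.pi * numpy.dot(k_mesh[ik], rvec[ir])
        factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / float(rdeg[ir])
        h_of_k[ik][:, :] += factor * h_of_r[ir][:, :]
    # Expected: h_of_k[0] is about [[-2.]] (band bottom) and h_of_k[1] about [[+2.]] (zone boundary).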
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,7 +18,7 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from types import *
 import numpy
@@ -26,16 +26,17 @@ from pytriqs.archive import *
 from converter_tools import *
 import os.path

+
 class Wien2kConverter(ConverterTools):
     """
     Conversion from Wien2k output to an hdf5 file that can be used as input for the SumkDFT class.
     """

-    def __init__(self, filename, hdf_filename = None,
-                 dft_subgrp = 'dft_input', symmcorr_subgrp = 'dft_symmcorr_input',
+    def __init__(self, filename, hdf_filename=None,
+                 dft_subgrp='dft_input', symmcorr_subgrp='dft_symmcorr_input',
                  parproj_subgrp='dft_parproj_input', symmpar_subgrp='dft_symmpar_input',
-                 bands_subgrp = 'dft_bands_input', misc_subgrp = 'dft_misc_input',
-                 transp_subgrp = 'dft_transp_input', repacking = False):
+                 bands_subgrp='dft_bands_input', misc_subgrp='dft_misc_input',
+                 transp_subgrp='dft_transp_input', repacking=False):
         """
         Initialise the class.

@@ -61,21 +62,23 @@ class Wien2kConverter(ConverterTools):
             Name of subgroup storing transport data.
         repacking : boolean, optional
             Does the hdf5 archive need to be repacked to save space?

         """

-        assert type(filename)==StringType, "Wien2kConverter: Please provide the DFT files' base name as a string."
-        if hdf_filename is None: hdf_filename = filename+'.h5'
+        assert type(
+            filename) == StringType, "Wien2kConverter: Please provide the DFT files' base name as a string."
+        if hdf_filename is None:
+            hdf_filename = filename + '.h5'
         self.hdf_file = hdf_filename
-        self.dft_file = filename+'.ctqmcout'
-        self.symmcorr_file = filename+'.symqmc'
-        self.parproj_file = filename+'.parproj'
-        self.symmpar_file = filename+'.sympar'
-        self.band_file = filename+'.outband'
-        self.bandwin_file = filename+'.oubwin'
-        self.struct_file = filename+'.struct'
-        self.outputs_file = filename+'.outputs'
-        self.pmat_file = filename+'.pmat'
+        self.dft_file = filename + '.ctqmcout'
+        self.symmcorr_file = filename + '.symqmc'
+        self.parproj_file = filename + '.parproj'
+        self.symmpar_file = filename + '.sympar'
+        self.band_file = filename + '.outband'
+        self.bandwin_file = filename + '.oubwin'
+        self.struct_file = filename + '.struct'
+        self.outputs_file = filename + '.outputs'
+        self.pmat_file = filename + '.pmat'
         self.dft_subgrp = dft_subgrp
         self.symmcorr_subgrp = symmcorr_subgrp
         self.parproj_subgrp = parproj_subgrp
@@ -83,13 +86,12 @@ class Wien2kConverter(ConverterTools):
         self.bands_subgrp = bands_subgrp
         self.misc_subgrp = misc_subgrp
         self.transp_subgrp = transp_subgrp
-        self.fortran_to_replace = {'D':'E'}
+        self.fortran_to_replace = {'D': 'E'}

         # Checks if h5 file is there and repacks it if wanted:
         if (os.path.exists(self.hdf_file) and repacking):
             ConverterTools.repack(self)

-
     def convert_dft_input(self):
         """
         Reads the appropriate files and stores the data for the
@@ -101,149 +103,180 @@ class Wien2kConverter(ConverterTools):
         in the hdf5 archive.

         """

         # Read and write only on the master node
-        if not (mpi.is_master_node()): return
-        mpi.report("Reading input from %s..."%self.dft_file)
+        if not (mpi.is_master_node()):
+            return
+        mpi.report("Reading input from %s..." % self.dft_file)

-        # R is a generator : each R.Next() will return the next number in the file
-        R = ConverterTools.read_fortran_file(self,self.dft_file,self.fortran_to_replace)
+        # R is a generator : each R.Next() will return the next number in the
+        # file
+        R = ConverterTools.read_fortran_file(
+            self, self.dft_file, self.fortran_to_replace)
         try:
             energy_unit = R.next()  # read the energy convertion factor
-            n_k = int(R.next()) # read the number of k points
+            # read the number of k points
+            n_k = int(R.next())
             k_dep_projection = 1
-            SP = int(R.next()) # flag for spin-polarised calculation
-            SO = int(R.next()) # flag for spin-orbit calculation
+            # flag for spin-polarised calculation
+            SP = int(R.next())
+            # flag for spin-orbit calculation
+            SO = int(R.next())
             charge_below = R.next()  # total charge below energy window
-            density_required = R.next() # total density required, for setting the chemical potential
+            # total density required, for setting the chemical potential
+            density_required = R.next()
             symm_op = 1  # Use symmetry groups for the k-sum

-            # the information on the non-correlated shells is not important here, maybe skip:
-            n_shells = int(R.next()) # number of shells (e.g. Fe d, As p, O p) in the unit cell,
+            # the information on the non-correlated shells is not important
+            # here, maybe skip:
+            # number of shells (e.g. Fe d, As p, O p) in the unit cell,
+            n_shells = int(R.next())
             # corresponds to index R in formulas
             # now read the information about the shells (atom, sort, l, dim):
             shell_entries = ['atom', 'sort', 'l', 'dim']
-            shells = [ {name: int(val) for name, val in zip(shell_entries, R)} for ish in range(n_shells) ]
+            shells = [{name: int(val) for name, val in zip(
+                shell_entries, R)} for ish in range(n_shells)]

-            n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            n_corr_shells = int(R.next())
             # corresponds to index R in formulas
-            # now read the information about the shells (atom, sort, l, dim, SO flag, irep):
+            # now read the information about the shells (atom, sort, l, dim, SO
+            # flag, irep):
             corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
-            corr_shells = [ {name: int(val) for name, val in zip(corr_shell_entries, R)} for icrsh in range(n_corr_shells) ]
+            corr_shells = [{name: int(val) for name, val in zip(
+                corr_shell_entries, R)} for icrsh in range(n_corr_shells)]

-            # determine the number of inequivalent correlated shells and maps, needed for further reading
-            n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(self,corr_shells)
+            # determine the number of inequivalent correlated shells and maps,
+            # needed for further reading
+            n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(
+                self, corr_shells)

             use_rotations = 1
-            rot_mat = [numpy.identity(corr_shells[icrsh]['dim'],numpy.complex_) for icrsh in range(n_corr_shells)]
+            rot_mat = [numpy.identity(
+                corr_shells[icrsh]['dim'], numpy.complex_) for icrsh in range(n_corr_shells)]

             # read the matrices
             rot_mat_time_inv = [0 for i in range(n_corr_shells)]

             for icrsh in range(n_corr_shells):
                 for i in range(corr_shells[icrsh]['dim']):  # read real part:
                     for j in range(corr_shells[icrsh]['dim']):
-                        rot_mat[icrsh][i,j] = R.next()
-                for i in range(corr_shells[icrsh]['dim']): # read imaginary part:
+                        rot_mat[icrsh][i, j] = R.next()
+                # read imaginary part:
+                for i in range(corr_shells[icrsh]['dim']):
                     for j in range(corr_shells[icrsh]['dim']):
-                        rot_mat[icrsh][i,j] += 1j * R.next()
+                        rot_mat[icrsh][i, j] += 1j * R.next()

-                if (SP==1): # read time inversion flag:
+                if (SP == 1):  # read time inversion flag:
                     rot_mat_time_inv[icrsh] = int(R.next())

             # Read here the info for the transformation of the basis:
             n_reps = [1 for i in range(n_inequiv_shells)]
             dim_reps = [0 for i in range(n_inequiv_shells)]
             T = []
             for ish in range(n_inequiv_shells):
-                n_reps[ish] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg
-                dim_reps[ish] = [int(R.next()) for i in range(n_reps[ish])] # dimensions of the subsets
+                # number of representatives ("subsets"), e.g. t2g and eg
+                n_reps[ish] = int(R.next())
+                dim_reps[ish] = [int(R.next()) for i in range(
+                    n_reps[ish])]   # dimensions of the subsets

                 # The transformation matrix:
                 # is of dimension 2l+1 without SO, and 2*(2l+1) with SO!
-                ll = 2*corr_shells[inequiv_to_corr[ish]]['l']+1
+                ll = 2 * corr_shells[inequiv_to_corr[ish]]['l'] + 1
                 lmax = ll * (corr_shells[inequiv_to_corr[ish]]['SO'] + 1)
-                T.append(numpy.zeros([lmax,lmax],numpy.complex_))
+                T.append(numpy.zeros([lmax, lmax], numpy.complex_))

                 # now read it from file:
                 for i in range(lmax):
                     for j in range(lmax):
-                        T[ish][i,j] = R.next()
+                        T[ish][i, j] = R.next()
                 for i in range(lmax):
                     for j in range(lmax):
-                        T[ish][i,j] += 1j * R.next()
+                        T[ish][i, j] += 1j * R.next()

             # Spin blocks to be read:
             n_spin_blocs = SP + 1 - SO

             # read the list of n_orbitals for all k points
|
# read the list of n_orbitals for all k points
|
||||||
n_orbitals = numpy.zeros([n_k,n_spin_blocs],numpy.int)
|
n_orbitals = numpy.zeros([n_k, n_spin_blocs], numpy.int)
|
||||||
for isp in range(n_spin_blocs):
|
for isp in range(n_spin_blocs):
|
||||||
for ik in range(n_k):
|
for ik in range(n_k):
|
||||||
n_orbitals[ik,isp] = int(R.next())
|
n_orbitals[ik, isp] = int(R.next())
|
||||||
|
|
||||||
# Initialise the projectors:
|
# Initialise the projectors:
|
||||||
proj_mat = numpy.zeros([n_k,n_spin_blocs,n_corr_shells,max([crsh['dim'] for crsh in corr_shells]),numpy.max(n_orbitals)],numpy.complex_)
|
proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max(
|
||||||
|
[crsh['dim'] for crsh in corr_shells]), numpy.max(n_orbitals)], numpy.complex_)
|
||||||
|
|
||||||
# Read the projectors from the file:
|
# Read the projectors from the file:
|
||||||
for ik in range(n_k):
|
for ik in range(n_k):
|
||||||
for icrsh in range(n_corr_shells):
|
for icrsh in range(n_corr_shells):
|
||||||
n_orb = corr_shells[icrsh]['dim']
|
n_orb = corr_shells[icrsh]['dim']
|
||||||
# first Real part for BOTH spins, due to conventions in dmftproj:
|
# first Real part for BOTH spins, due to conventions in
|
||||||
|
# dmftproj:
|
||||||
for isp in range(n_spin_blocs):
|
for isp in range(n_spin_blocs):
|
||||||
for i in range(n_orb):
|
for i in range(n_orb):
|
||||||
for j in range(n_orbitals[ik][isp]):
|
for j in range(n_orbitals[ik][isp]):
|
||||||
proj_mat[ik,isp,icrsh,i,j] = R.next()
|
proj_mat[ik, isp, icrsh, i, j] = R.next()
|
||||||
# now Imag part:
|
# now Imag part:
|
||||||
for isp in range(n_spin_blocs):
|
for isp in range(n_spin_blocs):
|
||||||
for i in range(n_orb):
|
for i in range(n_orb):
|
||||||
for j in range(n_orbitals[ik][isp]):
|
for j in range(n_orbitals[ik][isp]):
|
||||||
proj_mat[ik,isp,icrsh,i,j] += 1j * R.next()
|
proj_mat[ik, isp, icrsh, i, j] += 1j * R.next()
|
||||||
|
|
||||||
# now define the arrays for weights and hopping ...
|
# now define the arrays for weights and hopping ...
|
||||||
bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation
|
# w(k_index), default normalisation
|
||||||
hopping = numpy.zeros([n_k,n_spin_blocs,numpy.max(n_orbitals),numpy.max(n_orbitals)],numpy.complex_)
|
bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)
|
||||||
|
hopping = numpy.zeros([n_k, n_spin_blocs, numpy.max(
|
||||||
|
n_orbitals), numpy.max(n_orbitals)], numpy.complex_)
|
||||||
|
|
||||||
# weights in the file
|
# weights in the file
|
||||||
for ik in range(n_k) : bz_weights[ik] = R.next()
|
for ik in range(n_k):
|
||||||
|
bz_weights[ik] = R.next()
|
||||||
|
|
||||||
# if the sum over spins is in the weights, take it out again!!
|
# if the sum over spins is in the weights, take it out again!!
|
||||||
sm = sum(bz_weights)
|
sm = sum(bz_weights)
|
||||||
bz_weights[:] /= sm
|
bz_weights[:] /= sm
|
||||||
|
|
||||||
# Grab the H
|
# Grab the H
|
||||||
# we use now the convention of a DIAGONAL Hamiltonian -- convention for Wien2K.
|
# we use now the convention of a DIAGONAL Hamiltonian -- convention
|
||||||
|
# for Wien2K.
|
||||||
for isp in range(n_spin_blocs):
|
for isp in range(n_spin_blocs):
|
||||||
for ik in range(n_k) :
|
for ik in range(n_k):
|
||||||
n_orb = n_orbitals[ik,isp]
|
n_orb = n_orbitals[ik, isp]
|
||||||
for i in range(n_orb):
|
for i in range(n_orb):
|
||||||
hopping[ik,isp,i,i] = R.next() * energy_unit
|
hopping[ik, isp, i, i] = R.next() * energy_unit
|
||||||
|
|
||||||
# keep some things that we need for reading parproj:
|
# keep some things that we need for reading parproj:
|
||||||
things_to_set = ['n_shells','shells','n_corr_shells','corr_shells','n_spin_blocs','n_orbitals','n_k','SO','SP','energy_unit']
|
things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells',
|
||||||
for it in things_to_set: setattr(self,it,locals()[it])
|
'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit']
|
||||||
except StopIteration : # a more explicit error if the file is corrupted.
|
for it in things_to_set:
|
||||||
raise "Wien2k_converter : reading file %s failed!"%self.dft_file
|
setattr(self, it, locals()[it])
|
||||||
|
except StopIteration: # a more explicit error if the file is corrupted.
|
||||||
|
raise "Wien2k_converter : reading file %s failed!" % self.dft_file
|
||||||
|
|
||||||
R.close()
|
R.close()
|
||||||
# Reading done!
|
# Reading done!
|
||||||
|
|
||||||
# Save it to the HDF:
|
# Save it to the HDF:
|
||||||
ar = HDFArchive(self.hdf_file,'a')
|
ar = HDFArchive(self.hdf_file, 'a')
|
||||||
if not (self.dft_subgrp in ar): ar.create_group(self.dft_subgrp)
|
if not (self.dft_subgrp in ar):
|
||||||
# The subgroup containing the data. If it does not exist, it is created. If it exists, the data is overwritten!
|
ar.create_group(self.dft_subgrp)
|
||||||
things_to_save = ['energy_unit','n_k','k_dep_projection','SP','SO','charge_below','density_required',
|
# The subgroup containing the data. If it does not exist, it is
|
||||||
'symm_op','n_shells','shells','n_corr_shells','corr_shells','use_rotations','rot_mat',
|
# created. If it exists, the data is overwritten!
|
||||||
'rot_mat_time_inv','n_reps','dim_reps','T','n_orbitals','proj_mat','bz_weights','hopping',
|
things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
|
||||||
|
'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
|
||||||
|
'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
|
||||||
'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
|
'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
|
||||||
for it in things_to_save: ar[self.dft_subgrp][it] = locals()[it]
|
for it in things_to_save:
|
||||||
|
ar[self.dft_subgrp][it] = locals()[it]
|
||||||
del ar
|
del ar
|
||||||
|
|
||||||
# Symmetries are used, so now convert symmetry information for *correlated* orbitals:
|
# Symmetries are used, so now convert symmetry information for
|
||||||
self.convert_symmetry_input(orbits=self.corr_shells,symm_file=self.symmcorr_file,symm_subgrp=self.symmcorr_subgrp,SO=self.SO,SP=self.SP)
|
# *correlated* orbitals:
|
||||||
|
self.convert_symmetry_input(orbits=self.corr_shells, symm_file=self.symmcorr_file,
|
||||||
|
symm_subgrp=self.symmcorr_subgrp, SO=self.SO, SP=self.SP)
|
||||||
self.convert_misc_input()
|
self.convert_misc_input()
|
||||||
|
|
||||||
|
|
||||||
def convert_parproj_input(self):
|
def convert_parproj_input(self):
|
||||||
"""
|
"""
|
||||||
Reads the appropriate files and stores the data for the
|
Reads the appropriate files and stores the data for the
|
||||||
@ -255,31 +288,37 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return

        # get needed data from hdf file
        ar = HDFArchive(self.hdf_file, 'a')
        things_to_read = ['SP', 'SO', 'n_shells',
                          'n_k', 'n_orbitals', 'shells']

        for it in things_to_read:
            if not hasattr(self, it):
                setattr(self, it, ar[self.dft_subgrp][it])
        self.n_spin_blocs = self.SP + 1 - self.SO
        del ar

        mpi.report("Reading input from %s..." % self.parproj_file)

        dens_mat_below = [[numpy.zeros([self.shells[ish]['dim'], self.shells[ish]['dim']], numpy.complex_) for ish in range(self.n_shells)]
                          for isp in range(self.n_spin_blocs)]

        R = ConverterTools.read_fortran_file(
            self, self.parproj_file, self.fortran_to_replace)

        n_parproj = [int(R.next()) for i in range(self.n_shells)]
        n_parproj = numpy.array(n_parproj)

        # Initialise P, here a double list of matrices:
        proj_mat_all = numpy.zeros([self.n_k, self.n_spin_blocs, self.n_shells, max(
            n_parproj), max([sh['dim'] for sh in self.shells]), max(self.n_orbitals)], numpy.complex_)

        rot_mat_all = [numpy.identity(
            self.shells[ish]['dim'], numpy.complex_) for ish in range(self.n_shells)]
        rot_mat_all_time_inv = [0 for i in range(self.n_shells)]

        for ish in range(self.n_shells):
@ -288,35 +327,40 @@ class Wien2kConverter(ConverterTools):
                for ir in range(n_parproj[ish]):

                    for isp in range(self.n_spin_blocs):
                        # read real part:
                        for i in range(self.shells[ish]['dim']):
                            for j in range(self.n_orbitals[ik][isp]):
                                proj_mat_all[ik, isp, ish, ir, i, j] = R.next()

                    for isp in range(self.n_spin_blocs):
                        # read imaginary part:
                        for i in range(self.shells[ish]['dim']):
                            for j in range(self.n_orbitals[ik][isp]):
                                proj_mat_all[ik, isp, ish,
                                             ir, i, j] += 1j * R.next()

            # now read the Density Matrix for this orbital below the energy
            # window:
            for isp in range(self.n_spin_blocs):
                for i in range(self.shells[ish]['dim']):  # read real part:
                    for j in range(self.shells[ish]['dim']):
                        dens_mat_below[isp][ish][i, j] = R.next()
            for isp in range(self.n_spin_blocs):
                # read imaginary part:
                for i in range(self.shells[ish]['dim']):
                    for j in range(self.shells[ish]['dim']):
                        dens_mat_below[isp][ish][i, j] += 1j * R.next()
                if (self.SP == 0):
                    dens_mat_below[isp][ish] /= 2.0

            # Global -> local rotation matrix for this shell:
            for i in range(self.shells[ish]['dim']):  # read real part:
                for j in range(self.shells[ish]['dim']):
                    rot_mat_all[ish][i, j] = R.next()
            for i in range(self.shells[ish]['dim']):  # read imaginary part:
                for j in range(self.shells[ish]['dim']):
                    rot_mat_all[ish][i, j] += 1j * R.next()

            if (self.SP):
                rot_mat_all_time_inv[ish] = int(R.next())

@ -324,16 +368,21 @@ class Wien2kConverter(ConverterTools):
        # Reading done!

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.parproj_subgrp in ar):
            ar.create_group(self.parproj_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!
        things_to_save = ['dens_mat_below', 'n_parproj',
                          'proj_mat_all', 'rot_mat_all', 'rot_mat_all_time_inv']
        for it in things_to_save:
            ar[self.parproj_subgrp][it] = locals()[it]
        del ar

        # Symmetries are used, so now convert symmetry information for *all*
        # orbitals:
        self.convert_symmetry_input(orbits=self.shells, symm_file=self.symmpar_file,
                                    symm_subgrp=self.symmpar_subgrp, SO=self.SO, SP=self.SP)

    def convert_bands_input(self):
        """
@ -341,117 +390,134 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return

        try:
            # get needed data from hdf file
            ar = HDFArchive(self.hdf_file, 'a')
            things_to_read = ['SP', 'SO', 'n_corr_shells',
                              'n_shells', 'corr_shells', 'shells', 'energy_unit']

            for it in things_to_read:
                if not hasattr(self, it):
                    setattr(self, it, ar[self.dft_subgrp][it])
            self.n_spin_blocs = self.SP + 1 - self.SO
            del ar

            mpi.report("Reading input from %s..." % self.band_file)
            R = ConverterTools.read_fortran_file(
                self, self.band_file, self.fortran_to_replace)
            n_k = int(R.next())

            # read the list of n_orbitals for all k points
            n_orbitals = numpy.zeros([n_k, self.n_spin_blocs], numpy.int)
            for isp in range(self.n_spin_blocs):
                for ik in range(n_k):
                    n_orbitals[ik, isp] = int(R.next())

            # Initialise the projectors:
            proj_mat = numpy.zeros([n_k, self.n_spin_blocs, self.n_corr_shells, max(
                [crsh['dim'] for crsh in self.corr_shells]), numpy.max(n_orbitals)], numpy.complex_)

            # Read the projectors from the file:
            for ik in range(n_k):
                for icrsh in range(self.n_corr_shells):
                    n_orb = self.corr_shells[icrsh]['dim']
                    # first Real part for BOTH spins, due to conventions in
                    # dmftproj:
                    for isp in range(self.n_spin_blocs):
                        for i in range(n_orb):
                            for j in range(n_orbitals[ik, isp]):
                                proj_mat[ik, isp, icrsh, i, j] = R.next()
                    # now Imag part:
                    for isp in range(self.n_spin_blocs):
                        for i in range(n_orb):
                            for j in range(n_orbitals[ik, isp]):
                                proj_mat[ik, isp, icrsh, i, j] += 1j * R.next()

            hopping = numpy.zeros([n_k, self.n_spin_blocs, numpy.max(
                n_orbitals), numpy.max(n_orbitals)], numpy.complex_)

            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
            for isp in range(self.n_spin_blocs):
                for ik in range(n_k):
                    n_orb = n_orbitals[ik, isp]
                    for i in range(n_orb):
                        hopping[ik, isp, i, i] = R.next() * self.energy_unit

            # now read the partial projectors:
            n_parproj = [int(R.next()) for i in range(self.n_shells)]
            n_parproj = numpy.array(n_parproj)

            # Initialise P, here a double list of matrices:
            proj_mat_all = numpy.zeros([n_k, self.n_spin_blocs, self.n_shells, max(n_parproj), max(
                [sh['dim'] for sh in self.shells]), numpy.max(n_orbitals)], numpy.complex_)

            for ish in range(self.n_shells):
                for ik in range(n_k):
                    for ir in range(n_parproj[ish]):
                        for isp in range(self.n_spin_blocs):

                            # read real part:
                            for i in range(self.shells[ish]['dim']):
                                for j in range(n_orbitals[ik, isp]):
                                    proj_mat_all[ik, isp, ish,
                                                 ir, i, j] = R.next()

                            # read imaginary part:
                            for i in range(self.shells[ish]['dim']):
                                for j in range(n_orbitals[ik, isp]):
                                    proj_mat_all[ik, isp, ish,
                                                 ir, i, j] += 1j * R.next()

            R.close()

        except KeyError:
            raise "convert_bands_input : Needed data not found in hdf file. Consider calling convert_dft_input first!"
        except StopIteration:  # a more explicit error if the file is corrupted.
            raise "Wien2k_converter : reading file band_file failed!"

        # Reading done!

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.bands_subgrp in ar):
            ar.create_group(self.bands_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!
        things_to_save = ['n_k', 'n_orbitals', 'proj_mat',
                          'hopping', 'n_parproj', 'proj_mat_all']
        for it in things_to_save:
            ar[self.bands_subgrp][it] = locals()[it]
        del ar

    def convert_misc_input(self):
        """
        Reads additional information on:

        - the band window from :file:`case.oubwin`,
        - lattice parameters from :file:`case.struct`,
        - symmetries from :file:`case.outputs`,

        if those Wien2k files are present and stores the data in the hdf5 archive.
        This function is automatically called by :meth:`convert_dft_input <pytriqs.applications.dft.converters.wien2k_converter.Wien2kConverter.convert_dft_input>`.

        """

        if not (mpi.is_master_node()):
            return

        # Check if SP, SO and n_k are already in h5
        ar = HDFArchive(self.hdf_file, 'r')
        if not (self.dft_subgrp in ar):
            raise IOError, "convert_misc_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp
        SP = ar[self.dft_subgrp]['SP']
        SO = ar[self.dft_subgrp]['SO']
        n_k = ar[self.dft_subgrp]['n_k']
        del ar

        things_to_save = []

        # Read relevant data from .oubwin/up/dn files
@ -459,32 +525,35 @@ class Wien2kConverter(ConverterTools):
        # band_window: Contains the index of the lowest and highest band within the
        # projected subspace (used by dmftproj) for each k-point.

        if (SP == 0 or SO == 1):
            files = [self.bandwin_file]
        elif SP == 1:
            files = [self.bandwin_file + 'up', self.bandwin_file + 'dn']
        else:  # SO and SP can't both be 1
            assert 0, "convert_misc_input: Reading oubwin error! Check SP and SO!"

        band_window = [None for isp in range(SP + 1 - SO)]
        for isp, f in enumerate(files):
            if os.path.exists(f):
                mpi.report("Reading input from %s..." % f)
                R = ConverterTools.read_fortran_file(
                    self, f, self.fortran_to_replace)
                n_k_oubwin = int(R.next())
                if (n_k_oubwin != n_k):
                    mpi.report(
                        "convert_misc_input : WARNING : n_k in case.oubwin is different from n_k in case.klist")
                assert int(
                    R.next()) == SO, "convert_misc_input: SO is inconsistent in oubwin file!"

                band_window[isp] = numpy.zeros((n_k_oubwin, 2), dtype=int)
                for ik in xrange(n_k_oubwin):
                    R.next()
                    band_window[isp][ik, 0] = R.next()  # lowest band
                    band_window[isp][ik, 1] = R.next()  # highest band
                    R.next()
                things_to_save.append('band_window')

                R.close()  # Reading done!

        # Read relevant data from .struct file
        ######################################
@ -493,39 +562,44 @@ class Wien2kConverter(ConverterTools):
        # lattice_angles: unit cell angles in rad

        if (os.path.exists(self.struct_file)):
            mpi.report("Reading input from %s..." % self.struct_file)

            with open(self.struct_file) as R:
                try:
                    R.readline()
                    lattice_type = R.readline().split()[0]
                    R.readline()
                    temp = R.readline()
                    lattice_constants = numpy.array(
                        [float(temp[0 + 10 * i:10 + 10 * i].strip()) for i in range(3)])
                    lattice_angles = numpy.array(
                        [float(temp[30 + 10 * i:40 + 10 * i].strip()) for i in range(3)]) * numpy.pi / 180.0
                    things_to_save.extend(
                        ['lattice_type', 'lattice_constants', 'lattice_angles'])
                except IOError:
                    raise "convert_misc_input: reading file %s failed" % self.struct_file

        # Read relevant data from .outputs file
        #######################################
        # rot_symmetries: matrix representation of all (space group) symmetry
        # operations

        if (os.path.exists(self.outputs_file)):
            mpi.report("Reading input from %s..." % self.outputs_file)

            rot_symmetries = []
            with open(self.outputs_file) as R:
                try:
                    while 1:
                        temp = R.readline().strip(' ').split()
                        if (temp[0] == 'PGBSYM:'):
                            n_symmetries = int(temp[-1])
                            break
                    for i in range(n_symmetries):
                        while 1:
                            if (R.readline().strip().split()[0] == 'Symmetry'):
                                break
                        sym_i = numpy.zeros((3, 3), dtype=float)
                        for ir in range(3):
                            temp = R.readline().strip().split()
                            for ic in range(3):
@ -535,30 +609,33 @@ class Wien2kConverter(ConverterTools):
                    things_to_save.extend(['n_symmetries', 'rot_symmetries'])
                    things_to_save.append('rot_symmetries')
                except IOError:
                    raise "convert_misc_input: reading file %s failed" % self.outputs_file

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.misc_subgrp in ar):
            ar.create_group(self.misc_subgrp)
        for it in things_to_save:
            ar[self.misc_subgrp][it] = locals()[it]
        del ar

    def convert_transport_input(self):
        """
        Reads the necessary information for transport calculations on:

        - the optical band window and the velocity matrix elements from :file:`case.pmat`

        and stores the data in the hdf5 archive.

        """

        if not (mpi.is_master_node()):
            return

        # Check if SP, SO and n_k are already in h5
        ar = HDFArchive(self.hdf_file, 'r')
        if not (self.dft_subgrp in ar):
            raise IOError, "convert_transport_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp
        SP = ar[self.dft_subgrp]['SP']
        SO = ar[self.dft_subgrp]['SO']
        n_k = ar[self.dft_subgrp]['n_k']
@ -571,20 +648,22 @@ class Wien2kConverter(ConverterTools):
        # velocities_k: velocity (momentum) matrix elements between all bands in band_window_optics
        # and each k-point.

        if (SP == 0 or SO == 1):
            files = [self.pmat_file]
        elif SP == 1:
            files = [self.pmat_file + 'up', self.pmat_file + 'dn']
        else:  # SO and SP can't both be 1
            assert 0, "convert_transport_input: Reading velocity file error! Check SP and SO!"

        velocities_k = [[] for f in files]
        band_window_optics = []
        for isp, f in enumerate(files):
            if not os.path.exists(f):
                raise IOError, "convert_transport_input: File %s does not exist" % f
            mpi.report("Reading input from %s..." % f)

            R = ConverterTools.read_fortran_file(
                self, f, {'D': 'E', '(': '', ')': '', ',': ' '})
            band_window_optics_isp = []
            for ik in xrange(n_k):
                R.next()
@ -592,26 +671,34 @@ class Wien2kConverter(ConverterTools):
                nu2 = int(R.next())
                band_window_optics_isp.append((nu1, nu2))
                n_bands = nu2 - nu1 + 1
                for _ in range(4):
                    R.next()
                if n_bands <= 0:
                    velocity_xyz = numpy.zeros((1, 1, 3), dtype=complex)
                else:
                    velocity_xyz = numpy.zeros(
                        (n_bands, n_bands, 3), dtype=complex)
                for nu_i in range(n_bands):
                    for nu_j in range(nu_i, n_bands):
                        for i in range(3):
                            velocity_xyz[nu_i][nu_j][
                                i] = R.next() + R.next() * 1j
                            if (nu_i != nu_j):
                                velocity_xyz[nu_j][nu_i][i] = velocity_xyz[
                                    nu_i][nu_j][i].conjugate()
                velocities_k[isp].append(velocity_xyz)
            band_window_optics.append(numpy.array(band_window_optics_isp))
            R.close()  # Reading done!

        # Put data to HDF5 file
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.transp_subgrp in ar):
            ar.create_group(self.transp_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!!!
        things_to_save = ['band_window_optics', 'velocities_k']
        for it in things_to_save:
            ar[self.transp_subgrp][it] = locals()[it]
        del ar

    def convert_symmetry_input(self, orbits, symm_file, symm_subgrp, SO, SP):
@ -635,59 +722,70 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return
        mpi.report("Reading input from %s..." % symm_file)

        n_orbits = len(orbits)

        R = ConverterTools.read_fortran_file(
            self, symm_file, self.fortran_to_replace)

        try:
            n_symm = int(R.next())  # Number of symmetry operations
            n_atoms = int(R.next())  # number of atoms involved
            perm = [[int(R.next()) for i in range(n_atoms)]
                    for j in range(n_symm)]  # list of permutations of the atoms
            if SP:
                # time inversion for SO coupling
                time_inv = [int(R.next()) for j in range(n_symm)]
            else:
                time_inv = [0 for j in range(n_symm)]

            # Now read matrices:
            mat = []
            for i_symm in range(n_symm):

                mat.append([numpy.zeros([orbits[orb]['dim'], orbits[orb][
                           'dim']], numpy.complex_) for orb in range(n_orbits)])
                for orb in range(n_orbits):
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            # real part
                            mat[i_symm][orb][i, j] = R.next()
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            mat[i_symm][orb][i, j] += 1j * \
                                R.next()  # imaginary part

            mat_tinv = [numpy.identity(orbits[orb]['dim'], numpy.complex_)
                        for orb in range(n_orbits)]

            if ((SO == 0) and (SP == 0)):
                # here we need an additional time inversion operation, so read
                # it:
                for orb in range(n_orbits):
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            # real part
                            mat_tinv[orb][i, j] = R.next()
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            mat_tinv[orb][i, j] += 1j * \
                                R.next()  # imaginary part

        except StopIteration:  # a more explicit error if the file is corrupted.
            raise "Wien2k_converter : reading file symm_file failed!"

        R.close()
        # Reading done!

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (symm_subgrp in ar):
            ar.create_group(symm_subgrp)
        things_to_save = ['n_symm', 'n_atoms', 'perm',
                          'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
        for it in things_to_save:
            ar[symm_subgrp][it] = locals()[it]
        del ar
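The hunks above are pure reformatting of the Wien2k converter; the call sequence it expects is unchanged. As a minimal usage sketch, assuming the module path quoted in the docstrings (pytriqs.applications.dft.converters.wien2k_converter) and a hypothetical case name 'mycase' already processed by Wien2k and dmftproj:

from pytriqs.applications.dft.converters.wien2k_converter import Wien2kConverter

conv = Wien2kConverter(filename='mycase')  # 'mycase' is a placeholder case name
conv.convert_dft_input()        # fills dft_subgrp in the hdf5 archive and calls convert_misc_input()
conv.convert_parproj_input()    # partial projectors and below-window density matrices
conv.convert_transport_input()  # optical band window and velocities read from case.pmat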
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,5 +1,5 @@

##########################################################################
#
# TRIQS: a Toolbox for Research in Interacting Quantum Systems
#
@ -18,14 +18,16 @@
# You should have received a copy of the GNU General Public License along with
# TRIQS. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################

import copy
import numpy
from types import *
from pytriqs.gf.local import *
from pytriqs.archive import *
import pytriqs.utility.mpi as mpi


class Symmetry:
    """
    This class provides the routines for applying symmetry operations for the k sums.
@ -33,10 +35,10 @@ class Symmetry:
    rotational matrices for each symmetry operation.
    """

    def __init__(self, hdf_file, subgroup=None):
        """
        Initialises the class.

        Parameters
        ----------
        hdf_file : string
@ -46,69 +48,80 @@ class Symmetry:
            the data is stored at the root of the hdf5 archive.
        """

        assert type(
            hdf_file) == StringType, "Symmetry: hdf_file must be a filename."
        self.hdf_file = hdf_file
        things_to_read = ['n_symm', 'n_atoms', 'perm',
                          'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
        for it in things_to_read:
            setattr(self, it, 0)

        if mpi.is_master_node():
            # Read the stuff on master:
            ar = HDFArchive(hdf_file, 'r')
            if subgroup is None:
                ar2 = ar
            else:
                ar2 = ar[subgroup]

            for it in things_to_read:
                setattr(self, it, ar2[it])
            del ar2
            del ar

        # Broadcasting
        for it in things_to_read:
            setattr(self, it, mpi.bcast(getattr(self, it)))

        # now define the mapping of orbitals:
        # self.orb_map[iorb] = jorb gives the permutation of the orbitals as given in the list, when the
        # permutation of the atoms is done:
        self.n_orbits = len(self.orbits)
        self.orb_map = [[0 for iorb in range(
            self.n_orbits)] for i_symm in range(self.n_symm)]
        for i_symm in range(self.n_symm):
            for iorb in range(self.n_orbits):
                srch = copy.deepcopy(self.orbits[iorb])
                srch['atom'] = self.perm[i_symm][self.orbits[iorb]['atom'] - 1]
                self.orb_map[i_symm][iorb] = self.orbits.index(srch)

    def symmetrize(self, obj):
        """
        Symmetrizes a given object.

        Parameters
        ----------
        obj : list
            object to symmetrize. It has to be given as list, where its length is determined by the number
            of equivalent members of the object. Two types of objects are supported:

            - BlockGf : list of Green's functions,
            - Matrices : The format is taken from density matrices as obtained from Green's functions (DictType).

        Returns
        -------
        symm_obj : list
            Symmetrized object, of the same type as input object.
        """

        assert isinstance(
            obj, list), "symmetrize: obj has to be a list of objects."
        assert len(
            obj) == self.n_orbits, "symmetrize: obj has to be a list of the same length as defined in the init."

        if isinstance(obj[0], BlockGf):
            # here the result is stored, it is a BlockGf!
            symm_obj = [obj[i].copy() for i in range(len(obj))]
            for iorb in range(self.n_orbits):
                symm_obj[iorb].zero()  # set to zero
        else:
            # if not a BlockGf, we assume it is a matrix (density matrix), has
            # to be complex since self.mat is complex!
            symm_obj = [copy.deepcopy(obj[i]) for i in range(len(obj))]
            for iorb in range(self.n_orbits):
                if type(symm_obj[iorb]) == DictType:
                    for ii in symm_obj[iorb]:
                        symm_obj[iorb][ii] *= 0.0
                else:
                    symm_obj[iorb] *= 0.0
@ -118,12 +131,15 @@ class Symmetry:
                dim = self.orbits[iorb]['dim']
                jorb = self.orb_map[i_symm][iorb]

                if isinstance(obj[0], BlockGf):

                    tmp = obj[iorb].copy()
                    if self.time_inv[i_symm]:
                        tmp << tmp.transpose()
                    for bname, gf in tmp:
                        tmp[bname].from_L_G_R(self.mat[i_symm][iorb], tmp[bname], self.mat[
                                              i_symm][iorb].conjugate().transpose())
                    tmp *= 1.0 / self.n_symm
                    symm_obj[jorb] += tmp

                else:
@ -131,17 +147,17 @@ class Symmetry:
                    if type(obj[iorb]) == DictType:
                        for ii in obj[iorb]:
                            if self.time_inv[i_symm] == 0:
                                symm_obj[jorb][ii] += numpy.dot(numpy.dot(self.mat[i_symm][iorb], obj[iorb][ii]),
                                                                self.mat[i_symm][iorb].conjugate().transpose()) / self.n_symm
                            else:
                                symm_obj[jorb][ii] += numpy.dot(numpy.dot(self.mat[i_symm][iorb], obj[iorb][ii].conjugate()),
                                                                self.mat[i_symm][iorb].conjugate().transpose()) / self.n_symm
                    else:
                        if self.time_inv[i_symm] == 0:
                            symm_obj[jorb] += numpy.dot(numpy.dot(self.mat[i_symm][iorb], obj[iorb]),
                                                        self.mat[i_symm][iorb].conjugate().transpose()) / self.n_symm
                        else:
                            symm_obj[jorb] += numpy.dot(numpy.dot(self.mat[i_symm][iorb], obj[iorb].conjugate()),
                                                        self.mat[i_symm][iorb].conjugate().transpose()) / self.n_symm

        # Markus: This does not what it is supposed to do, check how this should work (keep for now)
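The Symmetry class is normally driven through SumkDFT, but it can also be used directly on the converter output. A rough sketch, where the import path, the archive name 'mycase.h5' and the subgroup name are placeholders standing in for the symm_subgrp actually written by the converter:

from pytriqs.applications.dft.symmetry import Symmetry  # assumed import path

symm = Symmetry('mycase.h5', subgroup='dft_symmcorr_input')  # both names are placeholders
# obj_list: a list with one entry per orbit, e.g. BlockGf objects or density-matrix dicts;
# symmetrize returns the symmetrized copies and leaves the input list untouched
symmetrized = symm.symmetrize(obj_list)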
@ -6,6 +6,7 @@ import pytriqs.utility.mpi as mpi
import numpy
import copy


class TransBasis:
    """
    Computes rotations into a new basis, using the condition that a given property is diagonal in the new basis.
@ -14,19 +15,19 @@ class TransBasis:
    def __init__(self, SK=None, hdf_datafile=None):
        """
        Initialization of the class. There are two ways to do so:

        - existing SumkLDA class : when you have an existing SumkLDA instance
        - from hdf5 archive : when you want to use data from hdf5 archive

        Giving the class instance overrides giving the string for the hdf5 archive.

        Parameters
        ----------
        SK : class SumkLDA, optional
            Existing instance of SumkLDA class.
        hdf_datafile : string, optional
            Name of hdf5 archive to be used.

        """

        if SK is None:
@ -35,68 +36,70 @@ class TransBasis:
                mpi.report("trans_basis: give SK instance or HDF filename!")
                return 0

            Converter = Wien2kConverter(filename=hdf_datafile,repacking=False)
            Converter = Wien2kConverter(filename=hdf_datafile, repacking=False)
            Converter.convert_dft_input()
            del Converter

            self.SK = SumkDFT(hdf_file=hdf_datafile+'.h5',use_dft_blocks=False)
            self.SK = SumkDFT(hdf_file=hdf_datafile +
                              '.h5', use_dft_blocks=False)
        else:
            self.SK = SK

        self.T = copy.deepcopy(self.SK.T[0])
        self.w = numpy.identity(SK.corr_shells[0]['dim'])
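As a usage illustration (editorial addition, not part of the commit): a minimal sketch of constructing the class, assuming the package is importable as pytriqs.applications.dft and that a converted Wien2k archive 'case.h5' already exists — both the import path and the file name are placeholders.

from pytriqs.applications.dft.sumk_dft import SumkDFT
from pytriqs.applications.dft.trans_basis import TransBasis

SK = SumkDFT(hdf_file='case.h5', use_dft_blocks=False)
TB = TransBasis(SK=SK)                  # reuse an existing SumkDFT instance
# TB = TransBasis(hdf_datafile='case')  # or let the class run the Wien2k converter itself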
    def calculate_diagonalisation_matrix(self, prop_to_be_diagonal = 'eal'):
    def calculate_diagonalisation_matrix(self, prop_to_be_diagonal='eal'):
        """
        Calculates the diagonalisation matrix w, and stores it as a member of the class.

        Parameters
        ----------
        prop_to_be_diagonal : string, optional
            Defines the property to be diagonalized.

            - 'eal' : local hamiltonian (i.e. crystal field)
            - 'dm' : local density matrix

        Returns
        -------
        wsqr : double
            Measure for the degree of rotation done by the diagonalisation. wsqr=1 means no rotation.

        """

        if prop_to_be_diagonal == 'eal':
            prop = self.SK.eff_atomic_levels()[0]
        elif prop_to_be_diagonal == 'dm':
            prop = self.SK.density_matrix(method = 'using_point_integration')[0]
            prop = self.SK.density_matrix(method='using_point_integration')[0]
        else:
            mpi.report("trans_basis: not a valid quantity to be diagonal. Choices are 'eal' or 'dm'.")
            mpi.report(
                "trans_basis: not a valid quantity to be diagonal. Choices are 'eal' or 'dm'.")
            return 0

        if self.SK.SO == 0:
            self.eig,self.w = numpy.linalg.eigh(prop['up'])
            self.eig, self.w = numpy.linalg.eigh(prop['up'])
            # calculate new Transformation matrix
            self.T = numpy.dot(self.T.transpose().conjugate(),self.w).conjugate().transpose()
            self.T = numpy.dot(self.T.transpose().conjugate(),
                               self.w).conjugate().transpose()
        else:
            self.eig,self.w = numpy.linalg.eigh(prop['ud'])
            self.eig, self.w = numpy.linalg.eigh(prop['ud'])
            # calculate new Transformation matrix
            self.T = numpy.dot(self.T.transpose().conjugate(),self.w).conjugate().transpose()
            self.T = numpy.dot(self.T.transpose().conjugate(),
                               self.w).conjugate().transpose()

        # measure for the 'unity' of the transformation:
        wsqr = sum(abs(self.w.diagonal())**2)/self.w.diagonal().size
        wsqr = sum(abs(self.w.diagonal())**2) / self.w.diagonal().size
        return wsqr
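For orientation (editorial addition): the 'eal' branch simply diagonalises a Hermitian block with numpy.linalg.eigh and quantifies how far the eigenbasis is from the original one via wsqr. A standalone sketch with an invented 2x2 crystal-field matrix:

import numpy

eal = numpy.array([[0.0, 0.3],
                   [0.3, 0.1]])          # invented local Hamiltonian block
eig, w = numpy.linalg.eigh(eal)
wsqr = sum(abs(w.diagonal())**2) / w.diagonal().size
print wsqr   # < 1 here: the eigenbasis is rotated with respect to the input basis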
    def rotate_gf(self,gf_to_rot):
    def rotate_gf(self, gf_to_rot):
        """
        Uses the diagonalisation matrix w to rotate a given GF into the new basis.

        Parameters
        ----------
        gf_to_rot : BlockGf
            Green's function block to rotate.

        Returns
        -------
        gfreturn : BlockGf
@ -104,86 +107,90 @@ class TransBasis:
        """

        # build a full GF
        gfrotated = BlockGf( name_block_generator = [ (block,GfImFreq(indices = inner, mesh = gf_to_rot.mesh)) for block,inner in self.SK.gf_struct_sumk[0] ], make_copies = False)
        gfrotated = BlockGf(name_block_generator=[(block, GfImFreq(
            indices=inner, mesh=gf_to_rot.mesh)) for block, inner in self.SK.gf_struct_sumk[0]], make_copies=False)

        # transform the CTQMC blocks to the full matrix:
        ish = self.SK.corr_to_inequiv[0]  # ish is the index of the inequivalent shell corresponding to icrsh
        # ish is the index of the inequivalent shell corresponding to icrsh
        ish = self.SK.corr_to_inequiv[0]
        for block, inner in self.gf_struct_solver[ish].iteritems():
            for ind1 in inner:
                for ind2 in inner:
                    gfrotated[self.SK.solver_to_sumk_block[ish][block]][ind1,ind2] << gf_to_rot[block][ind1,ind2]
                    gfrotated[self.SK.solver_to_sumk_block[ish][block]][
                        ind1, ind2] << gf_to_rot[block][ind1, ind2]

        # Rotate using the matrix w
        for bname,gf in gfrotated:
        for bname, gf in gfrotated:
            gfrotated[bname].from_L_G_R(self.w.transpose().conjugate(),gfrotated[bname],self.w)
            gfrotated[bname].from_L_G_R(
                self.w.transpose().conjugate(), gfrotated[bname], self.w)

        gfreturn = gf_to_rot.copy()
        # Put back into CTQMC basis:
        for block, inner in self.gf_struct_solver[ish].iteritems():
            for ind1 in inner:
                for ind2 in inner:
                    gfreturn[block][ind1,ind2] << gfrotated[self.SK.solver_to_sumk_block[0][block]][ind1,ind2]
                    gfreturn[block][ind1, ind2] << gfrotated[
                        self.SK.solver_to_sumk_block[0][block]][ind1, ind2]

        return gfreturn
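The from_L_G_R call above applies the transformation G -> w† G w to every frequency of each block. A plain-numpy sketch of the same rotation on a single matrix (editorial addition; all input values invented):

import numpy

G = numpy.array([[1.0, 0.2],
                 [0.2, 0.5]])                  # stand-in for one frequency slice of a GF block
eig, w = numpy.linalg.eigh(G)
G_rot = numpy.dot(numpy.dot(w.transpose().conjugate(), G), w)
print numpy.allclose(G_rot, numpy.diag(eig))   # True: G is diagonal in the rotated basis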
    def write_trans_file(self, filename):
        """
        Writes the new transformation T into a file readable by dmftproj. By that, the requested quantity is
        diagonal already at input.

        Parameters
        ----------
        filename : string
            Name of the file where the transformation is stored.
        """

        f = open(filename,'w')
        f = open(filename, 'w')
        Tnew = self.T.conjugate()
        dim = self.SK.corr_shells[0]['dim']

        if self.SK.SO == 0:

            for i in range(dim):
                st = ''
                for k in range(dim):
                    st += " %9.6f"%(Tnew[i,k].real)
                    st += " %9.6f" % (Tnew[i, k].real)
                    st += " %9.6f"%(Tnew[i,k].imag)
                    st += " %9.6f" % (Tnew[i, k].imag)
                for k in range(2*dim):
                for k in range(2 * dim):
                    st += " 0.0"

                if i < (dim-1):
                if i < (dim - 1):
                    f.write("%s\n"%(st))
                    f.write("%s\n" % (st))
                else:
                    st1 = st.replace(' ','*',1)
                    st1 = st.replace(' ', '*', 1)
                    f.write("%s\n"%(st1))
                    f.write("%s\n" % (st1))

            for i in range(dim):
                st = ''
                for k in range(2*dim):
                for k in range(2 * dim):
                    st += " 0.0"
                for k in range(dim):
                    st += " %9.6f"%(Tnew[i,k].real)
                    st += " %9.6f" % (Tnew[i, k].real)
                    st += " %9.6f"%(Tnew[i,k].imag)
                    st += " %9.6f" % (Tnew[i, k].imag)

                if i < (dim-1):
                if i < (dim - 1):
                    f.write("%s\n"%(st))
                    f.write("%s\n" % (st))
                else:
                    st1 = st.replace(' ','*',1)
                    st1 = st.replace(' ', '*', 1)
                    f.write("%s\n"%(st1))
                    f.write("%s\n" % (st1))

        else:

            for i in range(dim):
                st = ''
                for k in range(dim):
                    st += " %9.6f"%(Tnew[i,k].real)
                    st += " %9.6f" % (Tnew[i, k].real)
                    st += " %9.6f"%(Tnew[i,k].imag)
                    st += " %9.6f" % (Tnew[i, k].imag)

                if i < (dim-1):
                if i < (dim - 1):
                    f.write("%s\n"%(st))
                    f.write("%s\n" % (st))
                else:
                    st1 = st.replace(' ','*',1)
                    st1 = st.replace(' ', '*', 1)
                    f.write("%s\n"%(st1))
                    f.write("%s\n" % (st1))

        f.close()
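To make the file layout written above concrete (editorial illustration; the 2x2 matrix is invented): for SO=0 every row carries dim (real, imag) pairs for one spin block plus 2*dim zero columns for the other block, and the first blank of the last row is replaced by '*', presumably as an end-of-block marker for dmftproj.

import numpy

T = numpy.identity(2, numpy.complex_)    # invented transformation matrix
dim = 2
for i in range(dim):
    st = ''
    for k in range(dim):
        st += " %9.6f %9.6f" % (T[i, k].real, T[i, k].imag)
    st += " 0.0" * (2 * dim)
    if i == dim - 1:
        st = st.replace(' ', '*', 1)
    print st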
@ -5,8 +5,8 @@ import numpy
import subprocess

if len(sys.argv) < 2:
    print "Usage: python update_archive.py old_archive [v1.0|v1.2]"
    sys.exit()

print """
This script is an attempt to update your archive to TRIQS 1.2.
@ -15,13 +15,16 @@ Please keep a copy of your old archive as this script is
If you encounter any problem please report it on github!
"""


def convert_shells(shells):
    shell_entries = ['atom', 'sort', 'l', 'dim']
    return [ {name: int(val) for name, val in zip(shell_entries, shells[ish])} for ish in range(len(shells)) ]
    return [{name: int(val) for name, val in zip(shell_entries, shells[ish])} for ish in range(len(shells))]


def convert_corr_shells(corr_shells):
    corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
    return [ {name: int(val) for name, val in zip(corr_shell_entries, corr_shells[icrsh])} for icrsh in range(len(corr_shells)) ]
    return [{name: int(val) for name, val in zip(corr_shell_entries, corr_shells[icrsh])} for icrsh in range(len(corr_shells))]
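As an illustration of what these converters do (editorial addition; the entry values are invented): an old-style corr_shells entry is a flat list of numbers, which becomes a keyed dict in the new format.

# Invented v1.0-style entry: [atom, sort, l, dim, SO, irep]
old_entry = [1, 1, 2, 5, 0, 0]
corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
print {name: int(val) for name, val in zip(corr_shell_entries, old_entry)}
# -> {'atom': 1, 'sort': 1, 'l': 2, 'dim': 5, 'SO': 0, 'irep': 0} (key order may differ)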

def det_shell_equivalence(corr_shells):
    corr_to_inequiv = [0 for i in range(len(corr_shells))]
|
    n_inequiv_shells = 1

    if len(corr_shells) > 1:
        inequiv_sort = [ corr_shells[0]['sort'] ]
        inequiv_sort = [corr_shells[0]['sort']]
        inequiv_l = [ corr_shells[0]['l'] ]
        inequiv_l = [corr_shells[0]['l']]
        for i in range(len(corr_shells)-1):
        for i in range(len(corr_shells) - 1):
            is_equiv = False
            for j in range(n_inequiv_shells):
                if (inequiv_sort[j]==corr_shells[i+1]['sort']) and (inequiv_l[j]==corr_shells[i+1]['l']):
                if (inequiv_sort[j] == corr_shells[i + 1]['sort']) and (inequiv_l[j] == corr_shells[i + 1]['l']):
                    is_equiv = True
                    corr_to_inequiv[i+1] = j
                    corr_to_inequiv[i + 1] = j
            if is_equiv==False:
            if is_equiv == False:
                corr_to_inequiv[i+1] = n_inequiv_shells
                corr_to_inequiv[i + 1] = n_inequiv_shells
                n_inequiv_shells += 1
                inequiv_sort.append( corr_shells[i+1]['sort'] )
                inequiv_sort.append(corr_shells[i + 1]['sort'])
                inequiv_l.append( corr_shells[i+1]['l'] )
                inequiv_l.append(corr_shells[i + 1]['l'])
                inequiv_to_corr.append( i+1 )
                inequiv_to_corr.append(i + 1)

    return n_inequiv_shells, corr_to_inequiv, inequiv_to_corr
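Worked example of the bookkeeping this function produces (editorial addition; the shell data are invented). Three correlated shells, of which the first two share 'sort' and 'l', group into two inequivalent shells; below is a compact re-statement of the same grouping logic, not the function itself:

corr_shells = [{'sort': 0, 'l': 2}, {'sort': 0, 'l': 2}, {'sort': 1, 'l': 2}]  # invented

seen = []               # distinct (sort, l) combinations, in order of appearance
corr_to_inequiv = []
inequiv_to_corr = []
for icrsh, sh in enumerate(corr_shells):
    key = (sh['sort'], sh['l'])
    if key not in seen:
        seen.append(key)
        inequiv_to_corr.append(icrsh)
    corr_to_inequiv.append(seen.index(key))

print len(seen), corr_to_inequiv, inequiv_to_corr   # 2 [0, 0, 1] [0, 2]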
@ -50,48 +53,50 @@ def det_shell_equivalence(corr_shells):
### Main ###

filename = sys.argv[1]
if len(sys.argv) > 2:
    from_v = sys.argv[2]
else:  # Assume updating an old v1.0 script
    from_v = 'v1.0'
A = h5py.File(filename)

# Rename groups
old_to_new = {'SumK_LDA':'dft_input', 'SumK_LDA_ParProj':'dft_parproj_input',
old_to_new = {'SumK_LDA': 'dft_input', 'SumK_LDA_ParProj': 'dft_parproj_input',
              'SymmCorr':'dft_symmcorr_input', 'SymmPar':'dft_symmpar_input', 'SumK_LDA_Bands':'dft_bands_input'}
              'SymmCorr': 'dft_symmcorr_input', 'SymmPar': 'dft_symmpar_input', 'SumK_LDA_Bands': 'dft_bands_input'}

for old, new in old_to_new.iteritems():
    if old not in A.keys(): continue
    print "Changing %s to %s ..."%(old, new)
    A.copy(old,new)
    if old not in A.keys():
        continue
    print "Changing %s to %s ..." % (old, new)
    A.copy(old, new)
    del(A[old])
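Side note (editorial addition, not part of the commit): at the top level of an HDF5 file, h5py renames a group by copying it under the new name and deleting the original, which is exactly the pattern of the loop above. A minimal sketch against a throwaway file (the file name is a placeholder):

import h5py

with h5py.File('scratch_example.h5', 'w') as f:
    f.create_group('SumK_LDA')
    f.copy('SumK_LDA', 'dft_input')   # duplicate under the new name
    del f['SumK_LDA']                 # drop the old name
    print f.keys()                    # [u'dft_input'] or similar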
# Move output items from dft_input to user_data
move_to_output = ['chemical_potential','dc_imp','dc_energ']
move_to_output = ['chemical_potential', 'dc_imp', 'dc_energ']
for obj in move_to_output:
    if obj in A['dft_input'].keys():
        if 'user_data' not in A: A.create_group('user_data')
        print "Moving %s to user_data ..."%obj
        A.copy('dft_input/'+obj,'user_data/'+obj)
        if 'user_data' not in A:
            A.create_group('user_data')
        print "Moving %s to user_data ..." % obj
        A.copy('dft_input/' + obj, 'user_data/' + obj)
        del(A['dft_input'][obj])

# Delete obsolete quantities
to_delete = ['gf_struct_solver','map_inv','map','deg_shells','h_field']
to_delete = ['gf_struct_solver', 'map_inv', 'map', 'deg_shells', 'h_field']
for obj in to_delete:
    if obj in A['dft_input'].keys():
        del(A['dft_input'][obj])

if from_v == 'v1.0':
    # Update shells and corr_shells to list of dicts
    shells_old = HDFArchive(filename,'r')['dft_input']['shells']
    shells_old = HDFArchive(filename, 'r')['dft_input']['shells']
    corr_shells_old = HDFArchive(filename,'r')['dft_input']['corr_shells']
    corr_shells_old = HDFArchive(filename, 'r')['dft_input']['corr_shells']
    shells = convert_shells(shells_old)
    corr_shells = convert_corr_shells(corr_shells_old)
    del(A['dft_input']['shells'])
    del(A['dft_input']['corr_shells'])
    A.close()
    # Need to use HDFArchive for the following
    HDFArchive(filename,'a')['dft_input']['shells'] = shells
    HDFArchive(filename, 'a')['dft_input']['shells'] = shells
    HDFArchive(filename,'a')['dft_input']['corr_shells'] = corr_shells
    HDFArchive(filename, 'a')['dft_input']['corr_shells'] = corr_shells
    A = h5py.File(filename)

# Add shell equivalency quantities
@ -102,32 +107,36 @@ if 'n_inequiv_shells' not in A['dft_input']:
    A['dft_input']['inequiv_to_corr'] = equiv_shell_info[2]

# Rename variables
groups = ['dft_symmcorr_input','dft_symmpar_input']
groups = ['dft_symmcorr_input', 'dft_symmpar_input']
for group in groups:
    if group not in A.keys(): continue
    if 'n_s' not in A[group]: continue
    if group not in A.keys():
        continue
    if 'n_s' not in A[group]:
        continue
    print "Changing n_s to n_symm ..."
    A[group].move('n_s','n_symm')
    A[group].move('n_s', 'n_symm')
    # Convert orbits to list of dicts
    orbits_old = HDFArchive(filename,'r')[group]['orbits']
    orbits_old = HDFArchive(filename, 'r')[group]['orbits']
    orbits = convert_corr_shells(orbits_old)
    del(A[group]['orbits'])
    A.close()
    HDFArchive(filename,'a')[group]['orbits'] = orbits
    HDFArchive(filename, 'a')[group]['orbits'] = orbits
    A = h5py.File(filename)

groups = ['dft_parproj_input']
for group in groups:
    if group not in A.keys(): continue
    if 'proj_mat_pc' not in A[group]: continue
    if group not in A.keys():
        continue
    if 'proj_mat_pc' not in A[group]:
        continue
    print "Changing proj_mat_pc to proj_mat_all ..."
    A[group].move('proj_mat_pc','proj_mat_all')
    A[group].move('proj_mat_pc', 'proj_mat_all')

A.close()

# Repack to reclaim disk space
retcode = subprocess.call(["h5repack","-i%s"%filename, "-otemphgfrt.h5"])
retcode = subprocess.call(["h5repack", "-i%s" % filename, "-otemphgfrt.h5"])
if retcode != 0:
    print "h5repack failed!"
else:
    subprocess.call(["mv","-f","temphgfrt.h5","%s"%filename])
    subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % filename])
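After the update it can be worth a quick sanity check of what ended up in the archive (editorial addition; the archive name is a placeholder):

import h5py

with h5py.File('old_archive.h5', 'r') as A:
    print A.keys()                 # expect 'dft_input', possibly 'user_data', symmetry groups, ...
    print A['dft_input'].keys()    # renamed and converted quantities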