mirror of https://github.com/triqs/dft_tools
synced 2024-11-18 12:03:50 +01:00

[doc] fixes for Vasp interface and general doc fix

This commit is contained in:
parent 4e9ce51a0f
commit b30559a212
@@ -118,8 +118,10 @@ echo " Number of iterations with fixed density: $NDFTITER"
 echo " VASP version: $VASP_VERSION"
 echo " Script name: $DMFT_SCRIPT"

-rm -f vasp.lock
-stdbuf -o 0 $MPIRUN_CMD -np $NPROC "$VASP_DIR" &
+rm -f vasp.lock STOPCAR
+# run in serial and use OMP_NUM_THREADS here for vasp >=6.2
+# otherwise set to -np $NPROC
+stdbuf -o 0 $MPIRUN_CMD -np 1 "$VASP_DIR" &


 $MPIRUN_CMD -np $NPROC @TRIQS_PYTHON_EXECUTABLE@ -m triqs_dft_tools.converters.plovasp.sc_dmft $(jobs -p) $NITER $NDFTITER $DMFT_SCRIPT 'plo.cfg' $VASP_VERSION || kill %1
@@ -76,6 +76,6 @@ endif()
 # ---------------------------------
 install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html/ COMPONENT documentation DESTINATION share/doc/${PROJECT_NAME}
 FILES_MATCHING
-REGEX "\\.(html|pdf|png|gif|jpg|svg|ico|js|xsl|css|py|txt|inv|bib|ttf|woff2|eot)$"
+REGEX "\\.(html|pdf|png|gif|jpg|svg|ico|js|xsl|css|py|txt|inv|bib|ttf|woff2|eot|INCAR|POSCAR|cfg|KPOINTS)$"
 PATTERN "_*"
 )
@@ -130,7 +130,7 @@ follows:
 :math:`G_{mn}(i\omega)` and the self energy
 :math:`\Sigma_{mn}(i\omega)`. For the details of how to do
 this in practice, we refer to the documentation of one of the
-Solver applications, for instance the :ref:`CTHYB solver <triqscthyb:welcome>`.
+Solver applications, for instance the `CTHYB solver <https://triqs.github.io/cthyb/>`_.

 #. The self energy, written in orbital space, has to be corrected by
 the double counting correction, and upfolded into Bloch band space:
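For orientation, a minimal sketch of how this double-counting correction and upfolding step is typically driven through SumkDFT; it is not part of the commit and only reuses calls that appear in the tutorial scripts changed below (the solver S, the interaction parameters U and J, and DC_type/DC_value are assumed to be set up as in those scripts)::

    dm = S.G_iw.density()                         # impurity density matrix
    SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0,
               use_dc_formula=DC_type, use_dc_value=DC_value)
    SK.put_Sigma(Sigma_imp=[S.Sigma_iw])          # upfold Sigma into Bloch band space
    SK.calc_mu(precision=0.001)                   # readjust the chemical potential
    S.G_iw << SK.extract_G_loc()[0]               # downfold back to the impurity problem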
@@ -43,8 +43,8 @@ of results on their meaningfulness is the responsibility of the user.
 The :program:`DFTTools` package is a toolbox and **not** a black box!


-Learn how to use :ref:`TRIQS <triqslibs:welcome>` (and the :ref:`CTHYB <triqscthyb:welcome>` solver)
-----------------------------------------------------------------------------------------------------
+Learn how to use `TRIQS library <https://triqs.github.io/triqs/>`_ (and the `CTHYB <https://triqs.github.io/cthyb/>`_ solver)
+-----------------------------------------------------------------------------------------------------------------------------

 As :program:`DFTTools` is a :ref:`TRIQS <triqslibs:welcome>` based application
 it is beneficial to invest a few hours to become familiar with
@@ -54,7 +54,7 @@ the most important aspects of :ref:`TRIQS <triqslibs:welcome>`. We recommend
 downloading our hands-on training in the form of ipython notebooks from
 the `tutorials repository on GitHub <https://github.com/TRIQS/tutorials>`_.
 Tutorials 1 to 6 are on the :ref:`TRIQS <triqslibs:welcome>` library, whereas tutorials
-7 and 8 are more specific to the usage of the :ref:`CTHYB <triqscthyb:welcome>`
+7 and 8 are more specific to the usage of the `CTHYB solver <https://triqs.github.io/cthyb/>`_
 hybridization-expansion solver. In general, those tutorials will take at least a full day to finish.

 Afterwards you can continue with the :ref:`DFTTools user guide <documentation>`.
@@ -160,18 +160,26 @@ which takes care of the process management. The user must, however, specify a pa
 The user-provided script is almost the same as for Wien2k charge self-consistent
 calculations with the main difference that its functionality (apart from the
 lines importing other modules) should be placed inside a function `dmft_cycle()`
-which will be called every DMFT cycle.
+which will be called every DMFT cycle and returns both the correlation energy and the SumK object.

 VASP has a special INCAR `ICHARG=5` mode, that has to be switched on to make VASP wait for the `vasp.lock` file, and read the updated charge density after each step. One should add the following lines to the `INCAR` file::

 ICHARG = 5
 NELM = 1000
 NELMIN = 1000
+IMIX=1
+BMIX=0.5
+AMIX=0.02

 Technically, VASP runs with `ICHARG=5` in a SCF mode, and adding the DMFT
 changes to the DFT density in each step, so that the full DFT+DMFT charge
 density is constructed in every step. This is only done in VASP because only the
-changes to the DFT density are read by VASP not the full DFT+DMFT density.
+changes to the DFT density are read by VASP not the full DFT+DMFT density. Here,
+we also adjust the mixing, since iterations become quickly unstable for insulating
+or charge ordered solutions. Also note, that in each DAV step you still have to
+calculate the projectors, recalculate the chemical potential, and update the
+GAMMA file. See the :meth:`triqs_dft_tools.converters.plovasp.sc_dmft` script for details.

 Moreover, one should always start with a converged `WAVECAR` file, or make sure,
 that the KS states are well converged before the first projectors are created!
 To understand the difference please make sure to read `ISTART flag VASP wiki
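As a rough illustration of the interface described here, a skeleton of such a user-provided script is sketched below; it is not taken from the commit, the DMFT loop itself is elided, and the names follow the nio_csc.py example further down (beta, filename and the solver S are assumptions of this sketch)::

    def dmft_cycle():
        # convert the projectors produced by plovasp for this DFT step
        Converter = VaspConverter(filename='vasp', proj_or_hk='hk')
        Converter.convert_dft_input()

        mesh = MeshImFreq(beta=5.0, S='Fermion', n_iw=1000)
        SK = SumkDFT(hdf_file='vasp.h5', use_dft_blocks=False, mesh=mesh)

        # ... set up the solver S, run the DMFT loop, converge S.Sigma_iw ...

        SK.chemical_potential = SK.calc_mu(precision=0.000001)
        SK.calc_density_correction(dm_type='vasp')    # writes the GAMMA file for VASP
        correnerg = 0.5 * (S.G_iw * S.Sigma_iw).total_density()
        return correnerg, SK                          # correlation energy and SumK object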
@@ -179,6 +187,8 @@ To understand the difference please make sure to read `ISTART flag VASP wiki
 `NELMIN` ensure that VASP does not terminate after the default number of
 iterations of 60.

+For more detailed and fine grained methods to run Vasp in CSC also on clusters see the methods implemented in `solid dmft <https://triqs.github.io/solid_dmft/_ref/dft_managers.html>`_.


 Elk
 ---------
@@ -9,6 +9,7 @@ Packaged Versions of DFTTools
 =============================

 .. _ubuntu_debian:

 Ubuntu Debian packages
 ----------------------

@@ -17,6 +18,7 @@ We provide a Debian package for the Ubuntu LTS Versions 18.04 (bionic) and 20.04
 sudo apt-get install -y triqs_dft_tools

 .. _anaconda:

 Anaconda (experimental)
 -----------------------

@@ -27,6 +29,7 @@ We provide Linux and OSX packages for the `Anaconda <https://www.anaconda.com/>`
 See also `github.com/conda-forge/triqs_dft_tools-feedstock <https://github.com/conda-forge/triqs_dft_tools-feedstock/>`_.

 .. _docker:

 Docker
 ------

@@ -57,7 +57,7 @@ Complex example: NiO
 :maxdepth: 2


-tutorials/nio_csc
+tutorials/nio_csc_vasp/nio_csc


 Elk interface examples
@@ -1,3 +0,0 @@
-from triqs_dft_tools.converters.vasp import *
-Converter = VaspConverter(filename = 'nio', proj_or_hk = 'hk')
-Converter.convert_dft_input()
@@ -1,14 +1,14 @@
 System = NiO

-ISMEAR = -5
+# for PLOT DOS
+# ISMEAR = -5
+# better convergence for small kpt grids
+ISMEAR = 2
+SIGMA = 0.05

 # converge wave functions
 EDIFF = 1.E-7
-PREC = accurate
+NELMIN = 35

-# optimize performance
-NCORE = 4
-NBANDS = 24

 # the energy window to optimize projector channels (absolute)
 EMIN = -3
@@ -21,5 +21,5 @@ ISYM = -1

 # project to Ni d and O p states
 LORBIT = 14
-LOCPROJ = 1 : d : Pr
-LOCPROJ = 2 : p : Pr
+LOCPROJ = "1 : d : Pr
+2 : p : Pr"
@@ -11,17 +11,17 @@ from triqs_cthyb import *
 import warnings
 warnings.filterwarnings("ignore", category=FutureWarning)

-filename = 'nio'
+filename = 'vasp'
 beta = 5.0
-SK = SumkDFT(hdf_file = filename+'.h5', use_dft_blocks = False, beta=beta)
+mesh = MeshImFreq(beta=beta, S='Fermion', n_iw=1000)

+SK = SumkDFT(hdf_file = filename+'.h5', use_dft_blocks = False, mesh=mesh)


 # We analyze the block structure of the Hamiltonian
-Sigma = SK.block_structure.create_gf(beta=beta)
+Sigma = SK.block_structure.create_gf(mesh=mesh)

 SK.put_Sigma([Sigma])
-G = SK.extract_G_loc()
-SK.analyse_block_structure_from_gf(G, threshold = 1e-3)


 # Setup CTQMC Solver
@@ -31,10 +31,13 @@ spin_names = ['up','down']
 # Print some information on the master node
 iteration_offset = 0
 Sigma_iw = 0
+block_structure = None
 if mpi.is_master_node():
 ar = HDFArchive(filename+'.h5','a')
 if not 'DMFT_results' in ar: ar.create_group('DMFT_results')
 if not 'Iterations' in ar['DMFT_results']: ar['DMFT_results'].create_group('Iterations')
+if 'DMFT_input' in ar:
+block_structure = ar['DMFT_input']['sumk_block_structure']
 if 'iteration_count' in ar['DMFT_results']:
 iteration_offset = ar['DMFT_results']['iteration_count']+1
 print(('offset',iteration_offset))
@@ -43,40 +46,30 @@ if mpi.is_master_node():
 SK.dc_energ = ar['DMFT_results']['Iterations']['dc_energ'+str(iteration_offset-1)]
 SK.chemical_potential = ar['DMFT_results']['Iterations']['chemical_potential'+str(iteration_offset-1)].real

+block_structure = mpi.bcast(block_structure)
 iteration_offset = mpi.bcast(iteration_offset)
 Sigma_iw = mpi.bcast(Sigma_iw)
 SK.dc_imp = mpi.bcast(SK.dc_imp)
 SK.dc_energ = mpi.bcast(SK.dc_energ)
 SK.chemical_potential = mpi.bcast(SK.chemical_potential)

+if block_structure:
+SK.block_structure = block_structure
+else:
+G = SK.extract_G_loc()
+SK.analyse_block_structure_from_gf(G, threshold = 1e-3)

 SK.put_Sigma(Sigma_imp = [Sigma_iw])

 ikarray = numpy.array(list(range(SK.n_k)))

-# set up the orbitally resolved local lattice greens function:
-n_orbs = SK.proj_mat_csc.shape[2]
-spn = SK.spin_block_names[SK.SO]
-mesh = Sigma_iw.mesh
-block_structure = [list(range(n_orbs)) for sp in spn]
-gf_struct = [(spn[isp], block_structure[isp])
-for isp in range(SK.n_spin_blocks[SK.SO])]
-block_ind_list = [block for block, inner in gf_struct]
-glist = lambda: [GfImFreq(indices=inner, mesh=mesh)
-for block, inner in gf_struct]
-G_latt_orb = BlockGf(name_list=block_ind_list,
-block_list=glist(), make_copies=False)
-G_latt_orb.zero()
+gf_csc = Gf(mesh=SK.mesh, target_shape=(SK.proj_mat_csc.shape[2],SK.proj_mat_csc.shape[2]))
+G_latt_orb = BlockGf(name_list=['up','down'], block_list=[gf_csc, gf_csc], make_copies=True)

 for ik in mpi.slice_array(ikarray):
-G_latt_KS = SK.lattice_gf(ik=ik, beta=beta)
-G_latt_KS *= SK.bz_weights[ik]
+G_latt_KS = SK.lattice_gf(ik=ik)*SK.bz_weights[ik]
 for bname, gf in G_latt_orb:
-add_g_ik = gf.copy()
-add_g_ik.zero()
-add_g_ik << SK.downfold(ik, 0, bname, G_latt_KS[bname], gf, shells='csc', ir=None)
-gf << gf + add_g_ik
+gf += SK.downfold(ik, 0, bname, G_latt_KS[bname], gf, shells='csc', ir=None)

 G_latt_orb << mpi.all_reduce(G_latt_orb)

doc/tutorials/nio_csc_vasp/converter.py (new file, 9 lines)
@@ -0,0 +1,9 @@
+from triqs_dft_tools.converters.vasp import *
+import triqs_dft_tools.converters.plovasp.converter as plo_converter
+
+# Generate and store PLOs
+plo_converter.generate_and_output_as_text('plo.cfg', vasp_dir='./')
+
+# run the converter
+Converter = VaspConverter(filename = 'vasp', proj_or_hk = 'hk')
+Converter.convert_dft_input()
@@ -2,7 +2,7 @@ from triqs.gf import *
 from h5 import *
 from triqs_maxent import *

-filename = 'nio'
+filename = 'vasp'

 ar = HDFArchive(filename+'.h5','a')
 if 'iteration_count' in ar['DMFT_results']:
@@ -15,19 +15,22 @@ import triqs_dft_tools.version as dft_tools_version
 import warnings
 warnings.filterwarnings("ignore", category=FutureWarning)

-filename = 'nio'
+filename = 'vasp'

 beta = 5.0
-SK = SumkDFT(hdf_file = filename+'.h5', use_dft_blocks = False, beta=beta)
+mesh = MeshImFreq(beta=beta, S='Fermion', n_iw=1000)

+SK = SumkDFT(hdf_file=filename+'.h5', use_dft_blocks=False, mesh=mesh)


-Sigma = SK.block_structure.create_gf(beta=beta)
+Sigma = SK.block_structure.create_gf(mesh=mesh)
 SK.put_Sigma([Sigma])
 G = SK.extract_G_loc()
 SK.analyse_block_structure_from_gf(G, threshold=1e-3)
 for i_sh in range(len(SK.deg_shells)):
 num_block_deg_orbs = len(SK.deg_shells[i_sh])
-mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh))
+mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(
+num_block_deg_orbs, i_sh))
 for iblock in range(num_block_deg_orbs):
 mpi.report('block {0:d} consists of orbitals:'.format(iblock))
 for keys in list(SK.deg_shells[i_sh][iblock].keys()):
@@ -43,7 +46,7 @@ mpi.report('Sumk to Solver: %s'%SK.sumk_to_solver)
 mpi.report('GF struct sumk: %s' % SK.gf_struct_sumk)
 mpi.report('GF struct solver: %s' % SK.gf_struct_solver)

-S = Solver(beta=beta, gf_struct=gf_struct)
+S = Solver(beta=beta, gf_struct=gf_struct, n_iw=1000)

 # Construct the Hamiltonian and save it in Hamiltonian_store.txt
 H = Operator()
@@ -52,10 +55,11 @@ J = 1.0


 U_sph = U_matrix_slater(l=2, U_int=U, J_hund=J)
-U_cubic = transform_U_matrix(U_sph, spherical_to_cubic(l=2, convention=''))
+U_cubic = transform_U_matrix(U_sph, spherical_to_cubic(l=2, convention='vasp'))
 Umat, Upmat = reduce_4index_to_2index(U_cubic)

-H = h_int_density(spin_names, n_orb, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)
+H = h_int_density(spin_names, n_orb,
+map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)

 # Print some information on the master node
 mpi.report('Greens function structure is: %s ' % gf_struct)
@@ -85,24 +89,30 @@ n_iterations = 10
 iteration_offset = 0
 if mpi.is_master_node():
 ar = HDFArchive(filename+'.h5', 'a')
-if not 'DMFT_results' in ar: ar.create_group('DMFT_results')
-if not 'Iterations' in ar['DMFT_results']: ar['DMFT_results'].create_group('Iterations')
-if not 'DMFT_input' in ar: ar.create_group('DMFT_input')
-if not 'Iterations' in ar['DMFT_input']: ar['DMFT_input'].create_group('Iterations')
-if not 'code_versions' in ar['DMFT_input']: ar['DMFT_input'].create_group('code_versio\
-ns')
+if not 'DMFT_results' in ar:
+ar.create_group('DMFT_results')
+if not 'Iterations' in ar['DMFT_results']:
+ar['DMFT_results'].create_group('Iterations')
+if not 'DMFT_input' in ar:
+ar.create_group('DMFT_input')
+if not 'Iterations' in ar['DMFT_input']:
+ar['DMFT_input'].create_group('Iterations')
+if not 'code_versions' in ar['DMFT_input']:
+ar['DMFT_input'].create_group('code_versions')
 ar['DMFT_input']['code_versions']["triqs_version"] = triqs_version.version
 ar['DMFT_input']['code_versions']["triqs_git"] = triqs_version.git_hash
 ar['DMFT_input']['code_versions']["cthyb_version"] = cthyb_version.version
 ar['DMFT_input']['code_versions']["cthyb_git"] = cthyb_version.triqs_cthyb_hash
 ar['DMFT_input']['code_versions']["dft_tools_version"] = dft_tools_version.version
 ar['DMFT_input']['code_versions']["dft_tools_version"] = dft_tools_version.triqs_dft_tools_hash
+ar['DMFT_input']['sumk_block_structure'] = SK.block_structure
 if 'iteration_count' in ar['DMFT_results']:
 iteration_offset = ar['DMFT_results']['iteration_count']+1
 S.Sigma_iw = ar['DMFT_results']['Iterations']['Sigma_it'+str(iteration_offset-1)]
 SK.dc_imp = ar['DMFT_results']['Iterations']['dc_imp'+str(iteration_offset-1)]
 SK.dc_energ = ar['DMFT_results']['Iterations']['dc_energ'+str(iteration_offset-1)]
-SK.chemical_potential = ar['DMFT_results']['Iterations']['chemical_potential'+str(iteration_offset-1)].real
+SK.chemical_potential = ar['DMFT_results']['Iterations']['chemical_potential' +
+str(iteration_offset-1)].real
 ar['DMFT_input']["dmft_script_it"+str(iteration_offset)] = open(sys.argv[0]).read()
 iteration_offset = mpi.bcast(iteration_offset)
 S.Sigma_iw = mpi.bcast(S.Sigma_iw)
@@ -123,7 +133,8 @@ if iteration_offset == 0:
 SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type, use_dc_value=DC_value)
 S.Sigma_iw << SK.dc_imp[0]['up'][0, 0]

-mpi.report('%s DMFT cycles requested. Starting with iteration %s.'%(n_iterations,iteration_offset))
+mpi.report('%s DMFT cycles requested. Starting with iteration %s.' %
+(n_iterations, iteration_offset))

 # The infamous DMFT self consistency cycle
 for it in range(iteration_offset, iteration_offset + n_iterations):
@@ -162,4 +173,5 @@ for it in range(iteration_offset, iteration_offset + n_iterations):
 ar['DMFT_results']['Iterations']['dc_energ'+str(it)] = SK.dc_energ
 ar['DMFT_results']['Iterations']['chemical_potential'+str(it)] = SK.chemical_potential

-if mpi.is_master_node(): del ar
+if mpi.is_master_node():
+del ar

(binary image file moved; 25 KiB before and after)
@@ -19,22 +19,24 @@ warnings.filterwarnings("ignore", category=FutureWarning)


 def dmft_cycle():
-filename = 'nio'
+filename = 'vasp'

-Converter = VaspConverter(filename=filename)
+Converter = VaspConverter(filename=filename, proj_or_hk='hk')
 Converter.convert_dft_input()

-SK = SumkDFT(hdf_file = filename+'.h5', use_dft_blocks = False)

 beta = 5.0
+mesh = MeshImFreq(beta=beta, S='Fermion', n_iw=1000)

-Sigma = SK.block_structure.create_gf(beta=beta)
+SK = SumkDFT(hdf_file=filename+'.h5', use_dft_blocks=False, mesh=mesh)

+Sigma = SK.block_structure.create_gf(mesh=mesh)
 SK.put_Sigma([Sigma])
 G = SK.extract_G_loc()
 SK.analyse_block_structure_from_gf(G, threshold=1e-2)
 for i_sh in range(len(SK.deg_shells)):
 num_block_deg_orbs = len(SK.deg_shells[i_sh])
-mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(num_block_deg_orbs, i_sh))
+mpi.report('found {0:d} blocks of degenerate orbitals in shell {1:d}'.format(
+num_block_deg_orbs, i_sh))
 for iblock in range(num_block_deg_orbs):
 mpi.report('block {0:d} consists of orbitals:'.format(iblock))
 for keys in list(SK.deg_shells[i_sh][iblock].keys()):
@@ -50,19 +52,19 @@ def dmft_cycle():
 mpi.report('GF struct sumk: %s' % SK.gf_struct_sumk)
 mpi.report('GF struct solver: %s' % SK.gf_struct_solver)

-S = Solver(beta=beta, gf_struct=gf_struct)
+S = Solver(beta=beta, gf_struct=gf_struct, n_iw=1000)

 # Construct the Hamiltonian and save it in Hamiltonian_store.txt
 H = Operator()
 U = 8.0
 J = 1.0


 U_sph = U_matrix_slater(l=2, U_int=U, J_hund=J)
-U_cubic = transform_U_matrix(U_sph, spherical_to_cubic(l=2, convention=''))
+U_cubic = transform_U_matrix(U_sph, spherical_to_cubic(l=2, convention='vasp'))
 Umat, Upmat = reduce_4index_to_2index(U_cubic)

-H = h_int_density(spin_names, n_orb, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)
+H = h_int_density(spin_names, n_orb,
+map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)

 # Print some information on the master node
 mpi.report('Greens function structure is: %s ' % gf_struct)
@@ -92,24 +94,30 @@ def dmft_cycle():
 iteration_offset = 0
 if mpi.is_master_node():
 ar = HDFArchive(filename+'.h5', 'a')
-if not 'DMFT_results' in ar: ar.create_group('DMFT_results')
-if not 'Iterations' in ar['DMFT_results']: ar['DMFT_results'].create_group('Iterations')
-if not 'DMFT_input' in ar: ar.create_group('DMFT_input')
-if not 'Iterations' in ar['DMFT_input']: ar['DMFT_input'].create_group('Iterations')
-if not 'code_versions' in ar['DMFT_input']: ar['DMFT_input'].create_group('code_versio\
-ns')
+if not 'DMFT_results' in ar:
+ar.create_group('DMFT_results')
+if not 'Iterations' in ar['DMFT_results']:
+ar['DMFT_results'].create_group('Iterations')
+if not 'DMFT_input' in ar:
+ar.create_group('DMFT_input')
+if not 'Iterations' in ar['DMFT_input']:
+ar['DMFT_input'].create_group('Iterations')
+if not 'code_versions' in ar['DMFT_input']:
+ar['DMFT_input'].create_group('code_versions')
 ar['DMFT_input']['code_versions']["triqs_version"] = triqs_version.version
 ar['DMFT_input']['code_versions']["triqs_git"] = triqs_version.git_hash
 ar['DMFT_input']['code_versions']["cthyb_version"] = cthyb_version.version
 ar['DMFT_input']['code_versions']["cthyb_git"] = cthyb_version.triqs_cthyb_hash
 ar['DMFT_input']['code_versions']["dft_tools_version"] = dft_tools_version.version
 ar['DMFT_input']['code_versions']["dft_tools_git"] = dft_tools_version.triqs_dft_tools_hash
+ar['DMFT_input']['sumk_block_structure'] = SK.block_structure
 if 'iteration_count' in ar['DMFT_results']:
 iteration_offset = ar['DMFT_results']['iteration_count']+1
 S.Sigma_iw = ar['DMFT_results']['Iterations']['Sigma_it'+str(iteration_offset-1)]
 SK.dc_imp = ar['DMFT_results']['Iterations']['dc_imp'+str(iteration_offset-1)]
 SK.dc_energ = ar['DMFT_results']['Iterations']['dc_energ'+str(iteration_offset-1)]
-SK.chemical_potential = ar['DMFT_results']['Iterations']['chemical_potential'+str(iteration_offset-1)].real
+SK.chemical_potential = ar['DMFT_results']['Iterations']['chemical_potential' +
+str(iteration_offset-1)].real
 ar['DMFT_input']["dmft_script_it"+str(iteration_offset)] = open(sys.argv[0]).read()
 iteration_offset = mpi.bcast(iteration_offset)
 S.Sigma_iw = mpi.bcast(S.Sigma_iw)
@@ -127,12 +135,12 @@ def dmft_cycle():
 # Init the DC term and the self-energy if no previous iteration was found
 if iteration_offset == 0:
 dm = S.G_iw.density()
-SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type,use_dc_value=DC_value)
+SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0,
+use_dc_formula=DC_type, use_dc_value=DC_value)
 S.Sigma_iw << SK.dc_imp[0]['up'][0, 0]

-mpi.report('%s DMFT cycles requested. Starting with iteration %s.'%(n_iterations,iteration_offset))
+mpi.report('%s DMFT cycles requested. Starting with iteration %s.' %
+(n_iterations, iteration_offset))


 # The infamous DMFT self consistency cycle
 for it in range(iteration_offset, iteration_offset + n_iterations):
@@ -149,17 +157,18 @@ def dmft_cycle():
 ar['DMFT_results']['Iterations']['Sigma_uns_it'+str(it)] = S.Sigma_iw
 # Calculate double counting
 dm = S.G_iw.density()
-SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type,use_dc_value=DC_value)
+SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0,
+use_dc_formula=DC_type, use_dc_value=DC_value)
 # Get new G
 SK.symm_deg_gf(S.Sigma_iw, ish=0)
 SK.put_Sigma(Sigma_imp=[S.Sigma_iw])
-SK.calc_mu(precision=0.01)
+SK.calc_mu(precision=0.001)
 S.G_iw << SK.extract_G_loc()[0]

 # print densities
 for sig, gf in S.G_iw:
-mpi.report("Orbital %s density: %.6f"%(sig,dm[sig][0,0]))
-mpi.report('Total charge of Gloc : %.6f'%S.G_iw.total_density())
+mpi.report("Orbital %s density: %.6f" % (sig, dm[sig][0, 0].real))
+mpi.report('Total charge of Gloc : %.6f' % S.G_iw.total_density().real)

 if mpi.is_master_node():
 ar['DMFT_results']['iteration_count'] = it
@@ -170,9 +179,6 @@ def dmft_cycle():
 ar['DMFT_results']['Iterations']['dc_energ'+str(it)] = SK.dc_energ
 ar['DMFT_results']['Iterations']['chemical_potential'+str(it)] = SK.chemical_potential

 if mpi.is_master_node():
 print('calculating mu...')
 SK.chemical_potential = SK.calc_mu(precision=0.000001)
@@ -188,12 +194,12 @@ def dmft_cycle():

 dm = S.G_iw.density() # compute the density matrix of the impurity problem
 SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=DC_type, use_dc_value=DC_value)
-dc_energ = SK.dc_energ[0]

 if mpi.is_master_node():
 ar['DMFT_results']['Iterations']['corr_energy_it'+str(it)] = correnerg
-ar['DMFT_results']['Iterations']['dc_energy_it'+str(it)] = dc_energ
+ar['DMFT_results']['Iterations']['dc_energy_it'+str(it)] = SK.dc_energ[0]

-if mpi.is_master_node(): del ar
+if mpi.is_master_node():
+del ar

-return correnerg, dc_energ
+return correnerg, SK
@@ -3,8 +3,7 @@
 DFT and projections
 ==================================================

-We will perform DFT+DMFT calcluations for the charge-transfer insulator NiO. We start from scratch and provide all necessary input files to do the calculations: First for doing a single-shot calculation.
-.. (and then for charge-selfconsistency).
+We will perform DFT+DMFT calculations for the charge-transfer insulator NiO. We start from scratch and provide all necessary input files to do the calculations: First for doing a single-shot calculation (and then for charge-self consistency).

 VASP setup
 -------------------------------
@@ -15,7 +14,7 @@ own :file:`POTCAR` file.
 Let us take a look in the :file:`INCAR`, where we have to specify local orbitals as basis
 for our many-body calculation.

-.. literalinclude:: images_scripts/INCAR
+.. literalinclude:: INCAR

 `LORBIT = 14` takes care of optimizing the projectors in the energy window defined
 by `EMIN` and `EMAX`. We switch off all symmetries with `ISYM=-1` since symmetries
@@ -27,25 +26,28 @@ e.ac.at/wiki/index.php/LOCPROJ>`_ flag. The projectors are stored in the file `L

 PLOVASP
 ------------------------------
-Next, we postprocess the projectors, which VASP stored in the file `LOCPROJ`.
+Next, we post-process the projectors, which VASP stored in the file `LOCPROJ`.
 We do this by invoking :program:`plovasp plo.cfg` which is configured by an input file, e.g., named :ref:`plo.cfg`.

-.. literalinclude:: images_scripts/plo.cfg
+.. literalinclude:: plo.cfg

 Here, in `[General]` we set the basename and the grid for calculating the density of
 states. In `[Group 1]` we define a group of two shells which are orthonormalized with
-respect to states in an energy window from `-9` to `2` for all ions simultanously
+respect to states in an energy window from `-9` to `2` for all ions simultaneously
 (`NORMION = False`). We define the two shells, which correspond to the Ni d states
 and the O p states. Only the Ni shell is treated as correlated (`CORR = True`), i.e.,
-is supplemented with a Coulomb interaction later in the DMFT calculation.
+is supplemented with a Coulomb interaction later in the DMFT calculation. Here, we
+chose to use the Hamiltonian mode of the vasp converter by setting `COMPLEMENT=TRUE`,
+and specifying to use explicitly only bands with indices 2 to 9 (`BANDS`). This is
+optional but later used in the post-processing.

 Converting to hdf5 file
 -------------------------------
-We gather the output generated by :program:`plovasp` into a hdf5 archive which :program:`dft_tools` is able to read. We do this by running :program:`python converter.py` on the script :ref:`converter.py`:
+We run the whole conversion to a dft_tools readable h5 archive by running the converter script provided :program:`python converter.py`

-.. literalinclude:: images_scripts/converter.py
+.. literalinclude:: converter.py

-Now we are all set to perform a dmft calculation.
+A h5 archive should be created with the name `vasp.h5` Now we are all set to perform a dmft calculation.

 DMFT
 ==================================================
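If you want to double-check the conversion, a quick hedged sanity check (not part of the commit) is to open the produced archive and list its groups; the converter is expected to have written a `dft_input` group into `vasp.h5`::

    from h5 import HDFArchive

    # open the archive written by converter.py and list its top-level groups
    with HDFArchive('vasp.h5', 'r') as ar:
        print(list(ar.keys()))   # should contain 'dft_input' among others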
@@ -70,7 +72,7 @@ Spectral function on real axis: MaxEnt
 To compare to results from literature we make use of the `maxent triqs application <https://triqs.github.io/maxent/master/>`_ and calculate the spectral function on real axis. Use this script to perform a crude but quick calculation: :ref:`maxent.py` using a linear real axis and a line-fit analyzer to determine the optimal :math:`\alpha`. The output is saved in the h5 file in `DMFT_results/Iterations/G_latt_orb_w_o<n_o>_it<n_it>`, where `<n_o>` is the number of the orbital and `n_it` is again the number of the last iteration. The real axis information is stored in `DMFT_results/Iterations/w_it<n_it>`.


-.. image:: images_scripts/nio_Aw.png
+.. image:: nio_Aw.png
 :width: 400
 :align: center

@@ -83,20 +85,22 @@ In this part we will perform charge self-consistent DMFT calculations. To do so
 ICHARG = 5
 NELM = 1000
 NELMIN = 1000
-IMIX=0
+IMIX=1
+BMIX=0.5
+AMIX=0.02

-which makes VASP wait after each step of its iterative diagonalization until the file vasp.lock is created. It then reads the update of the charge density in the file `GAMMA`. It is terminated by an external script after a desired amount of steps, such that we deactivate all automatic stoping criterion by setting the number of steps to a very high number.
+which makes VASP wait after each step of its iterative diagonalization until the file vasp.lock is created. It then reads the update of the charge density in the file `GAMMA`. We change the mixing here to stabilize the updating, which can be problem for charge ordered systems. Vasp is terminated by an external script after a desired amount of steps, such that we deactivate all automatic stoping criterion by setting the number of steps to a very high number.

-We take the respective converged DFT and DMFT calculations from before as a starting point. I.e., we copy the `CHGCAR` and `nio.h5` together with the other :program:`VASP` input files and :file:`plo.cfg` in a new directory. We use a script called :program:`vasp_dmft` to invoke :program:`VASP` in the background and start the DMFT calculation together with :program:`plovasp` and the converter. This script assumes that the dmft sript contains a function `dmft_cycle()` and also the conversion from text files to the h5 file. Additionally we have to add a few lines to calculate the density correction and calculate the correlation energy. We adapt the script straightforardly (for a working example see :ref:`nio_csc.py`). The most important new lines are::
+We take the respective converged DFT and DMFT calculations from before as a starting point. I.e., we copy the `CHGCAR` and `nio.h5` together with the other :program:`VASP` input files and :file:`plo.cfg` in a new directory. We use a script called :program:`vasp_dmft` to invoke :program:`VASP` in the background and start the DMFT calculation together with :program:`plovasp` and the converter. This script assumes that the dmft sript contains a function `dmft_cycle()` and also the conversion from text files to the h5 file. Additionally we have to add a few lines to calculate the density correction and calculate the correlation energy. We adapt the script straightforwardly (for a working example see :ref:`nio_csc.py`). The most important new lines are::

 SK.chemical_potential = SK.calc_mu( precision = 0.000001 )
 SK.calc_density_correction(dm_type='vasp')
 correnerg = 0.5 * (S.G_iw * S.Sigma_iw).total_density()

-where the chemical potential is determined to a greater precision than before, the correction to the dft density matrix is calculated and output to the file :file:`GAMMA`. The correlation energy is calculated via Migdal-Galitzki formula. We also slightly increase the tolerance for the detection of blocks since the DFT calculation now includes some QMC noise.
+where the chemical potential is determined to a greater precision than before, the correction to the dft density matrix is calculated and output to the file :file:`GAMMA`. The correlation energy is calculated via Migdal-Galitski formula. We also slightly increase the tolerance for the detection of blocks since the DFT calculation now includes some QMC noise.

-To help convergence, we keep the density (i.e., the GAMMA file) fixed for a few DFT iterations. We do so since VASP uses an iterative diagonalization.
+To help convergence, we keep the density (i.e., the GAMMA file) fixed for a few DFT iterations. We do so since VASP uses an iterative diagonalization. Within these steps we still need to update the projectors and recalculate the GAMMA file to not shuffle eigenvalues around by accident.

-We can start the whole machinery by excecuting::
+We can start the whole machinery by executing::

 vasp_dmft -n <n_procs> -i <n_iters> -j <n_iters_dft> -p <vasp_exec> nio_csc.py
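For reference, the `correnerg` line above is the Galitskii-Migdal (Migdal-Galitski) estimate of the correlation energy, i.e. (up to the usual convergence factor in the Matsubara sum)

.. math::

   E_{\mathrm{corr}} = \frac{1}{2}\,\mathrm{Tr}\left[\Sigma G\right]
                     = \frac{1}{2\beta}\sum_{i\omega_n}\sum_{mn}\Sigma_{mn}(i\omega_n)\,G_{nm}(i\omega_n),

which is what `0.5 * (S.G_iw * S.Sigma_iw).total_density()` evaluates for the impurity problem.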
@@ -1,6 +1,5 @@
 [General]
-BASENAME = nio
-DOSMESH = -21 55 400
+# DOSMESH = -21 55 400
 HK = True
 COMPLEMENT = True

@@ -9,7 +8,7 @@ SHELLS = 1 2
 EWINDOW = -9 2
 NORMION = False
 NORMALIZE = True
-BANDS = 2 10
+BANDS = 2 9

 [Shell 1] # Ni d shell
 LSHELL = 2
@@ -1,7 +1,7 @@
 .. _SrVO3:

 On the example of SrVO3 we will discuss now how to set up a full working calculation,
-including the initialization of the :ref:`CTHYB solver <triqscthyb:welcome>`.
+including the initialization of the `CTHYB solver <https://triqs.github.io/cthyb/>`_.
 Some additional parameter are introduced to make the calculation
 more efficient. This is a more advanced example, which is
 also suited for parallel execution.
@@ -84,7 +84,7 @@ First, we load the necessary modules::
 import triqs.utility.mpi as mpi

 The last two lines load the modules for the construction of the
-:ref:`CTHYB solver <triqscthyb:welcome>`.
+`CTHYB solver <https://triqs.github.io/cthyb/>`_.

 Initializing SumkDFT
 --------------------
@@ -109,7 +109,7 @@ And next, we can initialize the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class::
 Initializing the solver
 -----------------------

-We also have to specify the :ref:`CTHYB solver <triqscthyb:welcome>` related settings.
+We also have to specify the `CTHYB solver <https://triqs.github.io/cthyb/>`_ related settings.
 We assume that the DMFT script for SrVO3 is executed on 16 cores. A sufficient set
 of parameters for a first guess is::

@@ -127,7 +127,7 @@ of parameters for a first guess is::

 Here we use a tail fit to deal with numerical noise of higher Matsubara frequencies.
 For other options and more details on the solver parameters, we refer the user to
-the :ref:`CTHYB solver <triqscthyb:welcome>` documentation.
+the `CTHYB solver <https://triqs.github.io/cthyb/>`_ documentation.
 It is important to note that the solver parameters have to be adjusted for
 each material individually. A guide on how to set the tail fit parameters is given
 :ref:`below <tailfit>`.
@@ -278,3 +278,4 @@ of the self energy and to stop (:emphasis:`fit_max_n`) before the noise fully ta
 If it is difficult to find a reasonable fit in this region you should increase
 your statistics (number of measurements). Keep in mind that :emphasis:`fit_min_n`
 and :emphasis:`fit_max_n` also depend on :math:`\beta`.
+
@@ -1,6 +1,6 @@
 .. _SrVO3_elk:

-This example is almost identical to the :ref:`Wien2k-TRIQS SrVO3 example <SrVO3>`. On the example of SrVO3 we will discuss now how to set up a full working calculation using Elk, including the initialization of the :ref:`CTHYB solver <https://triqs.github.io/cthyb/latest>`_. Some additional parameter are introduced to make the calculation more efficient. This is a more advanced example, which is also suited for parallel execution.
+This example is almost identical to the :ref:`Wien2k-TRIQS SrVO3 example <SrVO3>`. On the example of SrVO3 we will discuss now how to set up a full working calculation using Elk, including the initialization of the `CTHYB solver <https://triqs.github.io/cthyb/latest>`_. Some additional parameter are introduced to make the calculation more efficient. This is a more advanced example, which is also suited for parallel execution.

 For the convenience of the user, we provide also a full python script (:download:`dft_dmft_cthyb_elk.py <dft_dmft_cthyb_elk.py>`). The user has to adapt it to their own needs. How to execute your script is described :ref:`here<runpy>`.

@@ -48,7 +48,7 @@ First, we load the necessary modules::
 import triqs.utility.mpi as mpi

 The last two lines load the modules for the construction of the
-:ref:`CTHYB solver <https://triqs.github.io/cthyb/latest/>`_.
+`CTHYB solver <https://triqs.github.io/cthyb/latest/>`_.

 Initializing SumkDFT
 --------------------
@@ -73,7 +73,7 @@ And next, we can initialize the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class::
 Initializing the solver
 -----------------------

-We also have to specify the :ref:`CTHYB solver <https://triqs.github.io/cthyb/latest>`_ related settings. We assume that the DMFT script for SrVO3 is executed on 16 cores. A sufficient set of parameters for a first guess is::
+We also have to specify the `CTHYB solver <https://triqs.github.io/cthyb/latest>`_ related settings. We assume that the DMFT script for SrVO3 is executed on 16 cores. A sufficient set of parameters for a first guess is::

 p = {}
 # solver
@@ -86,7 +86,7 @@ We also have to specify the :ref:`CTHYB solver <https://triqs.github.io/cthyb/la
 p["fit_min_n"] = 30
 p["fit_max_n"] = 60

-Here we use a tail fit to deal with numerical noise of higher Matsubara frequencies. For other options and more details on the solver parameters, we refer to the :ref:`CTHYB solver <https://triqs.github.io/cthyb/latest/reference/constr_parameters.html>`_ documentation. It is important to note that the solver parameters have to be adjusted for each material individually. A guide on how to set the tail fit parameters is given :ref:`below <tailfit>`.
+Here we use a tail fit to deal with numerical noise of higher Matsubara frequencies. For other options and more details on the solver parameters, we refer to the `CTHYB solver <https://triqs.github.io/cthyb/latest/reference/constr_parameters.html>`_ documentation. It is important to note that the solver parameters have to be adjusted for each material individually. A guide on how to set the tail fit parameters is given :ref:`below <tailfit>`.

 The next step is to initialize the :class:`solver class <triqs_cthyb.Solver>`. It consist of two parts:

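To make the role of the parameter dictionary `p` concrete, here is a hedged sketch of how such a set of tail-fit and Monte-Carlo settings is typically collected and handed to the solver; the numbers are placeholders and the sampling parameters (length_cycle, n_warmup_cycles, n_cycles) are assumptions of this sketch, not values prescribed by the commit::

    p = {}
    # Monte-Carlo sampling (placeholder values, tune per material and core count)
    p["length_cycle"] = 200
    p["n_warmup_cycles"] = 10000
    p["n_cycles"] = 1000000
    # tail fit of the noisy high-frequency self energy, as discussed above
    p["perform_tail_fit"] = True
    p["fit_max_moment"] = 4
    p["fit_min_n"] = 30
    p["fit_max_n"] = 60

    # the dictionary is then passed on to the solve call
    S.solve(h_int=H, **p)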
@@ -218,21 +218,3 @@ Using the Kanamori Hamiltonian and the parameters above (but on 16 cores), your
 :width: 700
 :align: center

-.. _tailfit:
-
-Tail fit parameters
--------------------
-
-A good way to identify suitable tail fit parameters is by "human inspection". Therefore disabled the tail fitting first::
-
-p["perform_tail_fit"] = False
-
-and perform only one DMFT iteration. The resulting self energy can be tail fitted by hand::
-
-Sigma_iw_fit = S.Sigma_iw.copy()
-Sigma_iw_fit << tail_fit(S.Sigma_iw, fit_max_moment = 4, fit_min_n = 40, fit_max_n = 160)[0]
-
-Plot the self energy and adjust the tail fit parameters such that you obtain a proper fit. The :meth:`fit_tail function <triqs.gf.tools.tail_fit>` is part of the :ref:`TRIQS <triqslibs:welcome>` library.
-
-For a self energy which is going to zero for :math:`i\omega \rightarrow 0` our suggestion is to start the tail fit (:emphasis:`fit_min_n`) at a Matsubara frequency considerable above the minimum of the self energy and to stop (:emphasis:`fit_max_n`) before the noise fully takes over. If it is difficult to find a reasonable fit in this region you should increase your statistics (number of measurements). Keep in mind that :emphasis:`fit_min_n` and :emphasis:`fit_max_n` also depend on :math:`\beta`.
@@ -41,8 +41,8 @@ class ConverterTools:
 to_replace : dict of str:str
 Dictionary defining old_char:new_char.
 
-Yields
-------
+Returns
+-------
 string
 The next number in file.
 
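For orientation, a generic sketch of the kind of helper this docstring describes: stream the whitespace-separated tokens of a text file one by one after applying the ``old_char`` to ``new_char`` replacements. Names and the example replacement are illustrative, not the converter's actual code::

    def file_numbers(filename, to_replace):
        # to_replace maps characters to substitute before parsing,
        # e.g. {'D': 'E'} to make Fortran exponents readable by float()
        with open(filename) as f:
            for line in f:
                for old_char, new_char in to_replace.items():
                    line = line.replace(old_char, new_char)
                for token in line.split():
                    yield token   # "the next number in file", as a string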
@@ -513,10 +513,11 @@ class ElkConverter(ConverterTools,Elk_tools,read_Elk):
 mpi.report('Converted the band data')
 
 def convert_contours_input(self,kgrid=None,ngrid=None):
-"""
+r"""
 Reads the appropriate files and stores the data for the cont_subgrp in the hdf5 archive.
 
-Parameters:
+Parameters
+----------
 kgrid : size (4,3) double numpy array, optional
 Numpy array defining the reciprocal lattice vertices used in the Elk Fermi
 surface calculation. Each row has the following meaning:
@@ -52,8 +52,8 @@ class readElkfiles:
 to_replace : dict of str:str
 Dictionary defining old_char:new_char.
 
-Yields
-------
+Returns
+-------
 string
 The next number in file.
 
@@ -87,8 +87,8 @@ class readElkfiles:
 to_replace : dict of str:str
 Dictionary defining old_char:new_char.
 
-Yields
-------
+Returns
+-------
 string
 The next number in file.
 
@@ -42,28 +42,6 @@ from .elstruct import ElectronicStructure
 from .plotools import generate_plo, output_as_text
 import logging
 
-class PloFormatter(logging.Formatter):
-"""
-custom event logger for all output, warnings and debug info
-"""
-def __init__(self, default):
-self._default_formatter = default
-
-def format(self, record):
-# Save the original format
-_style = self._style
-
-# Customized WARNING format
-if record.levelno == logging.WARNING:
-self._style = logging.PercentStyle("\n!!! WARNING !!!: %(msg)s\n")
-
-result = super().format(record)
-
-# Restore the original format
-self._style = _style
-
-return result
-
 # Uncomment this to get extra output
 #logging.basicConfig(level=logging.DEBUG)
 
@@ -72,7 +50,8 @@ main_log = logging.getLogger('plovasp')
 main_log.propagate = False
 
 handler = logging.StreamHandler(sys.stdout)
-formatter = PloFormatter("[%(levelname)s]:[%(name)s]: %(message)s")
+# formatter = logging.Formatter("[%(levelname)s]:[%(name)s]: %(message)s")
+formatter = logging.Formatter("[%(levelname)s]: %(message)s")
 handler.setFormatter(formatter)
 main_log.addHandler(handler)
 
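With ``PloFormatter`` removed, plovasp messages go through a plain standard-library formatter. A self-contained sketch of the resulting behaviour (the warning text is made up; only the logger name and format string come from the hunk)::

    import logging, sys

    log = logging.getLogger('plovasp')       # same logger name as in the hunk
    log.propagate = False
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter("[%(levelname)s]: %(message)s"))
    log.addHandler(handler)

    log.warning("energy window contains no bands")
    # prints: [WARNING]: energy window contains no bands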
@@ -235,16 +235,16 @@ class ProjectorGroup:
 """
 Calculate the complement for a group of projectors.
 
-This leads to quadtratic projectors P = <l|n> by using a Gram-Schmidt.
+This leads to quadtratic projectors :math:`P = <l|n>` by using a Gram-Schmidt.
 
 The projector on the orthogonal complement of the existing projectors
-|l> is P^u = 1 - sum_l |l><l|
+:math:`|l>` is :math:`P^u = 1 - sum_l |l><l|`
 We get candidates for complement projectors by applying P^u to a Bloch
-state |n>: |l*> = P^u |n>. For numerical stability we select that Bloch
-state which leads to the |l*> with the largest norm (that corresponds to
-that Bloch state with the smallest overlap with the space spanned by |l>)
-We normalize |l*> and add it to |l>. We do so untill we have as many
-|l> states as we have |n> states.
+state :math:`|n>`: :math:`|l*> = P^u |n>`. For numerical stability we select that Bloch
+state which leads to the :math:`|l*>` with the largest norm (that corresponds to
+that Bloch state with the smallest overlap with the space spanned by :math:`|l>`)
+We normalize :math:`|l*>` and add it to :math:`|l>`. We do so untill we have as many
+:math:`|l>` states as we have :math:`|n>` states.
 
 """
 
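The docstring describes the completion procedure in words; below is a minimal numpy sketch of that Gram-Schmidt construction. It is a standalone illustration with invented names, not the method's actual implementation, and it assumes the input rows ``<l|n>`` are already orthonormal::

    import numpy as np

    def complete_projectors(P):
        """Extend P (shape (n_l, n_b), rows <l|n>) to a square (n_b, n_b) matrix."""
        P = np.asarray(P, dtype=complex)
        n_b = P.shape[1]
        while P.shape[0] < n_b:
            # projector onto the orthogonal complement of the current |l> states,
            # written in the Bloch basis: P^u = 1 - sum_l |l><l|
            P_u = np.eye(n_b) - P.conj().T @ P
            # column n of P_u is P^u |n>; keep the Bloch state giving the largest norm
            norms = np.linalg.norm(P_u, axis=0)
            l_star = P_u[:, np.argmax(norms)]
            l_star /= np.linalg.norm(l_star)
            # append the normalized candidate as a new row <l*|n>
            P = np.vstack([P, l_star.conj()])
        return P

For example, three orthonormal projector rows over five Bloch bands are completed to a 5x5 matrix whose rows remain mutually orthonormal.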
@@ -31,6 +31,7 @@ import time
 import signal
 import sys
 import triqs.utility.mpi as mpi
+from h5 import HDFArchive
 from . import converter
 from shutil import copyfile
 
@@ -136,17 +137,17 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
 mpi.barrier()
 
 if debug: print(bcolors.GREEN + "rank %s"%(mpi.rank) + bcolors.ENDC)
-corr_energy, dft_dc = dmft_cycle()
+corr_energy, sum_k = dmft_cycle()
 mpi.barrier()
 
 if mpi.is_master_node():
-total_energy = dft_energy + corr_energy - dft_dc
+total_energy = dft_energy + corr_energy - sum_k.dc_energ[0]
 print()
 print("="*80)
 print(" Total energy: ", total_energy)
 print(" DFT energy: ", dft_energy)
 print(" Corr. energy: ", corr_energy)
-print(" DFT DC: ", dft_dc)
+print(" DFT DC: ", sum_k.dc_energ[0])
 print("="*80)
 print()
 
@@ -163,6 +164,23 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
 if vasp_version == 'standard':
 copyfile(src='GAMMA',dst='GAMMA_recent')
 while iter_dft < n_iter_dft:
+# insert recalculation of GAMMA here
+# Recalculates the density correction
+# Reads in new projectors and hopping and updates chemical potential
+# rot_mat is not updated since it's more closely related to the local problem than DFT
+# New fermi weights are directly read in calc_density_correction
+if iter > 0 and not iter == n_iter and mpi.is_master_node():
+with HDFArchive('vasp.h5', 'r') as archive:
+sum_k.proj_mat = archive['dft_input/proj_mat']
+sum_k.hopping = archive['dft_input/hopping']
+sum_k.proj_mat = mpi.bcast(sum_k.proj_mat)
+sum_k.hopping = mpi.bcast(sum_k.hopping)
+sum_k.calc_mu(precision=0.001)
+
+# Writes out GAMMA file
+sum_k.calc_density_correction(dm_type='vasp')
+
+mpi.barrier()
 if mpi.is_master_node():
 open('./vasp.lock', 'a').close()
 while is_vasp_lock_present():
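The added block re-reads the projectors and the hopping matrix from ``vasp.h5`` in every charge self-consistency step. A small standalone sketch of the same read, e.g. to check the file after a run (only the file name and group paths are taken from the hunk, the rest is illustrative)::

    from h5 import HDFArchive

    with HDFArchive('vasp.h5', 'r') as ar:
        proj_mat = ar['dft_input/proj_mat']   # projectors, as re-read in the loop above
        hopping = ar['dft_input/hopping']     # hopping (H(k)) array in the projected window
    print(proj_mat.shape, hopping.shape)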
@@ -182,13 +200,13 @@ def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
 f_stop.write("LABORT = .TRUE.\n")
 f_stop.close()
 if mpi.is_master_node():
-total_energy = dft_energy + corr_energy - dft_dc
+total_energy = dft_energy + corr_energy - sum_k.dc_energ[0]
 with open('TOTENERGY', 'w') as f:
 f.write(" Total energy: %s\n"%(total_energy))
 f.write(" DFT energy: %s\n"%(dft_energy))
 f.write(" Corr. energy: %s\n"%(corr_energy))
-f.write(" DFT DC: %s\n"%(dft_dc))
-f.write(" Energy correction: %s\n"%(corr_energy - dft_dc))
+f.write(" DFT DC: %s\n"%(sum_k.dc_energ[0]))
+f.write(" Energy correction: %s\n"%(corr_energy - sum_k.dc_energ[0]))
 
 mpi.report("***Done")
 
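Both this hunk and the previous one assemble the energy the same way; the quantity printed and written to ``TOTENERGY`` is

.. math::

    E_\mathrm{tot} = E_\mathrm{DFT} + E_\mathrm{corr} - E_\mathrm{DC},

with the double counting energy :math:`E_\mathrm{DC}` now taken from ``sum_k.dc_energ[0]`` instead of the former ``dft_dc`` return value.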
@@ -125,14 +125,14 @@ class Plocar:
 
 def from_file(self, vasp_dir='./', plocar_filename='PLOCAR'):
 r"""
-Reads non-normalized projectors from a binary file (`PLOCAR' by default)
+Reads non-normalized projectors from a binary file ('PLOCAR' by default)
 generated by VASP PLO interface.
 
 Parameters
 ----------
 
-vasp_dir (str) : path to the VASP working directory [default = `./']
-plocar_filename (str) : filename [default = `PLOCAR']
+vasp_dir (str) : path to the VASP working directory [default = './']
+plocar_filename (str) : filename [default = 'PLOCAR']
 
 """
 # Add a slash to the path name if necessary
@@ -289,14 +289,14 @@ class Poscar:
 self.q_cart = None
 
 def from_file(self, vasp_dir='./', poscar_filename='POSCAR'):
-"""
+r"""
 Reads POSCAR and returns a dictionary.
 
 Parameters
 ----------
 
-vasp_dir (str) : path to the VASP working directory [default = `./']
-plocar_filename (str) : filename [default = `POSCAR']
+vasp_dir (str) : path to the VASP working directory [default = './']
+plocar_filename (str) : filename [default = 'POSCAR']
 
 """
 # Convenince local function
@@ -410,15 +410,15 @@ class Kpoints:
 # Reads IBZKPT file
 #
 def from_file(self, vasp_dir='./', ibz_filename='IBZKPT'):
-"""
+r"""
 Reads from IBZKPT: k-points and optionally
 tetrahedra topology (if present).
 
 Parameters
 ----------
 
-vasp_dir (str) : path to the VASP working directory [default = `./']
-plocar_filename (str) : filename [default = `IBZKPT']
+vasp_dir (str) : path to the VASP working directory [default = './']
+plocar_filename (str) : filename [default = 'IBZKPT']
 
 """
 
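These three readers share the same calling pattern. A minimal usage sketch, assuming the classes live in ``triqs_dft_tools.converters.plovasp.vaspio`` and take no constructor arguments (only the ``from_file`` signatures are taken from the hunks above)::

    from triqs_dft_tools.converters.plovasp.vaspio import Plocar, Poscar, Kpoints

    poscar = Poscar()
    poscar.from_file(vasp_dir='./', poscar_filename='POSCAR')

    kpoints = Kpoints()
    kpoints.from_file(vasp_dir='./', ibz_filename='IBZKPT')

    plocar = Plocar()
    plocar.from_file(vasp_dir='./', plocar_filename='PLOCAR')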
@@ -63,7 +63,7 @@ class Wannier90Converter(ConverterTools):
 symmcorr_subgrp='dft_symmcorr_input', misc_subgrp='dft_misc_input',
 repacking=False, rot_mat_type='hloc_diag', bloch_basis=False, add_lambda=None,
 w90zero=2e-6, reorder_orbital_and_spin_vasp5=False):
-"""
+r"""
 Initialise the class.
 
 Parameters
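For context, a hypothetical minimal use of this converter (the import path, the ``seedname`` argument and the ``convert_dft_input()`` call follow the usual DFTTools converter pattern and are not part of this hunk; only ``rot_mat_type`` and ``bloch_basis`` appear in the signature above)::

    from triqs_dft_tools.converters import Wannier90Converter

    conv = Wannier90Converter(seedname='srvo3', rot_mat_type='hloc_diag', bloch_basis=False)
    conv.convert_dft_input()   # writes the dft_input group into srvo3.h5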
@@ -4,8 +4,8 @@
 int main()
 {
 double e[4], en, ci_sum, ct, res[4];
-int inds[4], inds_should[4];
-int i, flag;
+int inds[4];
+int i;
 char mess[128];
 
 e[0] = -1.5;
@@ -16,8 +16,6 @@ int main()
 en = -0.55;
 printf("\n Test case 2\n\n");
 
-flag = dos_reorder(en, e, inds);
-
 dos_corner_weights(en, e, inds, res);
 dos_tet_weights(en, e, inds, &ct);
 