Mirror of https://github.com/triqs/dft_tools (synced 2025-01-03 01:55:56 +01:00)

Commit 8378013faa: Merge branch 'master' into vasp

Conflicts: doc/guide/dftdmft_selfcons.rst, python/CMakeLists.txt,
python/converters/__init__.py, python/sumk_dft.py, test/CMakeLists.txt
@@ -14,14 +14,16 @@ Olivier Parcollet (CEA Saclay). A first step has been the definition of the
framework and the construction of the projective Wannier functions as input for
the DMFT calculations [#dft_tools1]_. This has been followed by the introduction
of full charge self-consistency [#dft_tools2]_, necessary for total energy
calculations. The package at hand is fully implemented as an application
based on the TRIQS library [#dft_tools3]_.

**Developers**: M. Aichhorn, L. Pourovskii, P. Seth, V. Vildosola, M. Zingl, O. E. Peil, X. Deng, J. Mravlje, G. Kraberger, C. Martins, M. Ferrero, O. Parcollet

**Related papers**:

.. [#dft_tools1] `M. Aichhorn, L. Pourovskii, V. Vildosola, M. Ferrero, O. Parcollet, T. Miyake, A. Georges, and S. Biermann, Phys. Rev. B 80, 085101 (2009) <http://link.aps.org/doi/10.1103/PhysRevB.80.085101>`_ (:download:`bibtex file <dft_tools1.bib>`)

.. [#dft_tools2] `M. Aichhorn, L. Pourovskii, and A. Georges, Phys. Rev. B 84, 054529 (2011) <http://link.aps.org/doi/10.1103/PhysRevB.84.054529>`_ (:download:`bibtex file <dft_tools2.bib>`)

.. [#dft_tools3] `M. Aichhorn, L. Pourovskii, P. Seth, V. Vildosola, M. Zingl, O. E. Peil, X. Deng, J. Mravlje, G. Kraberger, C. Martins, M. Ferrero, and O. Parcollet, Comput. Phys. Commun. 204, 200 (2016) <http://www.sciencedirect.com/science/article/pii/S0010465516300728>`_ (:download:`bibtex file <dft_tools3.bib>`)

This application is a part of our scientific work and we would appreciate if
projects using it include a citation to the above relevant papers. In
@@ -1,3 +1,5 @@
.. _dftplusdmft:

Introduction to DFT+DMFT
========================
@@ -8,7 +10,7 @@ terms it states that electrons in a crystal form bands of allowed
states in momentum space. These states are then filled by the
electrons according to Pauli's principle up to the Fermi level. With this
simple picture one can explain the electronic band structure of simple
materials such as elementary copper or aluminum.

Following this principle one can easily classify all existing
materials into metals and insulators, with semiconductors being
@@ -17,9 +19,8 @@ spectrum. Following this band theory, a system is a metal if there is
an odd number of electrons in the valence bands, since this leads to a
partially filled band, cutting the Fermi energy and, thus, producing a
Fermi surface, i.e. metallic behavior. On the other hand, an even
number of electrons leads to completely filled bands with a finite
excitation gap to the conduction bands, i.e. insulating behavior.

This classification works pretty well for a large class of
materials, where the electronic band structures are reproduced by
@@ -41,7 +42,7 @@ current
because of the strong Coulomb repulsion between the electrons. With
reference to Sir Nevill Mott, who contributed substantially to the
explanation of this effect in the 1930's, these materials are in
general referred to as Mott insulators.

Density-functional theory in a (very small) nutshell
----------------------------------------------------
@@ -63,7 +64,7 @@ that is discussed in the literature on DFT, let us just note that the
main results of DFT calculations are the Kohn-Sham energies
:math:`\varepsilon_{\nu\mathbf{k}}` and the Kohn-Sham orbitals :math:`\psi_{\nu\mathbf{k}}(\mathbf{r})`.
This set of equations is exact; however, the exchange-correlation
potential :math:`V_{xc}(\mathbf{r})` is not known explicitly. In
order to do actual calculations, it needs to be approximated in some
way. The local density approximation is one of the most famous
approximations used in this context. This approximation works well for
@@ -75,7 +76,7 @@ From DFT to DMFT

In order to extend our calculations to strong correlations, we need to
go from a description by bands to a description in terms of
(localized) orbitals: Wannier functions.

In principle, Wannier functions :math:`\chi_{\mu\sigma}(\mathbf{r})`
are nothing else than a Fourier transform of the Bloch basis set from
@@ -88,7 +89,7 @@ where we introduced also the spin degree of freedom :math:`\sigma`. The
unitary matrix :math:`U_{\mu\nu}` is not uniquely defined, but allows for a
certain amount of freedom in the calculation of Wannier functions. A
very popular choice is the constraint that the resulting Wannier
functions should be maximally localized in space. Another route,
computationally much lighter and more stable, is that of projective Wannier
functions. This scheme is used for the Wien2k interface in this
package.
@@ -98,7 +99,7 @@ A central quantity in this scheme is the projection operator
:math:`\nu` a Bloch band index.
Its definition and how it is calculated can be found in the original
literature or in the extensive documentation of the
:program:`dmftproj` program shipped with :program:`DFTTools`.

Using projective Wannier functions for DMFT
-------------------------------------------
@@ -121,7 +122,7 @@ with the DFT Green function

This non-interacting Green function :math:`G^0_{mn}(i\omega)` defines,
together with the interaction Hamiltonian, the Anderson impurity
model. The DMFT self-consistency cycle can now be formulated as
follows:

#. Take :math:`G^0_{mn}(i\omega)` and the interaction Hamiltonian and
@@ -173,9 +174,9 @@ Full charge self-consistency

The feedback of the electronic correlations to the Kohn-Sham orbitals
is included by the interacting density matrix. Without going into the
details, it basically consists of calculating the Kohn-Sham density
:math:`\rho(\mathbf{r})` in the presence of this interacting density
matrix. This new density now defines a new Kohn-Sham
exchange-correlation potential, which in turn leads to new
:math:`\varepsilon_{\nu\mathbf{k}}`,
:math:`\psi_{\nu\mathbf{k}}(\mathbf{r})`, and projectors
@@ -186,4 +187,4 @@ step 3, before the local lattice Green
function is downfolded again into orbital space.

How all these calculations can be done in practice with this
:program:`DFTTools` package is the subject of the user guide in this documentation.
doc/basicnotions/first.rst (new file, 72 lines)
@@ -0,0 +1,72 @@
What you should know
====================

Probably, you can hardly wait to perform your first DFT+DMFT calculation
with the :program:`DFTTools` package. This documentation and user guide
should make it as easy as possible to get started quickly.
However, it is important to sort out a few prerequisites first.

What is :program:`DFTTools`?
----------------------------

:program:`DFTTools` connects the :ref:`TRIQS <triqslibs:welcome>` library
to realistic materials calculations based
on density functional theory (DFT). It allows an efficient implementation
of DFT plus dynamical mean-field theory (DMFT) calculations and it supplies
tools and methods to construct Wannier functions and to perform the
DMFT self-consistency cycle in this basis set. Post-processing tools,
such as band-structure plotting or the calculation of transport properties,
are also implemented. The package comes with a fully charge self-consistent
interface to the Wien2k band structure code, as well as a generic interface.
We assume that you already know about DFT and the usage of Wien2k.

Have a look at the :ref:`DFT+DMFT page <dftplusdmft>` for a brief introduction to
the DFT+DMFT method and to how the theory is reflected in the
:ref:`basic structure <structure>` of the :program:`DFTTools` package.


Understand the philosophy of :program:`DFTTools`
------------------------------------------------

The purpose of :program:`DFTTools` is to provide the necessary tools
required for DFT+DMFT calculations. Putting those tools together into a working
DFT+DMFT implementation is the task of the user. We do not
supply a universal script which runs with the click of a button, simply because
each material requires a different treatment or different settings.
Building your own script offers a great deal of flexibility and customizability.
Additionally, the :ref:`DFTTools user guide <documentation>` is there to support you
during this process.

It should go without saying, but the verification of outputs and the inspection
of results for their meaningfulness is the responsibility of the user.

The :program:`DFTTools` package is a toolbox and **not** a black box!


Learn how to use :ref:`TRIQS <triqslibs:welcome>` (and the :ref:`CTHYB <triqscthyb:welcome>` solver)
----------------------------------------------------------------------------------------------------

As :program:`DFTTools` is a :ref:`TRIQS <triqslibs:welcome>` based application,
it is beneficial to invest a few hours to become familiar with
the :ref:`TRIQS <triqslibs:welcome>` basics first. The
:ref:`TRIQS tutorial <triqslibs:tutorials>` covers
the most important aspects of :ref:`TRIQS <triqslibs:welcome>`. We recommend
downloading our hands-on training in the form of ipython notebooks from
the `tutorials repository on GitHub <https://github.com/TRIQS/tutorials>`_.
Tutorials 1 to 6 are on the :ref:`TRIQS <triqslibs:welcome>` library, whereas tutorials
7 and 8 are more specific to the usage of the :ref:`CTHYB <triqscthyb:welcome>`
hybridization-expansion solver. In general, those tutorials will take at least a full day to finish.

Afterwards you can continue with the :ref:`DFTTools user guide <documentation>`.


Maximum Entropy (MaxEnt)
------------------------

Analytic continuation is needed for many :ref:`post-processing tools <analysis>`, e.g. to
calculate the spectral function, the correlated band structure (:math:`A(k,\omega)`),
and to perform :ref:`transport calculations <Transport>`.
You can use the Pade approximation available in the :ref:`TRIQS <triqslibs:welcome>` library; however,
it turns out to be very unstable for noisy numerical data. Most of the time, the MaxEnt method
is used to obtain data on the real-frequency axis. At the moment neither :ref:`TRIQS <triqslibs:welcome>` nor
:program:`DFTTools` provides such routines.
@@ -1,18 +1,21 @@
.. _structure:

Structure of :program:`DFTTools`
================================

.. image:: images/structure.png
   :width: 700
   :align: center

The central part of :program:`DFTTools`, which performs the
steps of the DMFT self-consistency cycle, is written following the
same philosophy as the :ref:`TRIQS <triqslibs:welcome>` toolbox. At
the user level, easy-to-use python modules are provided that allow one to
write simple and short scripts performing the actual calculation.
The usage of those modules is presented in the user guide of this
:ref:`documentation`. Before consulting the user guide, we suggest
reading the following introduction on the general structure of
the :program:`DFTTools` package.

The interface layer
-------------------
@@ -30,37 +33,37 @@ Wien2k interface
""""""""""""""""

This interface layer consists of two parts. First, the output from Wien2k
is taken, and localized Wannier orbitals are constructed. This is done
by the FORTRAN program :program:`dmftproj`. The second part consists of
the conversion of the :program:`dmftproj` output into the hdf5 file
format to be used for the DMFT calculation. This step is done by a
python routine called :class:`Wien2kConverter`, which reads the text output and
creates the hdf5 input file with the necessary ingredients. Quite
naturally, :program:`DFTTools` will adopt this converter concept also for future
developments for other DFT packages.
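
As a quick illustration of this converter concept, a minimal conversion
script (discussed in detail in the conversion guide of this documentation;
the case name `SrVO3` is only a placeholder) could look like::

    from pytriqs.applications.dft.converters.wien2k_converter import *

    # read the dmftproj output for the given Wien2k case and write
    # everything needed for the DMFT calculation into SrVO3.h5
    Converter = Wien2kConverter(filename = 'SrVO3')
    Converter.convert_dft_input()
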
General interface
"""""""""""""""""

In addition to the specialized Wien2k interface, :program:`DFTTools`
also provides a very light-weight general interface. It basically
consists of a very simple :class:`HkConverter`. As input it requires a
Hamiltonian matrix :math:`H_{mn}(\mathbf{k})` written already in
localized-orbital indices :math:`m,n`, on a :math:`\mathbf{k}`-point
grid covering the Brillouin zone, and just a few other pieces of information
such as the total number of electrons, how many correlated atoms there are
in the unit cell, and so on. It converts this Hamiltonian into an hdf5 format and
sets some variables to standard values, such that it can be used with
the python modules performing the DMFT calculation. How the
Hamiltonian matrix :math:`H_{mn}(\mathbf{k})` is actually calculated
is **not** part of this interface.
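
A correspondingly minimal sketch for this general interface (assuming the
converter is taken from the same converters subpackage; the file name
`case.hk` is a placeholder for the input file described in the conversion
guide) could look like::

    from pytriqs.applications.dft.converters.hk_converter import *

    # read H(k) and the header information from case.hk and
    # write an hdf5 archive that the DMFT modules can work with
    Converter = HkConverter(filename = 'case.hk')
    Converter.convert_dft_input()
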
The DMFT calculation
--------------------

As mentioned above, there are a few python routines that allow one to
perform the multi-band DMFT calculation in the context of real
materials. The major part is contained in the module
:class:`SumkDFT`. It contains routines to

* calculate local Green functions

@@ -69,7 +72,7 @@ materials. The major part is contained in the module
* calculate the double-counting correction
* calculate the chemical potential in order to get the electron count right
* other things like determining the structure of the local
  Hamiltonian, rotating from local to global coordinate systems, etc.

At the user level, all these routines can be used to construct
situation- and problem-dependent DMFT calculations in a very efficient
@@ -90,8 +93,8 @@ Post-processing

The main result of a DMFT calculation is the interacting Green function
and the self energy. However, one is normally interested in
quantities like the band structure, density of states, or transport
properties. In order to calculate these things, :program:`DFTTools`
provides the post-processing module :class:`SumkDFTTools`. It
contains routines to calculate

@@ -102,11 +105,8 @@ contains routines to calculate
or thermopower.

.. warning::
    At the moment neither :ref:`TRIQS <triqslibs:welcome>` nor :program:`DFTTools`
    provides Maximum Entropy routines! You can use the Pade
    approximation implemented in the :ref:`TRIQS <triqslibs:welcome>` library, or you can use your own
    home-made Maximum Entropy code to do the analytic continuation from
    Matsubara to the real-frequency axis.
@@ -17,7 +17,7 @@ extensions = ['sphinx.ext.autodoc',

source_suffix = '.rst'

project = u'TRIQS DFTTools'
copyright = u'2011-2013, M. Aichhorn, L. Pourovskii, V. Vildosola, C. Martins'
version = '@DFT_TOOLS_VERSION@'
release = '@DFT_TOOLS_RELEASE@'

@@ -28,16 +28,15 @@ templates_path = ['@CMAKE_SOURCE_DIR@/doc/_templates']
html_theme = 'triqs'
html_theme_path = ['@TRIQS_THEMES_PATH@']
html_show_sphinx = False
html_context = {'header_title': 'dft tools',
                'header_subtitle': 'connecting <a class="triqs" style="font-size: 12px" href="http://triqs.ipht.cnrs.fr/1.x">TRIQS</a> to DFT packages',
                'header_links': [['Install', 'install'],
                                 ['Documentation', 'documentation'],
                                 ['Issues', 'issues'],
                                 ['About DFTTools', 'about']]}
html_static_path = ['@CMAKE_SOURCE_DIR@/doc/_static']
html_sidebars = {'index': ['sideb.html', 'searchbox.html']}

htmlhelp_basename = 'TRIQSDftToolsdoc'

intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None),
                       'triqslibs': ('http://triqs.ipht.cnrs.fr/1.x', None),
                       'triqscthyb': ('https://triqs.ipht.cnrs.fr/1.x/applications/cthyb/', None)}
doc/dft_tools3.bib (new file, 13 lines)
@@ -0,0 +1,13 @@
@Article{TRIQS/DFTTools,
  title   = "TRIQS/DFTTools: A \{TRIQS\} application for ab initio calculations of correlated materials",
  journal = "Computer Physics Communications",
  volume  = "204",
  number  = "",
  pages   = "200 - 208",
  year    = "2016",
  note    = "",
  issn    = "0010-4655",
  doi     = "http://dx.doi.org/10.1016/j.cpc.2016.03.014",
  url     = "http://www.sciencedirect.com/science/article/pii/S0010465516300728",
  author  = "Markus Aichhorn and Leonid Pourovskii and Priyanka Seth and Veronica Vildosola and Manuel Zingl and Oleg E. Peil and Xiaoyu Deng and Jernej Mravlje and Gernot J. Kraberger and Cyril Martins and Michel Ferrero and Olivier Parcollet",
}
@@ -1,4 +1,4 @@
.. module:: pytriqs.applications.dft

.. _documentation:

@@ -11,6 +11,7 @@ Basic notions
.. toctree::
   :maxdepth: 2

   basicnotions/first
   basicnotions/dft_dmft
   basicnotions/structure

@@ -23,6 +24,7 @@ User guide

   guide/conversion
   guide/dftdmft_singleshot
   guide/SrVO3
   guide/dftdmft_selfcons
   guide/analysis
   guide/full_tutorial

@@ -43,6 +45,7 @@ This is the reference manual for the python routines.
   reference/sumk_dft_tools
   reference/symmetry
   reference/transbasis
   reference/block_structure


FAQs
doc/guide/SrVO3.rst (new file, 224 lines)
@@ -0,0 +1,224 @@
.. _SrVO3:

SrVO3 (single-shot)
===================

We will now discuss how to set up a full working calculation,
including the initialization of the :ref:`CTHYB solver <triqscthyb:welcome>`.
Some additional parameters are introduced to make the calculation
more efficient. This is a more advanced example, which is
also suited for parallel execution. The conversion, which
we assume to have been carried out already, is discussed :ref:`here <conversion>`.

For the convenience of the user, we also provide two
working python scripts in this documentation: one for a calculation
using Kanamori definitions (:download:`dft_dmft_cthyb.py
<images_scripts/dft_dmft_cthyb.py>`) and one with a
rotationally invariant Slater interaction Hamiltonian (:download:`dft_dmft_cthyb_slater.py
<images_scripts/dft_dmft_cthyb_slater.py>`). The user has to adapt these
scripts to his own needs.

Loading modules
---------------

First, we load the necessary modules::

    from pytriqs.applications.dft.sumk_dft import *
    from pytriqs.gf.local import *
    from pytriqs.archive import HDFArchive
    from pytriqs.operators.util import *
    from pytriqs.applications.impurity_solvers.cthyb import *

The last two lines load the modules for the construction of the
:ref:`CTHYB solver <triqscthyb:welcome>`.

Initializing SumkDFT
--------------------

We define some parameters, which should be self-explanatory::

    dft_filename = 'SrVO3'          # filename
    U = 4.0                         # interaction parameters
    J = 0.65
    beta = 40                       # inverse temperature
    loops = 15                      # number of DMFT loops
    mix = 0.8                       # mixing factor of Sigma after solution of the AIM
    dc_type = 1                     # DC type: 0 FLL, 1 Held, 2 AMF
    use_blocks = True               # use block structure from DFT input
    prec_mu = 0.0001                # precision of chemical potential

And next, we can initialize the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class::

    SK = SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks)

Initializing the solver
-----------------------

We also have to specify the :ref:`CTHYB solver <triqscthyb:welcome>` related settings.
We assume that the DMFT script for SrVO3 is executed on 16 cores. A sufficient set
of parameters for a first guess is::

    p = {}
    # solver
    p["random_seed"] = 123 * mpi.rank + 567
    p["length_cycle"] = 200
    p["n_warmup_cycles"] = 100000
    p["n_cycles"] = 1000000
    # tail fit
    p["perform_tail_fit"] = True
    p["fit_max_moment"] = 4
    p["fit_min_n"] = 30
    p["fit_max_n"] = 60

Here we use a tail fit to deal with the numerical noise of higher Matsubara frequencies.
For other options and more details on the solver parameters, we refer the user to
the :ref:`CTHYB solver <triqscthyb:welcome>` documentation.
It is important to note that the solver parameters have to be adjusted for
each material individually. A guide on how to set the tail fit parameters is given
:ref:`below <tailfit>`.

The next step is to initialize the
:class:`solver class <pytriqs.applications.impurity_solvers.cthyb.Solver>`.
It consists of two parts:

#. Calculating the multi-band interaction matrix, and constructing the
   interaction Hamiltonian.
#. Initializing the solver class itself.

The first step is done using methods of the :ref:`TRIQS <triqslibs:welcome>` library::

    n_orb = SK.corr_shells[0]['dim']
    l = SK.corr_shells[0]['l']
    spin_names = ["up","down"]
    orb_names = [i for i in range(n_orb)]
    # Use GF structure determined by DFT blocks:
    gf_struct = SK.gf_struct_solver[0]
    # Construct U matrix for density-density calculations:
    Umat, Upmat = U_matrix_kanamori(n_orb=n_orb, U_int=U, J_hund=J)

We assumed here that we want to use an interaction matrix with
Kanamori definitions of :math:`U` and :math:`J`.

Next, we construct the Hamiltonian and the solver::

    h_int = h_int_density(spin_names, orb_names, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)
    S = Solver(beta=beta, gf_struct=gf_struct)

As you see, we take only density-density interactions into
account. Other Hamiltonians, e.g. with fully rotationally invariant interactions, are:

* h_int_kanamori
* h_int_slater

For other choices of the interaction matrices (e.g. Slater representation) or
Hamiltonians, we refer to the reference manual of the :ref:`TRIQS <triqslibs:welcome>`
library.

DMFT cycle
----------

Now we can go to the definition of the self-consistency step. It consists again
of the basic steps discussed in the :ref:`previous section <singleshot>`, with
some additional refinements::

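    # A minimal stand-in for the bookkeeping done at the beginning of the full
    # scripts (dft_dmft_cthyb.py): check whether results of a previous run are
    # present and make sure the 'dmft_output' group used below exists.
    previous_present = False
    if mpi.is_master_node():
        ar = HDFArchive(dft_filename+'.h5','a')
        if 'dmft_output' in ar:
            if 'iterations' in ar['dmft_output']: previous_present = True
        else:
            ar.create_group('dmft_output')
        del ar
    previous_present = mpi.bcast(previous_present)
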
    for iteration_number in range(1,loops+1):
        if mpi.is_master_node(): print "Iteration = ", iteration_number

        SK.symm_deg_gf(S.Sigma_iw,orb=0)                        # symmetrizing Sigma
        SK.set_Sigma([ S.Sigma_iw ])                            # put Sigma into the SumK class
        chemical_potential = SK.calc_mu( precision = prec_mu )  # find the chemical potential for given density
        S.G_iw << SK.extract_G_loc()[0]                         # calc the local Green function
        mpi.report("Total charge of Gloc : %.6f"%S.G_iw.total_density())

        # Init the DC term and the real part of Sigma, if no previous runs found:
        if (iteration_number==1 and previous_present==False):
            dm = S.G_iw.density()
            SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
            S.Sigma_iw << SK.dc_imp[0]['up'][0,0]

        # Calculate new G0_iw to input into the solver:
        S.G0_iw << S.Sigma_iw + inverse(S.G_iw)
        S.G0_iw << inverse(S.G0_iw)

        # Solve the impurity problem:
        S.solve(h_int=h_int, **p)

        # Solved. Now do post-solution stuff:
        mpi.report("Total charge of impurity problem : %.6f"%S.G_iw.total_density())

        # Now mix Sigma and G with factor mix, if wanted:
        if (iteration_number>1 or previous_present):
            if mpi.is_master_node():
                ar = HDFArchive(dft_filename+'.h5','a')
                mpi.report("Mixing Sigma and G with factor %s"%mix)
                S.Sigma_iw << mix * S.Sigma_iw + (1.0-mix) * ar['dmft_output']['Sigma_iw']
                S.G_iw << mix * S.G_iw + (1.0-mix) * ar['dmft_output']['G_iw']
                del ar
            S.G_iw << mpi.bcast(S.G_iw)
            S.Sigma_iw << mpi.bcast(S.Sigma_iw)

        # Write the final Sigma and G to the hdf5 archive:
        if mpi.is_master_node():
            ar = HDFArchive(dft_filename+'.h5','a')
            ar['dmft_output']['iterations'] = iteration_number
            ar['dmft_output']['G_0'] = S.G0_iw
            ar['dmft_output']['G_tau'] = S.G_tau
            ar['dmft_output']['G_iw'] = S.G_iw
            ar['dmft_output']['Sigma_iw'] = S.Sigma_iw
            del ar

        # Set the new double counting:
        dm = S.G_iw.density() # compute the density matrix of the impurity problem
        SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)

        # Save stuff into the user_data group of hdf5 archive in case of rerun:
        SK.save(['chemical_potential','dc_imp','dc_energ'])

This is all we need for the DFT+DMFT calculation.
You can see in this code snippet that all results of this calculation
will be stored in a separate subgroup in the hdf5 file, called `dmft_output`.
Note that this script performs 15 DMFT cycles, but does not check for
convergence. Of course, it would be possible to build in convergence criteria.
A simple check for convergence can also be done if you store multiple quantities
from each iteration and analyze the convergence by hand. In general, it is advisable
to start with lower statistics (fewer measurements), but then increase it at a
point close to converged results (e.g. after a few initial iterations). This helps
to keep computational costs low during the first iterations.

Using the Kanamori Hamiltonian and the parameters above (but on 16 cores),
your self energy after the **first iteration** should look like the
self energy shown below.

.. image:: images_scripts/SrVO3_Sigma_iw_it1.png
   :width: 700
   :align: center

.. _tailfit:

Tail fit parameters
-------------------

A good way to identify suitable tail fit parameters is by "human inspection".
Therefore, disable the tail fitting first::

    p["perform_tail_fit"] = False

and perform only one DMFT iteration. The resulting self energy can be tail fitted by hand::

    for name, sig in S.Sigma_iw:
        S.Sigma_iw[name].fit_tail(fit_n_moments = 4, fit_min_n = 60, fit_max_n = 140)

Plot the self energy and adjust the tail fit parameters such that you obtain a
proper fit. The :meth:`fit_tail function <pytriqs.gf.local.tools.tail_fit>` is part
of the :ref:`TRIQS <triqslibs:welcome>` library.
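
A quick way to inspect the result is the generic :ref:`TRIQS <triqslibs:welcome>` plotting
interface. This is only a minimal sketch: the block name `'up_0'` and the frequency
window are illustrative and depend on your block structure::

    from pytriqs.plot.mpl_interface import oplot, plt

    # plot one block of the self energy on the Matsubara axis
    oplot(S.Sigma_iw['up_0'], '-o', x_window=(0, 60))
    plt.show()
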
For a self energy which goes to zero for :math:`i\omega \rightarrow 0`, our suggestion is
to start the tail fit (:emphasis:`fit_min_n`) at a Matsubara frequency considerably above the minimum
of the self energy and to stop (:emphasis:`fit_max_n`) before the noise fully takes over.
If it is difficult to find a reasonable fit in this region you should increase
your statistics (number of measurements). Keep in mind that :emphasis:`fit_min_n`
and :emphasis:`fit_max_n` also depend on :math:`\beta`.
@@ -7,13 +7,13 @@ This section explains how to use some tools of the package in order to analyse t

There are two practical tools for which a self energy on the real axis is not needed, namely:

* :meth:`dos_wannier_basis <dft.sumk_dft_tools.SumkDFTTools.dos_wannier_basis>` for the density of states of the Wannier orbitals and
* :meth:`partial_charges <dft.sumk_dft_tools.SumkDFTTools.partial_charges>` for the partial charges according to the :program:`Wien2k` definition.
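
For orientation, a minimal call of the first of these two methods could look like
the sketch below; the keyword names follow the reference manual of this package,
and the broadening value is only illustrative::

    SK = SumkDFTTools(hdf_file = filename + '.h5')

    # density of states of the Wannier orbitals, without self energy
    # and without double-counting correction
    SK.dos_wannier_basis(broadening=0.05, with_Sigma=False, with_dc=False)
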
However, a real frequency self energy has to be provided by the user for the methods:

* :meth:`dos_parproj_basis <dft.sumk_dft_tools.SumkDFTTools.dos_parproj_basis>` for the momentum-integrated spectral function including self energy effects and
* :meth:`spaghettis <dft.sumk_dft_tools.SumkDFTTools.spaghettis>` for the momentum-resolved spectral function (i.e. ARPES)

.. warning::
    This package does NOT provide an explicit method to do an **analytic continuation** of the

@@ -24,17 +24,17 @@ However, a real frequency self energy has to be provided by the user for the met
Initialisation
--------------

All tools described below are collected in an extension of the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class and are
loaded by importing the module :class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>`::

    from pytriqs.applications.dft.sumk_dft_tools import *

The initialisation of the class is equivalent to that of the :class:`SumkDFT <dft.sumk_dft.SumkDFT>`
class::

    SK = SumkDFTTools(hdf_file = filename + '.h5')

Note that all routines available in :class:`SumkDFT <dft.sumk_dft.SumkDFT>` are also available here.

If required, we have to load and initialise the real frequency self energy. Most conveniently,
you have your self energy already stored as a real frequency :class:`BlockGf <pytriqs.gf.local.BlockGf>` object

@@ -110,7 +110,7 @@ real frequency self energy for this purpose. The calculation is done by::
which calculates the partial charges using the self energy, double counting, and chemical potential as set in the
`SK` object. On return, `dm` is a list, where the list items correspond to the density matrices of all shells
defined in the list `SK.shells`. This list is constructed by the :program:`Wien2k` converter routines and stored automatically
in the hdf5 archive. For the structure of `dm`, see also the :meth:`reference manual <dft.sumk_dft_tools.SumkDFTTools.partial_charges>`.

Correlated spectral function (with real frequency self energy)
--------------------------------------------------------------

@@ -129,7 +129,7 @@ Momentum resolved spectral function (with real frequency self energy)

Another quantity of interest is the momentum-resolved spectral function, which can directly be compared to ARPES
experiments. First we have to execute `lapw1`, `lapw2 -almd` and :program:`dmftproj` with the `-band`
option and use the :meth:`convert_bands_input <dft.converters.wien2k_converter.Wien2kConverter.convert_bands_input>`
routine, which converts the required files (for a more detailed description see :ref:`conversion`). The spectral function is then calculated by typing::

    SK.spaghettis(broadening=0.01,plot_shift=0.0,plot_range=None,ishell=None,save_to_file='Akw_')
@@ -34,9 +34,9 @@ some files that we need for the Wannier orbital construction.
The orbital construction itself is done by the Fortran program
:program:`dmftproj`. For an extensive manual to this program see
:download:`TutorialDmftproj.pdf <images_scripts/TutorialDmftproj.pdf>`.
Here we will only describe the basic steps.

Let us take the compound SrVO3, a commonly used
example for DFT+DMFT calculations. The input file for
:program:`dmftproj` looks like
@@ -56,9 +56,9 @@ following 3 to 5 lines:
   harmonics).
#. The four numbers refer to *s*, *p*, *d*, and *f* electrons,
   resp. Putting 0 means doing nothing, putting 1 will calculate
   **unnormalized** projectors in compliance with the Wien2k
   definition. The important flag is 2; it means that these
   electrons are included as correlated electrons, and normalized Wannier
   functions are calculated for them. In the example above, you see that only for the
   vanadium *d* do we set the flag to 2. If you simply want to do a DMFT
   calculation, then set everything to 0, except one flag 2 for the
@@ -100,12 +100,12 @@ directory name):
  respectively. These files are needed for projected
  density-of-states or spectral-function calculations in
  post-processing only.
* :file:`case.oubwin` needed for the charge density recalculation in
  the case of a fully self-consistent DFT+DMFT run (see below).

Now we convert these files into an hdf5 file that can be used for the
DMFT calculations. For this purpose we
use the python module :class:`Wien2kConverter <dft.converters.wien2k_converter.Wien2kConverter>`. It is initialized as::

    from pytriqs.applications.dft.converters.wien2k_converter import *
    Converter = Wien2kConverter(filename = case)
@@ -119,7 +119,7 @@ an hdf5 archive, named :file:`case.h5`, where all the data is
stored. For other parameters of the constructor please visit the
:ref:`refconverters` section of the reference manual.

After initializing the interface module, we can now convert the input
text files to the hdf5 archive by::

    Converter.convert_dft_input()
@@ -133,18 +133,18 @@ After this step, all the necessary information for the DMFT loop is
stored in the hdf5 archive, where the string variable
`Converter.hdf_filename` gives the file name of the archive.

At this point you should use the method :meth:`dos_wannier_basis <dft.sumk_dft_tools.SumkDFTTools.dos_wannier_basis>`
contained in the module :class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>` to check the density of
states of the Wannier orbitals (see :ref:`analysis`).

You now have everything for performing a DMFT calculation, and you can
proceed with the section on :ref:`single-shot DFT+DMFT calculations <singleshot>`.

Data for post-processing
""""""""""""""""""""""""

In case you want to do post-processing of your data using the module
:class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>`, some more files
have to be converted to the hdf5 archive. For instance, for
calculating the partial density of states or partial charges
consistent with the definition of :program:`Wien2k`, you have to invoke::

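    # the call shown in the full guide at this point is presumably the
    # conversion of the partial projectors produced by dmftproj:
    Converter.convert_parproj_input()
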
@@ -165,7 +165,7 @@ following. First, one has to do the Wien2k calculation on the given

Again, maybe with optional additional flags according to
Wien2k. Now we use a routine of the converter module that allows one to read
and convert the input for :class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>`::

    Converter.convert_bands_input()

@@ -255,10 +255,10 @@ A general H(k)
--------------

In addition to the more complicated Wien2k converter,
:program:`DFTTools` also contains a light converter. It takes only
one input file, and creates the necessary hdf5 output file for
the DMFT calculation. The header of this input file has a defined
format; an example is the following:

.. literalinclude:: images_scripts/case.hk

@ -266,22 +266,74 @@ The lines of this header define
|
|||||||
|
|
||||||
#. Number of :math:`\mathbf{k}`-points used in the calculation
|
#. Number of :math:`\mathbf{k}`-points used in the calculation
|
||||||
#. Electron density for setting the chemical potential
|
#. Electron density for setting the chemical potential
|
||||||
#. Number of correlated atoms in the unit cell
|
#. Number of total atomic shells in the hamiltonian matrix. In short,
|
||||||
#. The next line contains four numbers: index of the atom, index
|
this gives the number of lines described in the following. IN the
|
||||||
of the correlated shell, :math:`l` quantum number, dimension
|
example file give above this number is 2.
|
||||||
of this shell. Repeat this line for each correlated atom.
|
#. The next line(s) contain four numbers each: index of the atom, index
|
||||||
|
of the equivalent shell, :math:`l` quantum number, dimension
|
||||||
|
of this shell. Repeat this line for each atomic shell, the number
|
||||||
|
of the shells is given in the previous line.
|
||||||
|
|
||||||
|
In the example input file given above, we have two inequivalent
|
||||||
|
atomic shells, one on atom number 1 with a full d-shell (dimension 5),
|
||||||
|
and one on atom number 2 with one p-shell (dimension 3).
|
||||||
|
|
||||||
|
Other examples for these lines are:
|
||||||
|
|
||||||
|
#. Full d-shell in a material with only one correlated atom in the
|
||||||
|
unit cell (e.g. SrVO3). One line is sufficient and the numbers
|
||||||
|
are `1 1 2 5`.
|
||||||
|
#. Full d-shell in a material with two equivalent atoms in the unit
|
||||||
|
cell (e.g. FeSe): You need two lines, one for each equivalent
|
||||||
|
atom. First line is `1 1 2 5`, and the second line is
|
||||||
|
`2 1 2 5`. The only difference is the first number, which tells on
|
||||||
|
which atom the shell is located. The second number is the
|
||||||
|
same in both lines, meaning that both atoms are equivalent.
|
||||||
|
#. t2g orbitals on two non-equivalent atoms in the unit cell: Two
|
||||||
|
lines again. First line is `1 1 2 3`, second line `2 2 2 3`. The
|
||||||
|
difference from the case above is that now also the second number
|
||||||
|
differs. Therefore, the two shells are treated independently in
|
||||||
|
the calculation.
|
||||||
|
#. d-p Hamiltonian in a system with two equivalent atoms of each kind in
|
||||||
|
the unit cell (e.g. FeSe has two Fe and two Se in the unit
|
||||||
|
cell). You need four lines. First line `1 1 2 5`, second
|
||||||
|
line
|
||||||
|
`2 1 2 5`. These two lines specify Fe as in the case above. For the p
|
||||||
|
orbitals you need line three as `3 2 1 3` and line four
|
||||||
|
as `4 2 1 3`. We have 4 atoms, since the first number runs from 1 to 4,
|
||||||
|
but only two inequivalent atoms, since the second number runs
|
||||||
|
only from 1 to 2.
|
||||||
|
|
||||||
|
Note that the total dimension of the hamiltonian matrices that are
|
||||||
|
read in is the sum of all shell dimensions that you specified. For
|
||||||
|
example 4 given above, we have a dimension of 5+5+3+3=16. It is important
|
||||||
|
that the order of the shells that you give here must be the same as
|
||||||
|
the order of the orbitals in the hamiltonian matrix. In the last
|
||||||
|
example case above, the code assumes that matrix indices 1 to 5
|
||||||
|
belong to the first d shell, 6 to 10 to the second, 11 to 13 to
|
||||||
|
the first p shell, and 14 to 16 to the second p shell.
|
||||||
|
|
||||||
|
#. Number of correlated shells in the hamiltonian matrix, in the same
|
||||||
|
spirit as line 3.
|
||||||
|
|
||||||
|
#. The next line(s) contain six numbers: index of the atom, index
|
||||||
|
of the equivalent shell, :math:`l` quantum number, dimension
|
||||||
|
of the correlated shells, a spin-orbit parameter, and another
|
||||||
|
parameter defining interactions. Note that the latter two
|
||||||
|
parameters are not used at the moment in the code, and only kept
|
||||||
|
for compatibility reasons. In our example file we use only the
|
||||||
|
d-shell as correlated, that is why we have only one line here.
|
||||||
|
|
||||||
#. The last line contains several numbers: the number of irreducible
|
#. The last line contains several numbers: the number of irreducible
|
||||||
representations, and then the dimensions of the irreps. One
|
representations, and then the dimensions of the irreps. One
|
||||||
possibility is as the example above, another one would be 2
|
possibility is as the example above, another one would be 2
|
||||||
2 3. Thiw would mean, 2 irreps (eg and t2g), of dimension 2 and 3,
|
2 3. This would mean 2 irreps (eg and t2g) of dimension 2 and 3,
|
||||||
resp.
|
resp.
|
||||||
|
|
||||||
After these header lines, the file has to contain the Hamiltonian
|
After these header lines, the file has to contain the Hamiltonian
|
||||||
matrix in orbital space. The standard convention is that you give for
|
matrix in orbital space. The standard convention is that you give for
|
||||||
each
|
each :math:`\mathbf{k}`-point first the matrix of the real part, then the
|
||||||
:math:`\mathbf{k}`-point first the matrix of the real part, then the
|
matrix of the imaginary part, and then move on to the next :math:`\mathbf{k}`-point.
|
||||||
matrix of the imaginary part, and then move on to the next
|
|
||||||
:math:`\mathbf{k}`-point.
|
|
||||||
|
|
||||||
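Since the input is plain text, such a file can be generated with a short script. The following is only a hedged sketch for the simplest case of a single d-shell (header values as in the SrVO3-like line `1 1 2 5`; ``hk`` is a hypothetical complex array holding your Hamiltonian)::

    import numpy as np

    n_k, dim = 64, 5                               # number of k-points and shell dimension
    hk = np.zeros((n_k, dim, dim), dtype=complex)  # fill this with your H(k)

    with open('case.hk', 'w') as f:
        f.write("%i ! number of k-points\n" % n_k)
        f.write("1.0 ! Electron density\n")
        f.write("1 ! number of total atomic shells\n")
        f.write("1 1 2 5 ! iatom, isort, l, dimension\n")
        f.write("1 ! number of correlated shells\n")
        f.write("1 1 2 5 0 0 ! iatom, isort, l, dimension, SO, irep\n")
        f.write("1 5 ! # of ireps, dimension of irep\n")
        for ik in range(n_k):
            for row in hk[ik].real:                # matrix of the real part first ...
                f.write(" ".join("%18.12f" % x for x in row) + "\n")
            for row in hk[ik].imag:                # ... then the matrix of the imaginary part
                f.write(" ".join("%18.12f" % x for x in row) + "\n")
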
The converter itself is used as::
|
The converter itself is used as::
|
||||||
|
|
||||||
@ -290,8 +342,7 @@ The converter itself is used as::
|
|||||||
Converter.convert_dft_input()
|
Converter.convert_dft_input()
|
||||||
|
|
||||||
where :file:`hkinputfile` is the name of the input file described
|
where :file:`hkinputfile` is the name of the input file described
|
||||||
above. This produces the hdf file that you need, and you cna proceed
|
above. This produces the hdf file that you need for a DMFT calculation.
|
||||||
with the
|
|
||||||
|
|
||||||
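A hedged, self-contained version of this conversion step could look as follows (the module path and the ``hdf_filename`` keyword are assumptions; check the converter reference for the exact signature)::

    from pytriqs.applications.dft.converters.hk_converter import HkConverter

    Converter = HkConverter(filename='case.hk', hdf_filename='case.h5')
    Converter.convert_dft_input()   # writes the dft_input group into case.h5
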
For more options of this converter, have a look at the
|
For more options of this converter, have a look at the
|
||||||
:ref:`refconverters` section of the reference manual.
|
:ref:`refconverters` section of the reference manual.
|
||||||
@ -301,10 +352,10 @@ Wannier90 Converter
|
|||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
Using this converter it is possible to convert the output of
|
Using this converter it is possible to convert the output of
|
||||||
:program:`Wannier90` (http://wannier.org) calculations of
|
`wannier90 <http://wannier.org>`_ calculations of
|
||||||
Maximally Localized Wannier Functions (MLWF) and create an HDF5 archive
|
Maximally Localized Wannier Functions (MLWF) and create an HDF5 archive
|
||||||
suitable for one-shot DMFT calculations with the
|
suitable for one-shot DMFT calculations with the
|
||||||
:class:`SumkDFT <pytriqs.applications.dft.sumk_dft.SumkDFT>` class.
|
:class:`SumkDFT <dft.sumk_dft.SumkDFT>` class.
|
||||||
|
|
||||||
The user must supply two files in order to run the Wannier90 Converter:
|
The user must supply two files in order to run the Wannier90 Converter:
|
||||||
|
|
||||||
@ -375,7 +426,7 @@ In our `Pnma`-LaVO\ :sub:`3` example, for instance, we could use::
|
|||||||
where the ``x=-1,1,0`` option indicates that the V--O bonds in the octahedra are
|
where the ``x=-1,1,0`` option indicates that the V--O bonds in the octahedra are
|
||||||
rotated by (approximately) 45 degrees with respect to the axes of the `Pbnm` cell.
|
rotated by (approximately) 45 degrees with respect to the axes of the `Pbnm` cell.
|
||||||
|
|
||||||
The converter will analyse the matrix elements of the local hamiltonian
|
The converter will analyze the matrix elements of the local Hamiltonian
|
||||||
to find the symmetry matrices `rot_mat` needed for the global-to-local
|
to find the symmetry matrices `rot_mat` needed for the global-to-local
|
||||||
transformation of the basis set for correlated orbitals
|
transformation of the basis set for correlated orbitals
|
||||||
(see section :ref:`hdfstructure`).
|
(see section :ref:`hdfstructure`).
|
||||||
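Once the two input files are in place, the conversion itself is short. A hedged sketch (the seedname ``lavo3`` is an illustrative assumption for the LaVO3 example; the class path follows the reference section)::

    from pytriqs.applications.dft.converters.wannier90_converter import Wannier90Converter

    Converter = Wannier90Converter(seedname='lavo3')
    Converter.convert_dft_input()   # creates lavo3.h5 with the dft_input group
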
@ -400,7 +451,7 @@ The current implementation of the Wannier90 Converter has some limitations:
|
|||||||
* Calculations with spin-orbit (``SO=1``) are not supported.
|
* Calculations with spin-orbit (``SO=1``) are not supported.
|
||||||
* The spin-polarized case (``SP=1``) is not yet tested.
|
* The spin-polarized case (``SP=1``) is not yet tested.
|
||||||
* The post-processing routines in the module
|
* The post-processing routines in the module
|
||||||
:class:`SumkDFTTools <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools>`
|
:class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>`
|
||||||
were not tested with this converter.
|
were not tested with this converter.
|
||||||
* ``proj_mat_all`` are not used, so there are no projectors onto the
|
* ``proj_mat_all`` are not used, so there are no projectors onto the
|
||||||
uncorrelated orbitals for now.
|
uncorrelated orbitals for now.
|
||||||
@ -413,8 +464,8 @@ The interface packages are written such that all the file operations
|
|||||||
are done only on the master node. In general, the philosophy of the
|
are done only on the master node. In general, the philosophy of the
|
||||||
package is that whenever you read in something from the archive
|
package is that whenever you read in something from the archive
|
||||||
yourself, you have to *manually* broadcast it to the nodes. An
|
yourself, you have to *manually* broadcast it to the nodes. An
|
||||||
exception to this rule is when you use routines from :class:`SumkDFT <pytriqs.applications.dft.sumk_dft.SumkDFT>`
|
exception to this rule is when you use routines from :class:`SumkDFT <dft.sumk_dft.SumkDFT>`
|
||||||
or :class:`SumkDFTTools <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools>`, where the broadcasting is done for you.
|
or :class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>`, where the broadcasting is done for you.
|
||||||
|
|
||||||
Interfaces to other packages
|
Interfaces to other packages
|
||||||
----------------------------
|
----------------------------
|
||||||
|
@ -21,7 +21,7 @@ Wien2k + dmftproj
|
|||||||
:ref:`conversion`, or the extensive :download:`dmftproj manual<images_scripts/TutorialDmftproj.pdf>`.
|
:ref:`conversion`, or the extensive :download:`dmftproj manual<images_scripts/TutorialDmftproj.pdf>`.
|
||||||
|
|
||||||
In the following, we discuss how to use the
|
In the following, we discuss how to use the
|
||||||
:ref:`TRIQS <triqslibs:welcome>` tools in combination with the :program:`Wien2k` program.
|
:ref:`TRIQS <triqslibs:install>` tools in combination with the :program:`Wien2k` program.
|
||||||
|
|
||||||
We can use the DMFT script as introduced in section :ref:`singleshot`,
|
We can use the DMFT script as introduced in section :ref:`singleshot`,
|
||||||
with just a few simple
|
with just a few simple
|
||||||
|
@ -5,16 +5,21 @@
|
|||||||
Single-shot DFT+DMFT
|
Single-shot DFT+DMFT
|
||||||
====================
|
====================
|
||||||
|
|
||||||
|
After having set up the hdf5 archive, we can now proceed to our first DFT+DMFT calculation.
|
||||||
|
It consists of initialization steps and the actual DMFT self-consistency loop.
|
||||||
|
With the code snippets below you can build your own script and tailor
|
||||||
|
it to your needs. Short examples on :ref:`mixing <mixing>` and on
|
||||||
|
:ref:`restarting from a previous calculation <restartcalc>` at the end of this page
|
||||||
|
should also demonstrate how easily you can modify your own DMFT script. A full working
|
||||||
|
calculation for SrVO3 is discussed in the :ref:`next section <SrVO3>`.
|
||||||
|
|
||||||
After having set up the hdf5 archive, we can now do our DFT+DMFT calculation. It consists of
|
|
||||||
initialization steps, and the actual DMFT self-consistency loop, as is
|
|
||||||
discussed below.
|
|
||||||
|
|
||||||
Initialisation of the calculation
|
Initialization of the calculation
|
||||||
---------------------------------
|
---------------------------------
|
||||||
|
|
||||||
Before doing the calculation, we have to intialize all the objects that we will need. The first thing is the
|
Before doing the actual calculation, we have to initialize all needed objects.
|
||||||
:class:`SumkDFT <pytriqs.applications.dft.sumk_dft.SumkDFT>` class. It contains all basic routines that are necessary to perform a summation in k-space
|
The first thing is the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class.
|
||||||
|
It contains all basic routines that are necessary to perform a summation in k-space
|
||||||
to get the local quantities used in DMFT. It is initialized by::
|
to get the local quantities used in DMFT. It is initialized by::
|
||||||
|
|
||||||
from pytriqs.applications.dft.sumk_dft import *
|
from pytriqs.applications.dft.sumk_dft import *
|
||||||
@ -25,35 +30,34 @@ Setting up the impurity solver
|
|||||||
------------------------------
|
------------------------------
|
||||||
|
|
||||||
The next step is to set up an impurity solver. There are different
|
The next step is to set up an impurity solver. There are different
|
||||||
solvers available within the :ref:`TRIQS <triqslibs:welcome>` framework. Below, we will discuss
|
solvers available within the :ref:`TRIQS <triqslibs:welcome>` framework.
|
||||||
the example of the hybridisation
|
E.g. for :ref:`SrVO3 <SrVO3>`, we will use the hybridization
|
||||||
expansion :ref:`CTHYB solver <triqscthyb:welcome>`. Later on, we will
|
expansion :ref:`CTHYB solver <triqscthyb:welcome>`. Later on, we will
|
||||||
see also the example of the Hubbard-I solver. They all have in common,
|
see also the example of the `Hubbard-I solver <https://triqs.ipht.cnrs.fr/1.x/applications/hubbardI/>`_.
|
||||||
that they are called by a uniform command::
|
They all have in common that they are called by a uniform command::
|
||||||
|
|
||||||
S.solve(params)
|
S.solve(params)
|
||||||
|
|
||||||
where `params` are the solver parameters and depend on the actual
|
where :emphasis:`params` are the solver parameters and depend on the actual
|
||||||
solver that is used. Before going into the details of the solver, let
|
solver. Setting up the :ref:`CTHYB solver <triqscthyb:welcome>` for SrVO3 is
|
||||||
us discuss in the next section how to perform the DMFT loop using
|
discussed on the :ref:`next page <SrVO3>`. Here, let us now perform the DMFT
|
||||||
the methods of :program:`dft_tools`, assuming that we have set up a
|
loop using the methods of :program:`DFTTools`, assuming that we have already
|
||||||
working solver instance.
|
set up a working solver instance.
|
||||||
|
|
||||||
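As a quick preview (a hedged sketch only; the full SrVO3 setup is discussed on the next page), such a solver instance could be created from the block structure provided by :class:`SumkDFT <dft.sumk_dft.SumkDFT>`::

    from pytriqs.applications.impurity_solvers.cthyb import Solver

    gf_struct = SK.gf_struct_solver[0]          # GF structure of the first inequivalent shell
    S = Solver(beta=40.0, gf_struct=gf_struct)  # beta = 40 as in the SrVO3 example scripts
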
|
|
||||||
Doing the DMFT loop
|
Doing the DMFT loop
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
Having initialized the SumK class and the Solver, we can proceed with the DMFT
|
Having initialized the :class:`SumkDFT class <dft.sumk_dft.SumkDFT>`
|
||||||
loop itself. We have to set up the loop over DMFT
|
and the solver, we can proceed with the actual DMFT part of the calculation.
|
||||||
iterations and the self-consistency condition::
|
We set up the loop over DMFT iterations and the self-consistency condition::
|
||||||
|
|
||||||
n_loops = 5
|
n_loops = 15
|
||||||
for iteration_number in range(n_loops) : # start the DMFT loop
|
for iteration_number in range(n_loops) : # start the DMFT loop
|
||||||
|
|
||||||
SK.set_Sigma([ S.Sigma ]) # Put self energy to the SumK class
|
SK.set_Sigma([ S.Sigma ]) # Put self energy to the SumK class
|
||||||
chemical_potential = SK.calc_mu() # calculate the chemical potential for the given density
|
chemical_potential = SK.calc_mu() # calculate the chemical potential for the given density
|
||||||
S.G_iw << SK.extract_G_loc()[0] # extract the local Green function
|
S.G_iw << SK.extract_G_loc()[0] # extract the local Green function
|
||||||
S.G0_iw << inverse(S.Sigma_iw + inverse(S.G_iw)) # finally get G0, the input for the Solver
|
S.G0_iw << inverse(S.Sigma_iw + inverse(S.G_iw)) # finally get G0, the input for the solver
|
||||||
|
|
||||||
S.solve(h_int=h_int, **p) # now solve the impurity problem
|
S.solve(h_int=h_int, **p) # now solve the impurity problem
|
||||||
|
|
||||||
@ -61,24 +65,18 @@ iterations and the self-consistency condition::
|
|||||||
SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=1) # Set the double counting term
|
SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=1) # Set the double counting term
|
||||||
SK.save(['chemical_potential','dc_imp','dc_energ']) # Save data in the hdf5 archive
|
SK.save(['chemical_potential','dc_imp','dc_energ']) # Save data in the hdf5 archive
|
||||||
|
|
||||||
These basic steps are enough to set up the basic DMFT Loop. For a detailed
|
These steps are enough for a basic DMFT loop.
|
||||||
description of the :class:`SumkDFT <pytriqs.applications.dft.sumk_dft.SumkDFT>` routines, see the reference
|
After the self-consistency steps, which lead to a new :math:`G^0(i\omega)`,
|
||||||
manual.
|
the impurity solver is called. In contrast to model calculations, we have to do a few
|
||||||
|
|
||||||
After
|
|
||||||
the self-consistency steps (extracting a new :math:`G^0(i\omega)`),
|
|
||||||
the Anderson impurity problem is solved.
|
|
||||||
|
|
||||||
Different to model calculations, we have to do a few
|
|
||||||
more steps after this, because of the double-counting correction. We first
|
more steps after this, because of the double-counting correction. We first
|
||||||
calculate the density of the impurity problem. Then, the routine `calc_dc`
|
calculate the density of the impurity problem. Then, the routine :meth:`calc_dc <dft.sumk_dft.SumkDFT.calc_dc>`
|
||||||
takes as parameters this density matrix, the Coulomb interaction, Hund's rule
|
takes as parameters this density matrix, the Coulomb interaction, Hund's rule
|
||||||
coupling, and the type of double-counting that should be used. Possible values
|
coupling, and the type of double-counting that should be used. Possible values
|
||||||
for `use_dc_formula` are:
|
for :emphasis:`use_dc_formula` are:
|
||||||
|
|
||||||
* `0`: Full-localised limit
|
* `0`: Full-localised limit (FLL)
|
||||||
* `1`: DC formula as given in K. Held, Adv. Phys. 56, 829 (2007).
|
* `1`: DC formula as given in K. Held, Adv. Phys. 56, 829 (2007).
|
||||||
* `2`: Around-mean-field
|
* `2`: Around-mean-field (AMF)
|
||||||
|
|
||||||
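For instance, switching from the Held formula used in the loop above to the FLL double counting amounts to (a short illustration; `U`, `J` and the density matrix are taken as in the loop)::

    dm = S.G_iw.density()                                            # impurity density matrix
    SK.calc_dc(dm, U_interact=U, J_hund=J, orb=0, use_dc_formula=0)  # 0 selects FLL
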
At the end of the calculation, we can save the Greens function and self energy into a file::
|
At the end of the calculation, we can save the Greens function and self energy into a file::
|
||||||
|
|
||||||
@ -89,65 +87,21 @@ At the end of the calculation, we can save the Greens function and self energy i
|
|||||||
ar["G"] = S.G_iw
|
ar["G"] = S.G_iw
|
||||||
ar["Sigma"] = S.Sigma_iw
|
ar["Sigma"] = S.Sigma_iw
|
||||||
|
|
||||||
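A hedged, self-contained variant of this save step (the archive name is illustrative; file operations are restricted to the master node, as discussed above)::

    from pytriqs.archive import HDFArchive
    import pytriqs.utility.mpi as mpi

    if mpi.is_master_node():
        ar = HDFArchive('single_shot.h5', 'w')
        ar["G"] = S.G_iw
        ar["Sigma"] = S.Sigma_iw
        del ar                      # closes the archive
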
This is it!
|
These are the essential steps necessary for a one-shot DFT+DMFT calculation.
|
||||||
|
For a detailed description of the :class:`SumkDFT <dft.sumk_dft.SumkDFT>`
|
||||||
|
routines, see the :ref:`reference manual <reference>`. To perform full charge self-consistent calculations, there
|
||||||
|
are some more things to consider, which we will see :ref:`later on <full_charge_selfcons>`.
|
||||||
|
|
||||||
These are the essential steps to do a one-shot DFT+DMFT calculation.
|
.. _restartcalc:
|
||||||
For full charge-self consistent calculations, there are some more things
|
|
||||||
to consider, which we will see later on.
|
|
||||||
|
|
||||||
|
|
||||||
A full DFT+DMFT calculation
|
Restarting a calculation
|
||||||
---------------------------
|
------------------------
|
||||||
|
|
||||||
We will discuss now how to set up a full working calculation,
|
Often only a few DMFT iterations are performed first, and thus, it is desirable to
|
||||||
including setting up the CTHYB solver, and specifying some more parameters
|
carry out further iterations, e.g. to improve on the convergence. With a little modification
|
||||||
in order to make the calculation more efficient. Here, we
|
at the initialization stage (before the DMFT loop) it is possible to detect if previous runs
|
||||||
will see a more advanced example, which is also suited for parallel
|
are present, or if the calculation should start from scratch::
|
||||||
execution. For the convenience of the user, we provide also two
|
|
||||||
working python scripts in this documentation. One for a calculation
|
|
||||||
using Kanamori definitions (:download:`dft_dmft_cthyb.py
|
|
||||||
<images_scripts/dft_dmft_cthyb.py>`) and one with a
|
|
||||||
rotational-invariant Slater interaction Hamiltonian (:download:`dft_dmft_cthyb_slater.py
|
|
||||||
<images_scripts/dft_dmft_cthyb.py>`). The user has to adapt these
|
|
||||||
scripts to his own needs.
|
|
||||||
|
|
||||||
First, we load the necessary modules::
|
|
||||||
|
|
||||||
from pytriqs.applications.dft.sumk_dft import *
|
|
||||||
from pytriqs.gf.local import *
|
|
||||||
from pytriqs.archive import HDFArchive
|
|
||||||
from pytriqs.operators.util import *
|
|
||||||
from pytriqs.applications.impurity_solvers.cthyb import *
|
|
||||||
|
|
||||||
The last two lines load the modules for the construction of the CTHYB
|
|
||||||
solver.
|
|
||||||
|
|
||||||
Then we define some parameters::
|
|
||||||
|
|
||||||
dft_filename='SrVO3'
|
|
||||||
U = 4.0
|
|
||||||
J = 0.65
|
|
||||||
beta = 40
|
|
||||||
loops = 10 # Number of DMFT sc-loops
|
|
||||||
sigma_mix = 0.8 # Mixing factor of Sigma after solution of the AIM
|
|
||||||
dc_type = 1 # DC type: 0 FLL, 1 Held, 2 AMF
|
|
||||||
use_blocks = True # use bloc structure from DFT input
|
|
||||||
prec_mu = 0.0001
|
|
||||||
|
|
||||||
# Solver parameters
|
|
||||||
p = {}
|
|
||||||
p["length_cycle"] = 200
|
|
||||||
p["n_warmup_cycles"] = 2000
|
|
||||||
p["n_cycles"] = 20000
|
|
||||||
|
|
||||||
Most of these parameters are self-explanatory. The first,
|
|
||||||
`dft_filename`, gives the filename of the input files. For more
|
|
||||||
details on the solver parameters, we refer the user to
|
|
||||||
the :ref:`CTHYB solver <triqscthyb:welcome>` documentation.
|
|
||||||
|
|
||||||
We assume that the conversion to the hdf5 archive is already done. We
|
|
||||||
can check now in this archive, if previous runs are present, or if we have to start
|
|
||||||
from scratch::
|
|
||||||
|
|
||||||
previous_runs = 0
|
previous_runs = 0
|
||||||
previous_present = False
|
previous_present = False
|
||||||
@ -165,127 +119,49 @@ from scratch::
|
|||||||
previous_present = mpi.bcast(previous_present)
|
previous_present = mpi.bcast(previous_present)
|
||||||
|
|
||||||
|
|
||||||
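A fuller, hedged version of the check sketched above could read (the subgroup name ``dmft_output`` matches the loading code below; ``dft_filename``, ``mpi`` and ``HDFArchive`` are assumed to be defined as in the single-shot script)::

    previous_runs = 0
    previous_present = False
    if mpi.is_master_node():
        ar = HDFArchive(dft_filename+'.h5', 'a')
        if 'dmft_output' in ar:
            previous_present = True
            previous_runs = ar['dmft_output']['iterations']
        else:
            ar.create_group('dmft_output')
        del ar
    previous_runs = mpi.bcast(previous_runs)
    previous_present = mpi.bcast(previous_present)
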
You can see in this code snippet, that all results of this calculation
|
You can see from this code snippet that removing the subgroup :emphasis:`dmft_output` from the
|
||||||
will be stored in a separate subgroup in the hdf5 file, called
|
hdf file has the effect of resetting the calculation to the starting point. If there are previous
|
||||||
`dmft_output`. Removing this subgroup allows you to reset your
|
runs stored in the hdf5 archive, we can now load the self energy, the chemical potential and
|
||||||
calculation to the starting point easily.
|
double counting values of the last iteration::
|
||||||
|
|
||||||
Now we can use all this information to initialise the :class:`SumkDFT <pytriqs.applications.dft.sumk_dft.SumkDFT>` class::
|
|
||||||
|
|
||||||
SK = SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks)
|
|
||||||
|
|
||||||
The next step is to initialise the :class:`Solver <pytriqs.applications.impurity_solvers.cthyb.Solver>` class. It consist
|
|
||||||
of two steps
|
|
||||||
|
|
||||||
#. Calculating the multi-band interaction matrix, and setting up the
|
|
||||||
interaction Hamiltonian
|
|
||||||
#. Setting up the solver class
|
|
||||||
|
|
||||||
The first step is done using methods of
|
|
||||||
the :ref:`TRIQS <triqslibs:welcome>` library::
|
|
||||||
|
|
||||||
n_orb = SK.corr_shells[0]['dim']
|
|
||||||
l = SK.corr_shells[0]['l']
|
|
||||||
spin_names = ["up","down"]
|
|
||||||
orb_names = [i for i in range(n_orb)]
|
|
||||||
# Use GF structure determined by DFT blocks:
|
|
||||||
gf_struct = SK.gf_struct_solver[0]
|
|
||||||
# Construct U matrix for density-density calculations:
|
|
||||||
Umat, Upmat = U_matrix_kanamori(n_orb=n_orb, U_int=U, J_hund=J)
|
|
||||||
|
|
||||||
We assumed here that we want to use an interaction matrix with
|
|
||||||
Kanamori definitions of :math:`U` and :math:`J`. For
|
|
||||||
other choices (Slater interaction matrix for instance), and other
|
|
||||||
parameters, we refer to the reference manual
|
|
||||||
of the :ref:`TRIQS <triqslibs:welcome>` library.
|
|
||||||
|
|
||||||
Next, we construct the Hamiltonian and the solver::
|
|
||||||
|
|
||||||
h_int = h_int_density(spin_names, orb_names, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)
|
|
||||||
S = Solver(beta=beta, gf_struct=gf_struct)
|
|
||||||
|
|
||||||
As you see, we take only density-density interactions into
|
|
||||||
account. Other choices for the Hamiltonian are
|
|
||||||
|
|
||||||
* h_int_kanamori
|
|
||||||
* h_int_slater
|
|
||||||
|
|
||||||
These two include full rotational invariant interactions. Again,
|
|
||||||
options can be found in the :ref:`TRIQS <triqslibs:welcome>` library
|
|
||||||
reference manual.
|
|
||||||
|
|
||||||
|
|
||||||
If there are previous runs stored in the hdf5 archive, we can now load the self energy
|
|
||||||
of the last iteration::
|
|
||||||
|
|
||||||
if previous_present:
|
if previous_present:
|
||||||
if mpi.is_master_node():
|
if mpi.is_master_node():
|
||||||
ar = HDFArchive(dft_filename+'.h5','a')
|
ar = HDFArchive(dft_filename+'.h5','a')
|
||||||
S.Sigma_iw << ar['dmft_output']['Sigma_iw']
|
S.Sigma_iw << ar['dmft_output']['Sigma_iw']
|
||||||
del ar
|
del ar
|
||||||
chemical_potential,dc_imp,dc_energ = SK.load(['chemical_potential','dc_imp','dc_energ'])
|
|
||||||
S.Sigma_iw << mpi.bcast(S.Sigma_iw)
|
S.Sigma_iw << mpi.bcast(S.Sigma_iw)
|
||||||
|
chemical_potential,dc_imp,dc_energ = SK.load(['chemical_potential','dc_imp','dc_energ'])
|
||||||
SK.set_mu(chemical_potential)
|
SK.set_mu(chemical_potential)
|
||||||
SK.set_dc(dc_imp,dc_energ)
|
SK.set_dc(dc_imp,dc_energ)
|
||||||
|
|
||||||
The self-energy is broadcast from the master node to the slave nodes. Also, the
|
The data is loaded only on the master node, and therefore we broadcast it to the slave nodes.
|
||||||
last saved chemical potential and double counting values are read in and set.
|
Be careful when storing the :emphasis:`iteration_number` as we also have to add the previous
|
||||||
|
iteration count::
|
||||||
|
|
||||||
Now we can go to the definition of the self-consistency step. It consists again
|
ar['dmft_output']['iterations'] = iteration_number + previous_runs
|
||||||
of the basic steps discussed in the previous section, with some additional
|
|
||||||
refinements::
|
|
||||||
|
|
||||||
for iteration_number in range(1,loops+1):
|
.. _mixing:
|
||||||
if mpi.is_master_node(): print "Iteration = ", iteration_number
|
|
||||||
|
|
||||||
SK.symm_deg_gf(S.Sigma_iw,orb=0) # symmetrise Sigma
|
|
||||||
SK.set_Sigma([ S.Sigma_iw ]) # put Sigma into the SumK class
|
|
||||||
chemical_potential = SK.calc_mu( precision = prec_mu ) # find the chemical potential for given density
|
|
||||||
S.G_iw << SK.extract_G_loc()[0] # calc the local Green function
|
|
||||||
mpi.report("Total charge of Gloc : %.6f"%S.G_iw.total_density())
|
|
||||||
|
|
||||||
# Init the DC term and the real part of Sigma, if no previous runs found:
|
Mixing
|
||||||
if (iteration_number==1 and previous_present==False):
|
------
|
||||||
dm = S.G_iw.density()
|
|
||||||
SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
|
|
||||||
S.Sigma_iw << SK.dc_imp[0]['up'][0,0]
|
|
||||||
|
|
||||||
# Calculate new G0_iw to input into the solver:
|
In some cases a mixing of two consecutive self energies (or alternatively two hybridization
|
||||||
S.G0_iw << S.Sigma_iw + inverse(S.G_iw)
|
functions) can be necessary in order to ensure convergence::
|
||||||
S.G0_iw << inverse(S.G0_iw)
|
|
||||||
|
|
||||||
# Solve the impurity problem:
|
mix = 0.8 # mixing factor
|
||||||
S.solve(h_int=h_int, **p)
|
|
||||||
|
|
||||||
# Solved. Now do post-solution stuff:
|
|
||||||
mpi.report("Total charge of impurity problem : %.6f"%S.G_iw.total_density())
|
|
||||||
|
|
||||||
# Now mix Sigma and G with factor sigma_mix, if wanted:
|
|
||||||
if (iteration_number>1 or previous_present):
|
if (iteration_number>1 or previous_present):
|
||||||
if mpi.is_master_node():
|
if mpi.is_master_node():
|
||||||
ar = HDFArchive(dft_filename+'.h5','a')
|
ar = HDFArchive(dft_filename+'.h5','a')
|
||||||
mpi.report("Mixing Sigma and G with factor %s"%sigma_mix)
|
mpi.report("Mixing Sigma and G with factor %s"%mix)
|
||||||
S.Sigma_iw << sigma_mix * S.Sigma_iw + (1.0-sigma_mix) * ar['dmft_output']['Sigma_iw']
|
S.Sigma_iw << mix * S.Sigma_iw + (1.0-mix) * ar['dmft_output']['Sigma_iw']
|
||||||
S.G_iw << sigma_mix * S.G_iw + (1.0-sigma_mix) * ar['dmft_output']['G_iw']
|
S.G_iw << mix * S.G_iw + (1.0-mix) * ar['dmft_output']['G_iw']
|
||||||
del ar
|
del ar
|
||||||
S.G_iw << mpi.bcast(S.G_iw)
|
S.G_iw << mpi.bcast(S.G_iw)
|
||||||
S.Sigma_iw << mpi.bcast(S.Sigma_iw)
|
S.Sigma_iw << mpi.bcast(S.Sigma_iw)
|
||||||
|
|
||||||
# Write the final Sigma and G to the hdf5 archive:
|
In this little piece of code, which should be placed after calling the solver, two consecutive
|
||||||
if mpi.is_master_node():
|
self energies are linearly mixed with the factor :emphasis:`mix`. Of course, it is possible
|
||||||
ar = HDFArchive(dft_filename+'.h5','a')
|
to implement more advanced mixing schemes (e.g. Broyden's method); however, in most cases
|
||||||
ar['dmft_output']['iterations'] = iteration_number + previous_runs
|
simple linear mixing or even no mixing is sufficient for a reasonably fast convergence.
|
||||||
ar['dmft_output']['G_0'] = S.G0_iw
|
|
||||||
ar['dmft_output']['G_tau'] = S.G_tau
|
|
||||||
ar['dmft_output']['G_iw'] = S.G_iw
|
|
||||||
ar['dmft_output']['Sigma_iw'] = S.Sigma_iw
|
|
||||||
del ar
|
|
||||||
|
|
||||||
# Set the new double counting:
|
|
||||||
dm = S.G_iw.density() # compute the density matrix of the impurity problem
|
|
||||||
SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
|
|
||||||
|
|
||||||
# Save stuff into the dft_output group of hdf5 archive in case of rerun:
|
|
||||||
SK.save(['chemical_potential','dc_imp','dc_energ'])
|
|
||||||
|
|
||||||
This is all we need for the DFT+DMFT calculation. At the end, all results are stored in the hdf5 output file.
|
|
||||||
|
@ -89,7 +89,7 @@ however there are also some differences. First difference is that we import the
|
|||||||
|
|
||||||
The Hubbard-I solver is very fast and we do not need to take into account the DFT block structure or use any approximation for the *U*-matrix.
|
The Hubbard-I solver is very fast and we do not need to take into account the DFT block structure or use any approximation for the *U*-matrix.
|
||||||
We load and convert the :program:`dmftproj` output and initialize the
|
We load and convert the :program:`dmftproj` output and initialize the
|
||||||
:class:`SumkDFT <pytriqs.applications.dft.sumk_dft.SumkDFT>` class as described in :ref:`conversion` and
|
:class:`SumkDFT <dft.sumk_dft.SumkDFT>` class as described in :ref:`conversion` and
|
||||||
:ref:`singleshot` and then set up the Hubbard-I solver ::
|
:ref:`singleshot` and then set up the Hubbard-I solver ::
|
||||||
|
|
||||||
S = Solver(beta = beta, l = l)
|
S = Solver(beta = beta, l = l)
|
||||||
@ -206,7 +206,7 @@ symmetries::
|
|||||||
Converter.convert_parpoj_input()
|
Converter.convert_parpoj_input()
|
||||||
|
|
||||||
To get access to the analysis tools, we initialize the
|
To get access to the analysis tools, we initialize the
|
||||||
:class:`SumkDFTTools <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools>` class ::
|
:class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>` class ::
|
||||||
|
|
||||||
SK = SumkDFTTools(hdf_file=dft_filename+'.h5', use_dft_blocks=False)
|
SK = SumkDFTTools(hdf_file=dft_filename+'.h5', use_dft_blocks=False)
|
||||||
|
|
||||||
|
BIN
doc/guide/images_scripts/SrVO3_Sigma_iw_it1.png
Normal file
BIN
doc/guide/images_scripts/SrVO3_Sigma_iw_it1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 46 KiB |
@ -1,5 +1,8 @@
|
|||||||
64 ! number of k-points
|
64 ! number of k-points
|
||||||
1.0 ! Electron density
|
1.0 ! Electron density
|
||||||
1 ! number of correlated atoms
|
2 ! number of total atomic shells
|
||||||
1 1 2 5 ! iatom, isort, l, dimension
|
1 1 2 5 ! iatom, isort, l, dimension
|
||||||
|
2 2 1 3 ! iatom, isort, l, dimension
|
||||||
|
1 ! number of correlated shells
|
||||||
|
1 1 2 5 0 0 ! iatom, isort, l, dimension, SO, irep
|
||||||
1 5 ! # of ireps, dimension of irep
|
1 5 ! # of ireps, dimension of irep
|
||||||
|
@ -6,10 +6,10 @@ from pytriqs.gf.local import *
|
|||||||
from pytriqs.applications.dft.sumk_dft import *
|
from pytriqs.applications.dft.sumk_dft import *
|
||||||
|
|
||||||
dft_filename='SrVO3'
|
dft_filename='SrVO3'
|
||||||
U = U.0
|
U = 4.0
|
||||||
J = 0.65
|
J = 0.65
|
||||||
beta = 40
|
beta = 40
|
||||||
loops = 10 # Number of DMFT sc-loops
|
loops = 15 # Number of DMFT sc-loops
|
||||||
sigma_mix = 1.0 # Mixing factor of Sigma after solution of the AIM
|
sigma_mix = 1.0 # Mixing factor of Sigma after solution of the AIM
|
||||||
delta_mix = 1.0 # Mixing factor of Delta as input for the AIM
|
delta_mix = 1.0 # Mixing factor of Delta as input for the AIM
|
||||||
dc_type = 1 # DC type: 0 FLL, 1 Held, 2 AMF
|
dc_type = 1 # DC type: 0 FLL, 1 Held, 2 AMF
|
||||||
@ -20,9 +20,14 @@ h_field = 0.0
|
|||||||
# Solver parameters
|
# Solver parameters
|
||||||
p = {}
|
p = {}
|
||||||
p["max_time"] = -1
|
p["max_time"] = -1
|
||||||
p["length_cycle"] = 50
|
p["random_seed"] = 123 * mpi.rank + 567
|
||||||
p["n_warmup_cycles"] = 50
|
p["length_cycle"] = 200
|
||||||
p["n_cycles"] = 5000
|
p["n_warmup_cycles"] = 100000
|
||||||
|
p["n_cycles"] = 1000000
|
||||||
|
p["perform_tail_fit"] = True
|
||||||
|
p["fit_max_moments"] = 4
|
||||||
|
p["fit_min_n"] = 30
|
||||||
|
p["fit_max_n"] = 60
|
||||||
|
|
||||||
# If conversion step was not done, we could do it here. Uncomment the lines if you want to do this.
|
# If conversion step was not done, we could do it here. Uncomment the lines if you want to do this.
|
||||||
#from pytriqs.applications.dft.converters.wien2k_converter import *
|
#from pytriqs.applications.dft.converters.wien2k_converter import *
|
||||||
@ -141,6 +146,5 @@ for iteration_number in range(1,loops+1):
|
|||||||
dm = S.G_iw.density() # compute the density matrix of the impurity problem
|
dm = S.G_iw.density() # compute the density matrix of the impurity problem
|
||||||
SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
|
SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
|
||||||
|
|
||||||
# Save stuff into the dft_output group of hdf5 archive in case of rerun:
|
# Save stuff into the user_data group of hdf5 archive in case of rerun:
|
||||||
SK.save(['chemical_potential','dc_imp','dc_energ'])
|
SK.save(['chemical_potential','dc_imp','dc_energ'])
|
||||||
|
|
||||||
|
@ -6,7 +6,7 @@ from pytriqs.gf.local import *
|
|||||||
from pytriqs.applications.dft.sumk_dft import *
|
from pytriqs.applications.dft.sumk_dft import *
|
||||||
from pytriqs.applications.dft.converters.wien2k_converter import *
|
from pytriqs.applications.dft.converters.wien2k_converter import *
|
||||||
|
|
||||||
dft_filename='Gd_fcc'
|
dft_filename='SrVO3'
|
||||||
U = 9.6
|
U = 9.6
|
||||||
J = 0.8
|
J = 0.8
|
||||||
beta = 40
|
beta = 40
|
||||||
@ -21,9 +21,14 @@ h_field = 0.0
|
|||||||
# Solver parameters
|
# Solver parameters
|
||||||
p = {}
|
p = {}
|
||||||
p["max_time"] = -1
|
p["max_time"] = -1
|
||||||
p["length_cycle"] = 50
|
p["random_seed"] = 123 * mpi.rank + 567
|
||||||
p["n_warmup_cycles"] = 50
|
p["length_cycle"] = 200
|
||||||
p["n_cycles"] = 5000
|
p["n_warmup_cycles"] = 100000
|
||||||
|
p["n_cycles"] = 1000000
|
||||||
|
p["perform_tail_fit"] = True
|
||||||
|
p["fit_max_moments"] = 4
|
||||||
|
p["fit_min_n"] = 30
|
||||||
|
p["fit_max_n"] = 60
|
||||||
|
|
||||||
# If conversion step was not done, we could do it here. Uncomment the lines if you want to do this.
|
# If conversion step was not done, we could do it here. Uncomment the lines if you want to do this.
|
||||||
#from pytriqs.applications.dft.converters.wien2k_converter import *
|
#from pytriqs.applications.dft.converters.wien2k_converter import *
|
||||||
@ -144,5 +149,3 @@ for iteration_number in range(1,loops+1):
|
|||||||
|
|
||||||
# Save stuff into the dft_output group of hdf5 archive in case of rerun:
|
# Save stuff into the dft_output group of hdf5 archive in case of rerun:
|
||||||
SK.save(['chemical_potential','dc_imp','dc_energ'])
|
SK.save(['chemical_potential','dc_imp','dc_energ'])
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
.. _Transport:
|
.. _Transport:
|
||||||
|
|
||||||
Transport calculations test
|
Transport calculations
|
||||||
======================
|
======================
|
||||||
|
|
||||||
Formalism
|
Formalism
|
||||||
---------
|
---------
|
||||||
@ -44,13 +44,13 @@ real-frequency self energy by doing an analytic continuation.
|
|||||||
it is crucial to perform the analytic continuation in such a way that the obtained real frequency self energy
|
it is crucial to perform the analytic continuation in such a way that the obtained real frequency self energy
|
||||||
is accurate around the Fermi energy as low energy features strongly influence the final results!
|
is accurate around the Fermi energy as low energy features strongly influence the final results!
|
||||||
|
|
||||||
Besides the self energy the Wien2k files read by the transport converter (:meth:`convert_transport_input <pytriqs.applications.dft.converters.wien2k_converter.Wien2kConverter.convert_transport_input>`) are:
|
Besides the self energy the Wien2k files read by the transport converter (:meth:`convert_transport_input <dft.converters.wien2k_converter.Wien2kConverter.convert_transport_input>`) are:
|
||||||
* :file:`.struct`: The lattice constants specified in the struct file are used to calculate the unit cell volume.
|
* :file:`.struct`: The lattice constants specified in the struct file are used to calculate the unit cell volume.
|
||||||
* :file:`.outputs`: In this file the k-point symmetries are given.
|
* :file:`.outputs`: In this file the k-point symmetries are given.
|
||||||
* :file:`.oubwin`: Contains the indices of the bands within the projected subspace (written by :program:`dmftproj`) for each k-point.
|
* :file:`.oubwin`: Contains the indices of the bands within the projected subspace (written by :program:`dmftproj`) for each k-point.
|
||||||
* :file:`.pmat`: This file is the output of the Wien2k optics package and contains the velocity (momentum) matrix elements between all bands in the desired energy
|
* :file:`.pmat`: This file is the output of the Wien2k optics package and contains the velocity (momentum) matrix elements between all bands in the desired energy
|
||||||
window for each k-point. How to use the optics package is described below.
|
window for each k-point. How to use the optics package is described below.
|
||||||
* :file:`.h5`: The hdf5 archive has to be present and should contain the dft_input subgroup. Otherwise :meth:`convert_dft_input <pytriqs.applications.dft.converters.wien2k_converter.Wien2kConverter.convert_dft_input>` needs to be called before :meth:`convert_transport_input <pytriqs.applications.dft.converters.wien2k_converter.Wien2kConverter.convert_transport_input>`.
|
* :file:`.h5`: The hdf5 archive has to be present and should contain the dft_input subgroup. Otherwise :meth:`convert_dft_input <dft.converters.wien2k_converter.Wien2kConverter.convert_dft_input>` needs to be called before :meth:`convert_transport_input <dft.converters.wien2k_converter.Wien2kConverter.convert_transport_input>`.
|
||||||
|
|
||||||
|
|
||||||
Wien2k optics package
|
Wien2k optics package
|
||||||
@ -84,7 +84,7 @@ First we have to read the Wien2k files and store the relevant information in the
|
|||||||
|
|
||||||
SK = SumkDFTTools(hdf_file='case.h5', use_dft_blocks=True)
|
SK = SumkDFTTools(hdf_file='case.h5', use_dft_blocks=True)
|
||||||
|
|
||||||
The converter :meth:`convert_transport_input <pytriqs.applications.dft.converters.wien2k_converter.Wien2kConverter.convert_transport_input>`
|
The converter :meth:`convert_transport_input <dft.converters.wien2k_converter.Wien2kConverter.convert_transport_input>`
|
||||||
reads the required data of the Wien2k output and stores it in the `dft_transp_input` subgroup of your hdf file.
|
reads the required data of the Wien2k output and stores it in the `dft_transp_input` subgroup of your hdf file.
|
||||||
Additionally we need to read and set the self energy, the chemical potential and the double counting::
|
Additionally we need to read and set the self energy, the chemical potential and the double counting::
|
||||||
|
|
||||||
@ -104,7 +104,7 @@ Here the transport distribution is calculated in :math:`xx` direction for the fr
|
|||||||
To use the previously obtained self energy we set with_Sigma to True and the broadening to :math:`0.0`.
|
To use the previously obtained self energy we set with_Sigma to True and the broadening to :math:`0.0`.
|
||||||
As we also want to calculate the Seebeck coefficient we have to include :math:`\Omega=0.0` in the mesh.
|
As we also want to calculate the Seebeck coefficient we have to include :math:`\Omega=0.0` in the mesh.
|
||||||
Note that the current version of the code repins the :math:`\Omega` values to the closest values on the self energy mesh.
|
Note that the current version of the code repins the :math:`\Omega` values to the closest values on the self energy mesh.
|
||||||
For complete description of the input parameters see the :meth:`transport_distribution reference <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools.transport_distribution>`.
|
For complete description of the input parameters see the :meth:`transport_distribution reference <dft.sumk_dft_tools.SumkDFTTools.transport_distribution>`.
|
||||||
|
|
||||||
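A hedged sketch of such a call (all values are purely illustrative; consult the reference linked above for the exact signature and defaults)::

    SK.transport_distribution(directions=['xx'], broadening=0.0, with_Sigma=True,
                              energy_window=[-0.3, 0.3], Om_mesh=[0.0, 0.1], beta=40)
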
The resulting transport distribution is not automatically saved, but this can be easily achieved with::
|
The resulting transport distribution is not automatically saved, but this can be easily achieved with::
|
||||||
|
|
||||||
|
@ -2,10 +2,10 @@
|
|||||||
|
|
||||||
.. module:: pytriqs.applications.dft
|
.. module:: pytriqs.applications.dft
|
||||||
|
|
||||||
.. _dfttools:
|
.. _dft:
|
||||||
|
|
||||||
DFTTools
|
DFTTools
|
||||||
=========
|
========
|
||||||
|
|
||||||
This :ref:`TRIQS <triqslibs:welcome>`-based application is aimed
|
This :ref:`TRIQS <triqslibs:welcome>`-based application is aimed
|
||||||
at ab-initio calculations for
|
at ab-initio calculations for
|
||||||
|
22
doc/reference/block_structure.rst
Normal file
22
doc/reference/block_structure.rst
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Block Structure
|
||||||
|
===============
|
||||||
|
|
||||||
|
The `BlockStructure` class allows one to change and manipulate
|
||||||
|
Green's function structures and mappings from sumk to solver.
|
||||||
|
|
||||||
|
The block structure can also be written to and read from HDF files.
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
|
||||||
|
Do not write the individual elements of this class to an HDF file,
|
||||||
|
as they belong together and changing one without the other can
|
||||||
|
lead to unexpected results. Always write the BlockStructure
|
||||||
|
object as a whole.
|
||||||
|
|
||||||
|
Writing the sumk_to_solver and solver_to_sumk elements
|
||||||
|
individually is not implemented.
|
||||||
|
|
||||||
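A minimal, hedged usage sketch (the Green's function structure and the archive name are illustrative; the HDF read/write support mentioned above is assumed to be registered by the package)::

    from pytriqs.archive import HDFArchive
    from pytriqs.applications.dft import BlockStructure

    # one inequivalent shell with two spin blocks of dimension 3
    bs = BlockStructure.full_structure([{'up': [0, 1, 2], 'down': [0, 1, 2]}], None)

    ar = HDFArchive('case.h5', 'a')
    ar['block_structure'] = bs       # always store the object as a whole ...
    bs_read = ar['block_structure']  # ... and read it back as a whole
    del ar
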
|
.. autoclass:: dft.block_structure.BlockStructure
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
@ -17,7 +17,7 @@ H(k) Converter
|
|||||||
:special-members:
|
:special-members:
|
||||||
|
|
||||||
Wannier90 Converter
|
Wannier90 Converter
|
||||||
--------------
|
-------------------
|
||||||
.. autoclass:: dft.converters.wannier90_converter.Wannier90Converter
|
.. autoclass:: dft.converters.wannier90_converter.Wannier90Converter
|
||||||
:members:
|
:members:
|
||||||
:special-members:
|
:special-members:
|
||||||
|
@ -2,7 +2,7 @@ SumK DFT
|
|||||||
========
|
========
|
||||||
|
|
||||||
|
|
||||||
.. autoclass:: sumk_dft.SumkDFT
|
.. autoclass:: dft.sumk_dft.SumkDFT
|
||||||
:members:
|
:members:
|
||||||
:special-members:
|
:special-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
@ -2,7 +2,7 @@ SumK DFT Tools
|
|||||||
==============
|
==============
|
||||||
|
|
||||||
|
|
||||||
.. autoclass:: sumk_dft_tools.SumkDFTTools
|
.. autoclass:: dft.sumk_dft_tools.SumkDFTTools
|
||||||
:members:
|
:members:
|
||||||
:special-members:
|
:special-members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
Symmetry
|
Symmetry
|
||||||
========
|
========
|
||||||
|
|
||||||
.. autoclass:: Symmetry
|
.. autoclass:: dft.Symmetry
|
||||||
:members:
|
:members:
|
||||||
:special-members:
|
:special-members:
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
TransBasis
|
TransBasis
|
||||||
==========
|
==========
|
||||||
|
|
||||||
.. autoclass:: trans_basis.TransBasis
|
.. autoclass:: dft.trans_basis.TransBasis
|
||||||
:members:
|
:members:
|
||||||
:special-members:
|
:special-members:
|
||||||
|
@ -8,4 +8,9 @@ configure_file(${CMAKE_SOURCE_DIR}/cmake/sitecustomize.py ${CMAKE_CURRENT_BINARY
|
|||||||
# make a local pytriqs copy
|
# make a local pytriqs copy
|
||||||
triqs_prepare_local_pytriqs(${python_destination})
|
triqs_prepare_local_pytriqs(${python_destination})
|
||||||
|
|
||||||
|
# VASP converter
|
||||||
add_subdirectory(converters/plovasp)
|
add_subdirectory(converters/plovasp)
|
||||||
|
|
||||||
|
# add version file
|
||||||
|
configure_file(version.py.in version.py)
|
||||||
|
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/version.py DESTINATION ${TRIQS_PYTHON_LIB_DEST_ROOT}/${python_destination})
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
|
|
||||||
################################################################################
|
##########################################################################
|
||||||
#
|
#
|
||||||
# TRIQS: a Toolbox for Research in Interacting Quantum Systems
|
# TRIQS: a Toolbox for Research in Interacting Quantum Systems
|
||||||
#
|
#
|
||||||
@ -18,11 +18,13 @@
|
|||||||
# You should have received a copy of the GNU General Public License along with
|
# You should have received a copy of the GNU General Public License along with
|
||||||
# TRIQS. If not, see <http://www.gnu.org/licenses/>.
|
# TRIQS. If not, see <http://www.gnu.org/licenses/>.
|
||||||
#
|
#
|
||||||
################################################################################
|
##########################################################################
|
||||||
|
|
||||||
from sumk_dft import SumkDFT
|
from sumk_dft import SumkDFT
|
||||||
from symmetry import Symmetry
|
from symmetry import Symmetry
|
||||||
|
from block_structure import BlockStructure
|
||||||
from sumk_dft_tools import SumkDFTTools
|
from sumk_dft_tools import SumkDFTTools
|
||||||
from converters import *
|
from converters import *
|
||||||
|
|
||||||
__all__=['SumkDFT','Symmetry','SumkDFTTools','Wien2kConverter','HkConverter']
|
__all__ = ['SumkDFT', 'Symmetry', 'SumkDFTTools',
|
||||||
|
'Wien2kConverter', 'HkConverter','BlockStructure']
|
||||||
|
442
python/block_structure.py
Normal file
442
python/block_structure.py
Normal file
@ -0,0 +1,442 @@
|
|||||||
|
import copy
|
||||||
|
import numpy as np
|
||||||
|
from pytriqs.gf.local import GfImFreq, BlockGf
|
||||||
|
from ast import literal_eval
|
||||||
|
from warnings import warn
|
||||||
|
|
||||||
|
class BlockStructure(object):
|
||||||
|
""" Contains information about the Green function structure.
|
||||||
|
|
||||||
|
This class contains information about the structure of the solver
|
||||||
|
and sumk Green functions and the mapping between them.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
gf_struct_sumk : list of list of tuple
|
||||||
|
gf_struct_sumk[ish][idx] = (block_name,list of indices in block)
|
||||||
|
|
||||||
|
for correlated shell ish; idx is just a counter in the list
|
||||||
|
gf_struct_solver : list of dict
|
||||||
|
gf_struct_solver[ish][block] = list of indices in that block
|
||||||
|
|
||||||
|
for *inequivalent* correlated shell ish
|
||||||
|
solver_to_sumk : list of dict
|
||||||
|
solver_to_sumk[ish][(from_block,from_idx)] = (to_block,to_idx)
|
||||||
|
|
||||||
|
maps from the solver block and index to the sumk block and index
|
||||||
|
for *inequivalent* correlated shell ish
|
||||||
|
sumk_to_solver : list of dict
|
||||||
|
sumk_to_solver[ish][(from_block,from_idx)] = (to_block,to_idx)
|
||||||
|
|
||||||
|
maps from the sumk block and index to the solver block and index
|
||||||
|
for *inequivalent* correlated shell ish
|
||||||
|
solver_to_sumk_block : list of dict
|
||||||
|
solver_to_sumk_block[ish][from_block] = to_block
|
||||||
|
|
||||||
|
maps from the solver block to the sumk block
|
||||||
|
for *inequivalent* correlated shell ish
|
||||||
|
"""
|
||||||
|
def __init__(self,gf_struct_sumk=None,
|
||||||
|
gf_struct_solver=None,
|
||||||
|
solver_to_sumk=None,
|
||||||
|
sumk_to_solver=None,
|
||||||
|
solver_to_sumk_block=None):
|
||||||
|
self.gf_struct_sumk = gf_struct_sumk
|
||||||
|
self.gf_struct_solver = gf_struct_solver
|
||||||
|
self.solver_to_sumk = solver_to_sumk
|
||||||
|
self.sumk_to_solver = sumk_to_solver
|
||||||
|
self.solver_to_sumk_block = solver_to_sumk_block
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def full_structure(cls,gf_struct,corr_to_inequiv):
|
||||||
|
""" Construct structure that maps to itself.
|
||||||
|
|
||||||
|
This has the same structure for sumk and solver, and the
|
||||||
|
mapping solver_to_sumk and sumk_to_solver is one-to-one.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
gf_struct : list of dict
|
||||||
|
gf_struct[ish][block] = list of indices in that block
|
||||||
|
|
||||||
|
for (inequivalent) correlated shell ish
|
||||||
|
corr_to_inequiv : list
|
||||||
|
gives the mapping from correlated shell csh to inequivalent
|
||||||
|
correlated shell icsh, so that corr_to_inequiv[csh]=icsh
|
||||||
|
e.g. SumkDFT.corr_to_inequiv
|
||||||
|
|
||||||
|
if None, each inequivalent correlated shell is supposed to
|
||||||
|
correspond to just one correlated shell with the same
|
||||||
|
index; there is no default, None has to be set explicitly!
|
||||||
|
"""
|
||||||
|
|
||||||
|
solver_to_sumk = []
|
||||||
|
s2sblock = []
|
||||||
|
gs_sumk = []
|
||||||
|
for ish in range(len(gf_struct)):
|
||||||
|
so2su = {}
|
||||||
|
so2sublock = {}
|
||||||
|
gss = []
|
||||||
|
for block in gf_struct[ish]:
|
||||||
|
so2sublock[block]=block
|
||||||
|
for ind in gf_struct[ish][block]:
|
||||||
|
so2su[(block,ind)]=(block,ind)
|
||||||
|
gss.append((block,gf_struct[ish][block]))
|
||||||
|
solver_to_sumk.append(so2su)
|
||||||
|
s2sblock.append(so2sublock)
|
||||||
|
gs_sumk.append(gss)
|
||||||
|
|
||||||
|
# gf_struct_sumk is not given for each inequivalent correlated
|
||||||
|
# shell, but for every correlated shell!
|
||||||
|
if corr_to_inequiv is not None:
|
||||||
|
gs_sumk_all = [None]*len(corr_to_inequiv)
|
||||||
|
for i in range(len(corr_to_inequiv)):
|
||||||
|
gs_sumk_all[i] = gs_sumk[corr_to_inequiv[i]]
|
||||||
|
else:
|
||||||
|
gs_sumk_all = gs_sumk
|
||||||
|
|
||||||
|
return cls(gf_struct_solver=copy.deepcopy(gf_struct),
|
||||||
|
gf_struct_sumk = gs_sumk_all,
|
||||||
|
solver_to_sumk = copy.deepcopy(solver_to_sumk),
|
||||||
|
sumk_to_solver = solver_to_sumk,
|
||||||
|
solver_to_sumk_block = s2sblock)
|
||||||
|
|
||||||
|
def pick_gf_struct_solver(self,new_gf_struct):
|
||||||
|
""" Pick selected orbitals within blocks.
|
||||||
|
|
||||||
|
This throws away parts of the Green's function that (for some
|
||||||
|
reason - be sure that you know what you're doing) shouldn't be
|
||||||
|
included in the calculation.
|
||||||
|
|
||||||
|
To drop an entire block, just don't include it.
|
||||||
|
To drop a certain index within a block, just don't include it.
|
||||||
|
|
||||||
|
If it was before:
|
||||||
|
|
||||||
|
[{'up':[0,1],'down':[0,1],'left':[0,1]}]
|
||||||
|
|
||||||
|
to choose the 0th index of the up block and the 1st index of
|
||||||
|
the down block and drop the left block, the new_gf_struct would
|
||||||
|
have to be
|
||||||
|
|
||||||
|
[{'up':[0],'down':[1]}]
|
||||||
|
|
||||||
|
Note that the indices will be renamed to be a 0-based
|
||||||
|
sequence of integers, i.e. the new structure will actually
|
||||||
|
be [{'up':[0],'down':[0]}].
|
||||||
|
|
||||||
|
For dropped indices, sumk_to_solver will map to (None,None).
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
new_gf_struct : list of dict
|
||||||
|
formatted the same as gf_struct_solver:
|
||||||
|
|
||||||
|
new_gf_struct[ish][block]=list of indices in that block.
|
||||||
|
"""
|
||||||
|
|
||||||
|
for ish in range(len(self.gf_struct_solver)):
|
||||||
|
gf_struct = new_gf_struct[ish]
|
||||||
|
|
||||||
|
# create new solver_to_sumk
|
||||||
|
so2su={}
|
||||||
|
so2su_block = {}
|
||||||
|
for blk,idxs in gf_struct.items():
|
||||||
|
for i in range(len(idxs)):
|
||||||
|
so2su[(blk,i)]=self.solver_to_sumk[ish][(blk,idxs[i])]
|
||||||
|
so2su_block[blk]=so2su[(blk,i)][0]
|
||||||
|
self.solver_to_sumk[ish] = so2su
|
||||||
|
self.solver_to_sumk_block[ish] = so2su_block
|
||||||
|
# create new sumk_to_solver
|
||||||
|
for k,v in self.sumk_to_solver[ish].items():
|
||||||
|
blk,ind=v
|
||||||
|
if blk in gf_struct and ind in gf_struct[blk]:
|
||||||
|
new_ind = gf_struct[blk].index(ind)
|
||||||
|
self.sumk_to_solver[ish][k]=(blk,new_ind)
|
||||||
|
else:
|
||||||
|
self.sumk_to_solver[ish][k]=(None,None)
|
||||||
|
# reindexing gf_struct so that it starts with 0
|
||||||
|
for k in gf_struct:
|
||||||
|
gf_struct[k]=range(len(gf_struct[k]))
|
||||||
|
self.gf_struct_solver[ish]=gf_struct
|
||||||
|
|
||||||
|
def pick_gf_struct_sumk(self,new_gf_struct):
|
||||||
|
""" Pick selected orbitals within blocks.
|
||||||
|
|
||||||
|
This throws away parts of the Green's function that (for some
|
||||||
|
reason - be sure that you know what you're doing) shouldn't be
|
||||||
|
included in the calculation.
|
||||||
|
|
||||||
|
To drop an entire block, just don't include it.
|
||||||
|
To drop a certain index within a block, just don't include it.
|
||||||
|
|
||||||
|
If it was before:
|
||||||
|
|
||||||
|
[{'up':[0,1],'down':[0,1],'left':[0,1]}]
|
||||||
|
|
||||||
|
to choose the 0th index of the up block and the 1st index of
|
||||||
|
the down block and drop the left block, the new_gf_struct would
|
||||||
|
have to be
|
||||||
|
|
||||||
|
[{'up':[0],'down':[1]}]
|
||||||
|
|
||||||
|
Note that the indices will be renamed to be a 0-based
|
||||||
|
sequence of integers.
|
||||||
|
|
||||||
|
For dropped indices, sumk_to_solver will map to (None,None).
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
new_gf_struct : list of dict
|
||||||
|
formatted the same as gf_struct_solver:
|
||||||
|
|
||||||
|
new_gf_struct[ish][block]=list of indices in that block.
|
||||||
|
|
||||||
|
However, the indices are not according to the solver Gf
|
||||||
|
but the sumk Gf.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
gfs = []
|
||||||
|
# construct gfs, which is the equivalent of new_gf_struct
|
||||||
|
# but according to the solver Gf, by using the sumk_to_solver
|
||||||
|
# mapping
|
||||||
|
for ish in range(len(new_gf_struct)):
|
||||||
|
gfs.append({})
|
||||||
|
for block in new_gf_struct[ish].keys():
|
||||||
|
for ind in new_gf_struct[ish][block]:
|
||||||
|
ind_sol = self.sumk_to_solver[ish][(block,ind)]
|
||||||
|
if not ind_sol[0] in gfs[ish]:
|
||||||
|
gfs[ish][ind_sol[0]]=[]
|
||||||
|
gfs[ish][ind_sol[0]].append(ind_sol[1])
|
||||||
|
self.pick_gf_struct_solver(gfs)
|
||||||
|
|
||||||
|
|
||||||
|
def map_gf_struct_solver(self,mapping):
|
||||||
|
""" Map the Green function structure from one struct to another.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
mapping : list of dict
|
||||||
|
the dict consists of elements
|
||||||
|
(from_block,from_index) : (to_block,to_index)
|
||||||
|
that maps from one structure to the other
|
||||||
|
"""
|
||||||
|
|
||||||
|
for ish in range(len(mapping)):
|
||||||
|
gf_struct = {}
|
||||||
|
so2su = {}
|
||||||
|
su2so = {}
|
||||||
|
so2su_block = {}
|
||||||
|
for frm,to in mapping[ish].iteritems():
|
||||||
|
if not to[0] in gf_struct:
|
||||||
|
gf_struct[to[0]]=[]
|
||||||
|
gf_struct[to[0]].append(to[1])
|
||||||
|
|
||||||
|
so2su[to]=self.solver_to_sumk[ish][frm]
|
||||||
|
su2so[self.solver_to_sumk[ish][frm]]=to
|
||||||
|
if to[0] in so2su_block:
|
||||||
|
if so2su_block[to[0]] != \
|
||||||
|
self.solver_to_sumk_block[ish][frm[0]]:
|
||||||
|
warn("solver block '{}' maps to more than one sumk block: '{}', '{}'".format(
|
||||||
|
to[0],so2su_block[to[0]],self.solver_to_sumk_block[ish][frm[0]]))
|
||||||
|
else:
|
||||||
|
so2su_block[to[0]]=\
|
||||||
|
self.solver_to_sumk_block[ish][frm[0]]
|
||||||
|
for k in self.sumk_to_solver[ish].keys():
|
||||||
|
if not k in su2so:
|
||||||
|
su2so[k] = (None,None)
|
||||||
|
self.gf_struct_solver[ish]=gf_struct
|
||||||
|
self.solver_to_sumk[ish]=so2su
|
||||||
|
self.sumk_to_solver[ish]=su2so
|
||||||
|
self.solver_to_sumk_block[ish]=so2su_block
|
||||||
|
|
||||||
|
    def create_gf(self, ish=0, gf_function=GfImFreq, **kwargs):
        """ Create a zero BlockGf having the gf_struct_solver structure.

        When using GfImFreq as gf_function, typically you have to
        supply beta as keyword argument.

        Parameters
        ----------
        ish : int
            shell index
        gf_function : constructor
            function used to construct the Gf objects constituting the
            individual blocks; default: GfImFreq
        **kwargs :
            options passed on to the Gf constructor for the individual
            blocks
        """

        names = self.gf_struct_solver[ish].keys()
        blocks = []
        for n in names:
            G = gf_function(indices=self.gf_struct_solver[ish][n], **kwargs)
            blocks.append(G)
        G = BlockGf(name_list=names, block_list=blocks)
        return G

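A minimal usage sketch (the BlockStructure instance bs and the value of beta are assumptions here); as the docstring notes, beta is forwarded to the GfImFreq constructor of each block:

# Hypothetical call: a zero imaginary-frequency BlockGf with the
# gf_struct_solver layout of shell 0.
G = bs.create_gf(ish=0, beta=40.0)
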
    def convert_gf(self, G, G_struct, ish=0, show_warnings=True, **kwargs):
        """ Convert BlockGf from its structure to this structure.

        .. warning::

            Elements that are zero in the new structure due to
            the new block structure will be just ignored, thus
            approximated to zero.

        Parameters
        ----------
        G : BlockGf
            the Gf that should be converted
        G_struct : GfStructure
            the structure of that G
        ish : int
            shell index
        show_warnings : bool
            whether to show warnings when elements of the Green's
            function get thrown away
        **kwargs :
            options passed to the constructor for the new Gf
        """
        G_new = self.create_gf(ish=ish, **kwargs)
        for block in G_struct.gf_struct_solver[ish].keys():
            for i1 in G_struct.gf_struct_solver[ish][block]:
                for i2 in G_struct.gf_struct_solver[ish][block]:
                    i1_sumk = G_struct.solver_to_sumk[ish][(block, i1)]
                    i2_sumk = G_struct.solver_to_sumk[ish][(block, i2)]
                    i1_sol = self.sumk_to_solver[ish][i1_sumk]
                    i2_sol = self.sumk_to_solver[ish][i2_sumk]
                    if i1_sol[0] is None or i2_sol[0] is None:
                        if show_warnings:
                            warn(('Element {},{} of block {} of G is not present ' +
                                  'in the new structure').format(i1, i2, block))
                        continue
                    if i1_sol[0] != i2_sol[0]:
                        if show_warnings:
                            warn(('Element {},{} of block {} of G is approximated ' +
                                  'to zero to match the new structure.').format(
                                i1, i2, block))
                        continue
                    G_new[i1_sol[0]][i1_sol[1], i2_sol[1]] = \
                        G[block][i1, i2]
        return G_new

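A minimal sketch of converting a Green's function between two block structures (bs_old, bs_new, G_old and the value of beta are hypothetical names/values): elements that do not fit the new blocks are dropped, with a warning unless show_warnings=False.

# Hypothetical usage: G_old was built with the structure bs_old; bs_new
# carries the reduced structure of the present object.
G_new = bs_new.convert_gf(G_old, bs_old, ish=0, show_warnings=True, beta=40.0)
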
    def approximate_as_diagonal(self):
        """ Create a structure for a GF with zero off-diagonal elements.

        .. warning::

            In general, this will throw away non-zero elements of the
            Green's function. Be sure to verify whether this approximation
            is justified.
        """

        self.gf_struct_solver = []
        self.solver_to_sumk = []
        self.solver_to_sumk_block = []
        for ish in range(len(self.sumk_to_solver)):
            self.gf_struct_solver.append({})
            self.solver_to_sumk.append({})
            self.solver_to_sumk_block.append({})
            for frm, to in self.sumk_to_solver[ish].iteritems():
                if to[0] is not None:
                    self.gf_struct_solver[ish][frm[0] + '_' + str(frm[1])] = [0]
                    self.sumk_to_solver[ish][frm] = (frm[0] + '_' + str(frm[1]), 0)
                    self.solver_to_sumk[ish][(frm[0] + '_' + str(frm[1]), 0)] = frm
                    self.solver_to_sumk_block[ish][frm[0] + '_' + str(frm[1])] = frm[0]

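A minimal sketch of the effect (bs and the sumk block name 'up' are hypothetical): every surviving sumk index ends up in its own 1x1 solver block named '<sumk block>_<sumk index>', as the renaming in the loop above implies.

bs.approximate_as_diagonal()
# A shell with sumk structure {'up': [0, 1, 2]} ends up with the solver
# structure {'up_0': [0], 'up_1': [0], 'up_2': [0]}.
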
    def __eq__(self, other):
        def compare(one, two):
            if type(one) != type(two):
                return False
            if one is None and two is None:
                return True
            if isinstance(one, list) or isinstance(one, tuple):
                if len(one) != len(two):
                    return False
                for x, y in zip(one, two):
                    if not compare(x, y):
                        return False
                return True
            elif isinstance(one, int):
                return one == two
            elif isinstance(one, str):
                return one == two
            elif isinstance(one, dict):
                if set(one.keys()) != set(two.keys()):
                    return False
                for k in set(one.keys()).intersection(two.keys()):
                    if not compare(one[k], two[k]):
                        return False
                return True
            warn('Cannot compare {}'.format(type(one)))
            return False

        for prop in ["gf_struct_sumk", "gf_struct_solver",
                     "solver_to_sumk", "sumk_to_solver", "solver_to_sumk_block"]:
            if not compare(getattr(self, prop), getattr(other, prop)):
                return False
        return True

    def copy(self):
        return copy.deepcopy(self)

    def __reduce_to_dict__(self):
        """ Reduce to dict for HDF5 export."""

        ret = {}
        for element in ["gf_struct_sumk", "gf_struct_solver",
                        "solver_to_sumk_block"]:
            ret[element] = getattr(self, element)

        def construct_mapping(mapping):
            d = []
            for ish in range(len(mapping)):
                d.append({})
                for k, v in mapping[ish].iteritems():
                    d[ish][repr(k)] = repr(v)
            return d

        ret['solver_to_sumk'] = construct_mapping(self.solver_to_sumk)
        ret['sumk_to_solver'] = construct_mapping(self.sumk_to_solver)
        return ret

    @classmethod
    def __factory_from_dict__(cls, name, D):
        """ Create from dict for HDF5 import."""

        def reconstruct_mapping(mapping):
            d = []
            for ish in range(len(mapping)):
                d.append({})
                for k, v in mapping[ish].iteritems():
                    # literal_eval is a safe alternative to eval
                    d[ish][literal_eval(k)] = literal_eval(v)
            return d

        D['solver_to_sumk'] = reconstruct_mapping(D['solver_to_sumk'])
        D['sumk_to_solver'] = reconstruct_mapping(D['sumk_to_solver'])
        return cls(**D)

    def __str__(self):
        s = ''
        s += "gf_struct_sumk " + str(self.gf_struct_sumk) + '\n'
        s += "gf_struct_solver " + str(self.gf_struct_solver) + '\n'
        s += "solver_to_sumk_block " + str(self.solver_to_sumk_block) + '\n'
        for el in ['solver_to_sumk', 'sumk_to_solver']:
            s += el + '\n'
            element = getattr(self, el)
            for ish in range(len(element)):
                s += ' shell ' + str(ish) + '\n'

                def keyfun(el):
                    return '{}_{:05d}'.format(el[0], el[1])
                keys = sorted(element[ish].keys(), key=keyfun)
                for k in keys:
                    s += '  ' + str(k) + str(element[ish][k]) + '\n'
        return s


from pytriqs.archive.hdf_archive_schemes import register_class
register_class(BlockStructure)

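Because the class is registered with the HDF archive schemes, a BlockStructure can be stored in and reloaded from an archive; a minimal sketch (the file and group names are hypothetical, bs is an existing BlockStructure), using __reduce_to_dict__ on write and __factory_from_dict__ on read:

from pytriqs.archive import HDFArchive

ar = HDFArchive('test_block_structure.h5', 'w')
ar['block_structure'] = bs      # bs: an existing BlockStructure instance
del ar

ar = HDFArchive('test_block_structure.h5', 'r')
bs2 = ar['block_structure']
del ar
assert bs == bs2                # uses the __eq__ defined above
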
@@ -14,7 +14,8 @@ and to restore it to the original post-converter state.
 filename = sys.argv[1]
 A = h5py.File(filename)
 for group in ['dmft_output', 'user_data']:
-    if group in A: del(A[group])
+    if group in A:
+        del(A[group])
 A.close()

 # Repack to reclaim disk space
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,7 +18,7 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from wien2k_converter import Wien2kConverter
 from hk_converter import HkConverter
@@ -27,4 +27,3 @@ from wannier90_converter import Wannier90Converter

 __all__ =['Wien2kConverter','HkConverter','Wannier90Converter','VaspConverter']

-
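For reference, a minimal sketch of importing one of these converters in a user script; the package path shown follows the usual TRIQS application layout and is an assumption here:

# Assumed import path for the DFTTools application package.
from pytriqs.applications.dft.converters import Wien2kConverter
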
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,12 +18,16 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################
 from pytriqs.cmake_info import hdf5_command_path
 import pytriqs.utility.mpi as mpi

+
 class ConverterTools:

+    def __init__(self):
+        pass
+
     def read_fortran_file(self, filename, to_replace):
         """
         Returns a generator that yields all numbers in the Fortran file as float, with possible replacements.
@@ -43,11 +47,13 @@ class ConverterTools:
         """
         import os.path
         import string
-        if not(os.path.exists(filename)) : raise IOError, "File %s does not exist."%filename
+        if not(os.path.exists(filename)):
+            raise IOError, "File %s does not exist." % filename
         for line in open(filename, 'r'):
-            for old,new in to_replace.iteritems(): line = line.replace(old,new)
-            for x in line.split(): yield string.atof(x)
+            for old, new in to_replace.iteritems():
+                line = line.replace(old, new)
+            for x in line.split():
+                yield string.atof(x)

     def repack(self):
         """
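A minimal sketch of how this generator is typically consumed (the file name is hypothetical; the 'D' to 'E' replacement mirrors the Fortran double-precision exponent handling used by the converters below):

# Hypothetical use of the Fortran-number generator defined above.
tools = ConverterTools()
R = tools.read_fortran_file('case.ctqmcout', {'D': 'E'})
first_value = R.next()    # numbers are yielded one by one as floats
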
@@ -62,16 +68,17 @@ class ConverterTools:

         import subprocess

-        if not (mpi.is_master_node()): return
+        if not (mpi.is_master_node()):
+            return
         mpi.report("Repacking the file %s" % self.hdf_file)

-        retcode = subprocess.call([hdf5_command_path+"/h5repack","-i%s"%self.hdf_file,"-otemphgfrt.h5"])
+        retcode = subprocess.call(
+            [hdf5_command_path + "/h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
         if retcode != 0:
             mpi.report("h5repack failed!")
         else:
             subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])

-
     def det_shell_equivalence(self, corr_shells):
         """
         Determine the equivalence of correlated shells.
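As used by the converters below, this helper returns the number of inequivalent correlated shells together with the two index maps. A minimal sketch (the two-shell corr_shells list is hypothetical, and the expected result assumes that shells of identical sort are treated as equivalent):

corr_shells = [{'atom': 0, 'sort': 0, 'l': 2, 'dim': 5, 'SO': 0, 'irep': 0},
               {'atom': 1, 'sort': 0, 'l': 2, 'dim': 5, 'SO': 0, 'irep': 0}]
n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = \
    ConverterTools().det_shell_equivalence(corr_shells)
# Expected here (under the stated assumption):
#   n_inequiv_shells == 1, corr_to_inequiv == [0, 0], inequiv_to_corr == [0]
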
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,7 +18,7 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from types import *
 import numpy
@@ -27,6 +27,7 @@ import pytriqs.utility.mpi as mpi
 from math import sqrt
 from converter_tools import *

+
 class HkConverter(ConverterTools):
     """
     Conversion from general H(k) file to an hdf5 file that can be used as input for the SumKDFT class.
@@ -52,8 +53,10 @@ class HkConverter(ConverterTools):

         """

-        assert type(filename)==StringType,"HkConverter: filename must be a filename."
-        if hdf_filename is None: hdf_filename = filename+'.h5'
+        assert type(
+            filename) == StringType, "HkConverter: filename must be a filename."
+        if hdf_filename is None:
+            hdf_filename = filename + '.h5'
         self.hdf_file = hdf_filename
         self.dft_file = filename
         self.dft_subgrp = dft_subgrp
@@ -65,7 +68,6 @@ class HkConverter(ConverterTools):
         if (os.path.exists(self.hdf_file) and repacking):
             ConverterTools.repack(self)

-
     def convert_dft_input(self, first_real_part_matrix=True, only_upper_triangle=False, weights_in_file=False):
         """
         Reads the appropriate files and stores the data for the dft_subgrp in the hdf5 archive.
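A minimal usage sketch of this converter (the input file name is hypothetical); per the constructor above, the HDF5 file defaults to the same name with '.h5' appended, and the keyword defaults match the signature shown:

Converter = HkConverter(filename='my_hamiltonian.hk')
Converter.convert_dft_input(first_real_part_matrix=True,
                            only_upper_triangle=False,
                            weights_in_file=False)
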
@@ -82,39 +84,55 @@ class HkConverter(ConverterTools):
         """

         # Read and write only on the master node
-        if not (mpi.is_master_node()): return
+        if not (mpi.is_master_node()):
+            return
         mpi.report("Reading input from %s..." % self.dft_file)

-        # R is a generator : each R.Next() will return the next number in the file
-        R = ConverterTools.read_fortran_file(self,self.dft_file,self.fortran_to_replace)
+        # R is a generator : each R.Next() will return the next number in the
+        # file
+        R = ConverterTools.read_fortran_file(
+            self, self.dft_file, self.fortran_to_replace)
         try:
-            energy_unit = 1.0 # the energy conversion factor is 1.0, we assume eV in files
-            n_k = int(R.next()) # read the number of k points
+            # the energy conversion factor is 1.0, we assume eV in files
+            energy_unit = 1.0
+            # read the number of k points
+            n_k = int(R.next())
             k_dep_projection = 0
             SP = 0  # no spin-polarision
             SO = 0  # no spin-orbit
-            charge_below = 0.0 # total charge below energy window is set to 0
-            density_required = R.next() # density required, for setting the chemical potential
+            # total charge below energy window is set to 0
+            charge_below = 0.0
+            # density required, for setting the chemical potential
+            density_required = R.next()
             symm_op = 0  # No symmetry groups for the k-sum

-            # the information on the non-correlated shells is needed for defining dimension of matrices:
-            n_shells = int(R.next()) # number of shells considered in the Wanniers
+            # the information on the non-correlated shells is needed for
+            # defining dimension of matrices:
+            # number of shells considered in the Wanniers
+            n_shells = int(R.next())
             # corresponds to index R in formulas
             # now read the information about the shells (atom, sort, l, dim):
             shell_entries = ['atom', 'sort', 'l', 'dim']
-            shells = [ {name: int(val) for name, val in zip(shell_entries, R)} for ish in range(n_shells) ]
+            shells = [{name: int(val) for name, val in zip(
+                shell_entries, R)} for ish in range(n_shells)]

-            n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            n_corr_shells = int(R.next())
             # corresponds to index R in formulas
-            # now read the information about the shells (atom, sort, l, dim, SO flag, irep):
+            # now read the information about the shells (atom, sort, l, dim, SO
+            # flag, irep):
             corr_shell_entries = ['atom', 'sort', 'l', 'dim','SO','irep']
-            corr_shells = [ {name: int(val) for name, val in zip(corr_shell_entries, R)} for icrsh in range(n_corr_shells) ]
+            corr_shells = [{name: int(val) for name, val in zip(
+                corr_shell_entries, R)} for icrsh in range(n_corr_shells)]

-            # determine the number of inequivalent correlated shells and maps, needed for further reading
-            [n_inequiv_shells, corr_to_inequiv, inequiv_to_corr] = ConverterTools.det_shell_equivalence(self,corr_shells)
+            # determine the number of inequivalent correlated shells and maps,
+            # needed for further reading
+            [n_inequiv_shells, corr_to_inequiv,
+             inequiv_to_corr] = ConverterTools.det_shell_equivalence(self, corr_shells)

             use_rotations = 0
-            rot_mat = [numpy.identity(corr_shells[icrsh]['dim'],numpy.complex_) for icrsh in range(n_corr_shells)]
+            rot_mat = [numpy.identity(
+                corr_shells[icrsh]['dim'], numpy.complex_) for icrsh in range(n_corr_shells)]
             rot_mat_time_inv = [0 for i in range(n_corr_shells)]

             # Representative representations are read from file
@@ -122,29 +140,39 @@ class HkConverter(ConverterTools):
             dim_reps = [0 for i in range(n_inequiv_shells)]
             T = []
             for ish in range(n_inequiv_shells):
-                n_reps[ish] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg
-                dim_reps[ish] = [int(R.next()) for i in range(n_reps[ish])] # dimensions of the subsets
+                # number of representatives ("subsets"), e.g. t2g and eg
+                n_reps[ish] = int(R.next())
+                dim_reps[ish] = [int(R.next()) for i in range(
+                    n_reps[ish])]  # dimensions of the subsets

                 # The transformation matrix:
-                # is of dimension 2l+1, it is taken to be standard d (as in Wien2k)
+                # is of dimension 2l+1, it is taken to be standard d (as in
+                # Wien2k)
                 ll = 2 * corr_shells[inequiv_to_corr[ish]]['l'] + 1
                 lmax = ll * (corr_shells[inequiv_to_corr[ish]]['SO'] + 1)
                 T.append(numpy.zeros([lmax, lmax], numpy.complex_))

                 T[ish] = numpy.array([[0.0, 0.0, 1.0, 0.0, 0.0],
-                                      [1.0/sqrt(2.0), 0.0, 0.0, 0.0, 1.0/sqrt(2.0)],
-                                      [-1.0/sqrt(2.0), 0.0, 0.0, 0.0, 1.0/sqrt(2.0)],
-                                      [0.0, 1.0/sqrt(2.0), 0.0, -1.0/sqrt(2.0), 0.0],
+                                      [1.0 / sqrt(2.0), 0.0, 0.0,
+                                       0.0, 1.0 / sqrt(2.0)],
+                                      [-1.0 / sqrt(2.0), 0.0, 0.0,
+                                       0.0, 1.0 / sqrt(2.0)],
+                                      [0.0, 1.0 /
+                                       sqrt(2.0), 0.0, -1.0 / sqrt(2.0), 0.0],
                                       [0.0, 1.0 / sqrt(2.0), 0.0, 1.0 / sqrt(2.0), 0.0]])

             # Spin blocks to be read:
-            n_spin_blocs = SP + 1 - SO # number of spins to read for Norbs and Ham, NOT Projectors
+            # number of spins to read for Norbs and Ham, NOT Projectors
+            n_spin_blocs = SP + 1 - SO

-            # define the number of n_orbitals for all k points: it is the number of total bands and independent of k!
-            n_orbitals = numpy.ones([n_k,n_spin_blocs],numpy.int) * sum([ sh['dim'] for sh in shells ])
+            # define the number of n_orbitals for all k points: it is the
+            # number of total bands and independent of k!
+            n_orbitals = numpy.ones(
+                [n_k, n_spin_blocs], numpy.int) * sum([sh['dim'] for sh in shells])

             # Initialise the projectors:
-            proj_mat = numpy.zeros([n_k,n_spin_blocs,n_corr_shells,max([crsh['dim'] for crsh in corr_shells]),max(n_orbitals)],numpy.complex_)
+            proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max(
+                [crsh['dim'] for crsh in corr_shells]), max(n_orbitals)], numpy.complex_)

             # Read the projectors from the file:
             for ik in range(n_k):
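For context, the hard-coded 5x5 matrix T built above is the transformation from the spherical-harmonic basis (m = -2, ..., 2) to the standard Wien2k-style cubic d harmonics; up to the phase conventions of the individual rows it can be read as

d_{z^2} = Y_2^{0}, \qquad d_{x^2-y^2} = \tfrac{1}{\sqrt{2}}\left(Y_2^{-2} + Y_2^{2}\right),

with the remaining rows giving the xy, xz and yz combinations (this identification is an interpretation, not stated in the code itself).
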
@@ -161,15 +189,19 @@ class HkConverter(ConverterTools):
                                else:
                                    offset += shells[ish]['dim']

-                        proj_mat[ik,isp,icrsh,0:n_orb,offset:offset+n_orb] = numpy.identity(n_orb)
+                        proj_mat[ik, isp, icrsh, 0:n_orb,
+                                 offset:offset + n_orb] = numpy.identity(n_orb)

             # now define the arrays for weights and hopping ...
-            bz_weights = numpy.ones([n_k],numpy.float_)/ float(n_k) # w(k_index), default normalisation
-            hopping = numpy.zeros([n_k,n_spin_blocs,max(n_orbitals),max(n_orbitals)],numpy.complex_)
+            # w(k_index), default normalisation
+            bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)
+            hopping = numpy.zeros([n_k, n_spin_blocs, max(
+                n_orbitals), max(n_orbitals)], numpy.complex_)

            if (weights_in_file):
                 # weights in the file
-                for ik in range(n_k) : bz_weights[ik] = R.next()
+                for ik in range(n_k):
+                    bz_weights[ik] = R.next()

                 # if the sum over spins is in the weights, take it out again!!
                 sm = sum(bz_weights)
@@ -180,7 +212,9 @@ class HkConverter(ConverterTools):
                 for ik in range(n_k):
                     n_orb = n_orbitals[ik, isp]

-                    if (first_real_part_matrix): # first read all real components for given k, then read imaginary parts
+                    # first read all real components for given k, then read
+                    # imaginary parts
+                    if (first_real_part_matrix):

                         for i in range(n_orb):
                             if (only_upper_triangle):
@@ -197,7 +231,9 @@ class HkConverter(ConverterTools):
                                 istart = 0
                             for j in range(istart, n_orb):
                                 hopping[ik, isp, i, j] += R.next() * 1j
-                                if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate()
+                                if ((only_upper_triangle)and(i != j)):
+                                    hopping[ik, isp, j, i] = hopping[
+                                        ik, isp, i, j].conjugate()

                     else:  # read (real,im) tuple

@@ -210,10 +246,14 @@ class HkConverter(ConverterTools):
                                 hopping[ik, isp, i, j] = R.next()
                                 hopping[ik, isp, i, j] += R.next() * 1j

-                                if ((only_upper_triangle)and(i!=j)): hopping[ik,isp,j,i] = hopping[ik,isp,i,j].conjugate()
+                                if ((only_upper_triangle)and(i != j)):
+                                    hopping[ik, isp, j, i] = hopping[
+                                        ik, isp, i, j].conjugate()
            # keep some things that we need for reading parproj:
-            things_to_set = ['n_shells','shells','n_corr_shells','corr_shells','n_spin_blocs','n_orbitals','n_k','SO','SP','energy_unit']
-            for it in things_to_set: setattr(self,it,locals()[it])
+            things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells',
+                             'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit']
+            for it in things_to_set:
+                setattr(self, it, locals()[it])
         except StopIteration:  # a more explicit error if the file is corrupted.
             raise "HK Converter : reading file dft_file failed!"

@@ -221,10 +261,12 @@ class HkConverter(ConverterTools):

         # Save to the HDF5:
         ar = HDFArchive(self.hdf_file, 'a')
-        if not (self.dft_subgrp in ar): ar.create_group(self.dft_subgrp)
+        if not (self.dft_subgrp in ar):
+            ar.create_group(self.dft_subgrp)
         things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
                           'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
                           'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
                           'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
-        for it in things_to_save: ar[self.dft_subgrp][it] = locals()[it]
+        for it in things_to_save:
+            ar[self.dft_subgrp][it] = locals()[it]
         del ar
@@ -91,7 +91,8 @@ class Wannier90Converter(ConverterTools):
         self.dft_subgrp = dft_subgrp
         self.symmcorr_subgrp = symmcorr_subgrp
         self.fortran_to_replace = {'D': 'E'}
-        # threshold below which matrix elements from wannier90 should be considered equal
+        # threshold below which matrix elements from wannier90 should be
+        # considered equal
         self._w90zero = 2.e-6

         # Checks if h5 file is there and repacks it if wanted:
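A minimal usage sketch of this converter (the seedname and the exact constructor keyword are assumptions; the converter reads its .inp file and the wannier90 H(R) file as shown in the code below):

# Hypothetical conversion of a wannier90 Hamiltonian; 'seedname' is assumed
# to be the base name shared by the input files.
Converter = Wannier90Converter(seedname='srvo3')
Converter.convert_dft_input()
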
@@ -114,12 +115,14 @@ class Wannier90Converter(ConverterTools):
             return
         mpi.report("Reading input from %s..." % self.inp_file)

-        # R is a generator : each R.Next() will return the next number in the file
+        # R is a generator : each R.Next() will return the next number in the
+        # file
         R = ConverterTools.read_fortran_file(
             self, self.inp_file, self.fortran_to_replace)
         shell_entries = ['atom', 'sort', 'l', 'dim']
         corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
-        # First, let's read the input file with the parameters needed for the conversion
+        # First, let's read the input file with the parameters needed for the
+        # conversion
         try:
             # read k - point mesh generation option
             kmesh_mode = int(R.next())
@@ -135,7 +138,8 @@ class Wannier90Converter(ConverterTools):
             # and the data will be copied from corr_shells into shells (see below)
             # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
             n_corr_shells = int(R.next())
-            # now read the information about the correlated shells (atom, sort, l, dim, SO flag, irep):
+            # now read the information about the correlated shells (atom, sort,
+            # l, dim, SO flag, irep):
             corr_shells = [{name: int(val) for name, val in zip(
                 corr_shell_entries, R)} for icrsh in range(n_corr_shells)]
         except StopIteration:  # a more explicit error if the file is corrupted.
@@ -166,7 +170,8 @@ class Wannier90Converter(ConverterTools):
         mpi.report(
             "Total number of WFs expected in the correlated shells: %d" % dim_corr_shells)

-        # determine the number of inequivalent correlated shells and maps, needed for further processing
+        # determine the number of inequivalent correlated shells and maps,
+        # needed for further processing
         n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(
             self, corr_shells)
         mpi.report("Number of inequivalent shells: %d" % n_inequiv_shells)
@@ -176,7 +181,8 @@ class Wannier90Converter(ConverterTools):
         mpi.report("Mapping: " + format(shells_map))

         # build the k-point mesh, if its size was given on input (kmesh_mode >= 0),
-        # otherwise it is built according to the data in the hr file (see below)
+        # otherwise it is built according to the data in the hr file (see
+        # below)
         if kmesh_mode >= 0:
             n_k, k_mesh, bz_weights = self.kmesh_build(nki, kmesh_mode)
             self.n_k = n_k
@@ -197,7 +203,8 @@ class Wannier90Converter(ConverterTools):
         # TODO: generalise to SP=1 (only partially done)
         rot_mat_time_inv = [0 for i in range(n_corr_shells)]

-        # Second, let's read the file containing the Hamiltonian in WF basis produced by Wannier90
+        # Second, let's read the file containing the Hamiltonian in WF basis
+        # produced by Wannier90
        for isp in range(n_spin):
             # begin loop on isp

@@ -212,20 +219,24 @@ class Wannier90Converter(ConverterTools):
             mpi.report(
                 "The Hamiltonian in MLWF basis is extracted from %s ..." % hr_file)
             nr, rvec, rdeg, nw, hamr = self.read_wannier90hr(hr_file)
-            # number of R vectors, their indices, their degeneracy, number of WFs, H(R)
+            # number of R vectors, their indices, their degeneracy, number of
+            # WFs, H(R)
             mpi.report("... done: %d R vectors, %d WFs found" % (nr, nw))

             if isp == 0:
-                # set or check some quantities that must be the same for both spins
+                # set or check some quantities that must be the same for both
+                # spins
                 self.nrpt = nr

                 # k-point grid: (if not defined before)
                 if kmesh_mode == -1:
-                    # the size of the k-point mesh is determined from the largest R vector
+                    # the size of the k-point mesh is determined from the
+                    # largest R vector
                     nki = [2 * rvec[:, idir].max() + 1 for idir in range(3)]
                     # it will be the same as in the win only when nki is odd, because of the
                     # wannier90 convention: if we have nki k-points along the i-th direction,
-                    # then we should get 2*(nki/2)+nki%2 R points along that direction
+                    # then we should get 2*(nki/2)+nki%2 R points along that
+                    # direction
                     n_k, k_mesh, bz_weights = self.kmesh_build(nki)
                     self.n_k = n_k
                     self.k_mesh = k_mesh
@@ -237,33 +248,41 @@ class Wannier90Converter(ConverterTools):
                 self.nwfs = nw
                 # check that the total number of WFs makes sense
                 if self.nwfs < dim_corr_shells:
-                    mpi.report("ERROR: number of WFs in the file smaller than number of correlated orbitals!")
+                    mpi.report(
+                        "ERROR: number of WFs in the file smaller than number of correlated orbitals!")
                 elif self.nwfs > dim_corr_shells:
-                    # NOTE: correlated shells must appear before uncorrelated ones inside the file
+                    # NOTE: correlated shells must appear before uncorrelated
+                    # ones inside the file
                     mpi.report("Number of WFs larger than correlated orbitals:\n" +
                                "WFs from %d to %d treated as uncorrelated" % (dim_corr_shells + 1, self.nwfs))
                 else:
-                    mpi.report("Number of WFs equal to number of correlated orbitals")
+                    mpi.report(
+                        "Number of WFs equal to number of correlated orbitals")

-                # we assume spin up and spin down always have same total number of WFs
+                # we assume spin up and spin down always have same total number
+                # of WFs
                 n_orbitals = numpy.ones(
                     [self.n_k, n_spin], numpy.int) * self.nwfs

             else:
                 # consistency check between the _up and _down file contents
                 if nr != self.nrpt:
-                    mpi.report("Different number of R vectors for spin-up/spin-down!")
+                    mpi.report(
+                        "Different number of R vectors for spin-up/spin-down!")
                 if nw != self.nwfs:
-                    mpi.report("Different number of WFs for spin-up/spin-down!")
+                    mpi.report(
+                        "Different number of WFs for spin-up/spin-down!")

             hamr_full.append(hamr)
             # FIXME: when do we actually need deepcopy()?
             # hamr_full.append(deepcopy(hamr))

             for ir in range(nr):
-                # checks if the Hamiltonian is real (it should, if wannierisation worked fine)
+                # checks if the Hamiltonian is real (it should, if
+                # wannierisation worked fine)
                 if numpy.abs((hamr[ir].imag.max()).max()) > self._w90zero:
-                    mpi.report("H(R) has large complex components at R %d" % ir)
+                    mpi.report(
+                        "H(R) has large complex components at R %d" % ir)
                 # copy the R=0 block corresponding to the correlated shells
                 # into another variable (needed later for finding rot_mat)
                 if rvec[ir, 0] == 0 and rvec[ir, 1] == 0 and rvec[ir, 2] == 0:
@@ -273,17 +292,22 @@ class Wannier90Converter(ConverterTools):
             if not numpy.allclose(ham_corr0.transpose().conjugate(), ham_corr0, atol=self._w90zero, rtol=1.e-9):
                 raise ValueError("H(R=0) matrix is not Hermitian!")

-            # find rot_mat symmetries by diagonalising the on-site Hamiltonian of the first spin
+            # find rot_mat symmetries by diagonalising the on-site Hamiltonian
+            # of the first spin
             if isp == 0:
-                use_rotations, rot_mat = self.find_rot_mat(n_corr_shells, corr_shells, shells_map, ham_corr0)
+                use_rotations, rot_mat = self.find_rot_mat(
+                    n_corr_shells, corr_shells, shells_map, ham_corr0)
             else:
                 # consistency check
-                use_rotations_, rot_mat_ = self.find_rot_mat(n_corr_shells, corr_shells, shells_map, ham_corr0)
+                use_rotations_, rot_mat_ = self.find_rot_mat(
+                    n_corr_shells, corr_shells, shells_map, ham_corr0)
                 if (use_rotations and not use_rotations_):
-                    mpi.report("Rotations cannot be used for spin component n. %d" % isp)
+                    mpi.report(
+                        "Rotations cannot be used for spin component n. %d" % isp)
                 for icrsh in range(n_corr_shells):
                     if not numpy.allclose(rot_mat_[icrsh], rot_mat[icrsh], atol=self._w90zero, rtol=1.e-15):
-                        mpi.report("Rotations for spin component n. %d do not match!" % isp)
+                        mpi.report(
+                            "Rotations for spin component n. %d do not match!" % isp)
         # end loop on isp

         mpi.report("The k-point grid has dimensions: %d, %d, %d" % tuple(nki))
@@ -292,11 +316,14 @@ class Wannier90Converter(ConverterTools):
             bz_weights = 0.5 * bz_weights

         # Third, compute the hoppings in reciprocal space
-        hopping = numpy.zeros([self.n_k, n_spin, numpy.max(n_orbitals), numpy.max(n_orbitals)], numpy.complex_)
+        hopping = numpy.zeros([self.n_k, n_spin, numpy.max(
+            n_orbitals), numpy.max(n_orbitals)], numpy.complex_)
         for isp in range(n_spin):
-            # make Fourier transform H(R) -> H(k) : it can be done one spin at a time
+            # make Fourier transform H(R) -> H(k) : it can be done one spin at
+            # a time
             hamk = self.fourier_ham(self.nwfs, hamr_full[isp])
-            # copy the H(k) in the right place of hoppings... is there a better way to do this??
+            # copy the H(k) in the right place of hoppings... is there a better
+            # way to do this??
             for ik in range(self.n_k):
                 #hopping[ik,isp,:,:] = deepcopy(hamk[ik][:,:])*energy_unit
                 hopping[ik, isp, :, :] = hamk[ik][:, :] * energy_unit
@@ -309,7 +336,8 @@ class Wannier90Converter(ConverterTools):
         # Projectors simply consist in identity matrix blocks selecting those MLWFs that
         # correspond to the specific correlated shell indexed by icrsh.
         # NOTE: we assume that the correlated orbitals appear at the beginning of the H(R)
-        # file and that the ordering of MLWFs matches the corr_shell info from the input.
+        # file and that the ordering of MLWFs matches the corr_shell info from
+        # the input.
         for icrsh in range(n_corr_shells):
             norb = corr_shells[icrsh]['dim']
             proj_mat[:, :, icrsh, 0:norb, iorb:iorb +
@@ -320,7 +348,8 @@ class Wannier90Converter(ConverterTools):
         ar = HDFArchive(self.hdf_file, 'a')
         if not (self.dft_subgrp in ar):
             ar.create_group(self.dft_subgrp)
-        # The subgroup containing the data. If it does not exist, it is created. If it exists, the data is overwritten!
+        # The subgroup containing the data. If it does not exist, it is
+        # created. If it exists, the data is overwritten!
         things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
                           'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
                           'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
@@ -373,7 +402,8 @@ class Wannier90Converter(ConverterTools):
         except ValueError:
             mpi.report("Could not read number of WFs or R vectors")

-        # allocate arrays to save the R vector indexes and degeneracies and the Hamiltonian
+        # allocate arrays to save the R vector indexes and degeneracies and the
+        # Hamiltonian
         rvec_idx = numpy.zeros((nrpt, 3), dtype=int)
         rvec_deg = numpy.zeros(nrpt, dtype=int)
         h_of_r = [numpy.zeros((num_wf, num_wf), dtype=numpy.complex_)
@@ -383,7 +413,8 @@ class Wannier90Converter(ConverterTools):
         currpos = 2
         try:
             ir = 0
-            # read the degeneracy of the R vectors (needed for the Fourier transform)
+            # read the degeneracy of the R vectors (needed for the Fourier
+            # transform)
             while ir < nrpt:
                 currpos += 1
                 for x in hr_data[currpos].split():
@@ -540,7 +571,8 @@ class Wannier90Converter(ConverterTools):
         kmesh = numpy.zeros((nkpt, 3), dtype=float)
         ii = 0
         for ix, iy, iz in product(range(msize[0]), range(msize[1]), range(msize[2])):
-            kmesh[ii, :] = [float(ix) / msize[0], float(iy) / msize[1], float(iz) / msize[2]]
+            kmesh[ii, :] = [float(ix) / msize[0], float(iy) /
+                            msize[1], float(iz) / msize[2]]
             ii += 1
         # weight is equal for all k-points because wannier90 uses uniform grid on whole BZ
         # (normalization is always 1 and takes into account spin degeneracy)
@@ -568,11 +600,13 @@ class Wannier90Converter(ConverterTools):
         """

         twopi = 2 * numpy.pi
-        h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_) for ik in range(self.n_k)]
+        h_of_k = [numpy.zeros((norb, norb), dtype=numpy.complex_)
+                  for ik in range(self.n_k)]
         ridx = numpy.array(range(self.nrpt))
         for ik, ir in product(range(self.n_k), ridx):
             rdotk = twopi * numpy.dot(self.k_mesh[ik], self.rvec[ir])
-            factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / float(self.rdeg[ir])
+            factor = (math.cos(rdotk) + 1j * math.sin(rdotk)) / \
+                float(self.rdeg[ir])
             h_of_k[ik][:, :] += factor * h_of_r[ir][:, :]

         return h_of_k
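In equation form, the loop above performs the Fourier sum of the real-space Hamiltonian, with each lattice vector weighted by its wannier90 degeneracy (rdeg in the code):

H_{mn}(\mathbf{k}) \;=\; \sum_{\mathbf{R}} \frac{e^{\,2\pi i\,\mathbf{k}\cdot\mathbf{R}}}{N^{\mathrm{deg}}_{\mathbf{R}}}\, H_{mn}(\mathbf{R}),

where k is given in reduced coordinates, matching rdotk = 2*pi*k.R and factor = (cos(rdotk) + i sin(rdotk)) / rdeg[ir] above.
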
@@ -1,5 +1,5 @@

-################################################################################
+##########################################################################
 #
 # TRIQS: a Toolbox for Research in Interacting Quantum Systems
 #
@@ -18,7 +18,7 @@
 # You should have received a copy of the GNU General Public License along with
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
-################################################################################
+##########################################################################

 from types import *
 import numpy
@@ -26,6 +26,7 @@ from pytriqs.archive import *
 from converter_tools import *
 import os.path

+
 class Wien2kConverter(ConverterTools):
     """
     Conversion from Wien2k output to an hdf5 file that can be used as input for the SumkDFT class.
@@ -64,8 +65,10 @@ class Wien2kConverter(ConverterTools):

         """

-        assert type(filename)==StringType, "Wien2kConverter: Please provide the DFT files' base name as a string."
-        if hdf_filename is None: hdf_filename = filename+'.h5'
+        assert type(
+            filename) == StringType, "Wien2kConverter: Please provide the DFT files' base name as a string."
+        if hdf_filename is None:
+            hdf_filename = filename + '.h5'
         self.hdf_file = hdf_filename
         self.dft_file = filename + '.ctqmcout'
         self.symmcorr_file = filename + '.symqmc'
@@ -89,7 +92,6 @@ class Wien2kConverter(ConverterTools):
         if (os.path.exists(self.hdf_file) and repacking):
             ConverterTools.repack(self)

-
     def convert_dft_input(self):
         """
         Reads the appropriate files and stores the data for the
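A minimal usage sketch (the case name is hypothetical): per the constructor above, the converter derives its input file names, e.g. <case>.ctqmcout and <case>.symqmc, from the Wien2k base name and writes <case>.h5 by default.

Converter = Wien2kConverter(filename='SrVO3')
Converter.convert_dft_input()
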
@@ -103,39 +105,55 @@ class Wien2kConverter(ConverterTools):
         """

         # Read and write only on the master node
-        if not (mpi.is_master_node()): return
+        if not (mpi.is_master_node()):
+            return
         mpi.report("Reading input from %s..." % self.dft_file)

-        # R is a generator : each R.Next() will return the next number in the file
-        R = ConverterTools.read_fortran_file(self,self.dft_file,self.fortran_to_replace)
+        # R is a generator : each R.Next() will return the next number in the
+        # file
+        R = ConverterTools.read_fortran_file(
+            self, self.dft_file, self.fortran_to_replace)
         try:
             energy_unit = R.next()  # read the energy convertion factor
-            n_k = int(R.next()) # read the number of k points
+            # read the number of k points
+            n_k = int(R.next())
             k_dep_projection = 1
-            SP = int(R.next()) # flag for spin-polarised calculation
-            SO = int(R.next()) # flag for spin-orbit calculation
+            # flag for spin-polarised calculation
+            SP = int(R.next())
+            # flag for spin-orbit calculation
+            SO = int(R.next())
             charge_below = R.next()  # total charge below energy window
-            density_required = R.next() # total density required, for setting the chemical potential
+            # total density required, for setting the chemical potential
+            density_required = R.next()
             symm_op = 1  # Use symmetry groups for the k-sum

-            # the information on the non-correlated shells is not important here, maybe skip:
-            n_shells = int(R.next()) # number of shells (e.g. Fe d, As p, O p) in the unit cell,
+            # the information on the non-correlated shells is not important
+            # here, maybe skip:
+            # number of shells (e.g. Fe d, As p, O p) in the unit cell,
+            n_shells = int(R.next())
             # corresponds to index R in formulas
             # now read the information about the shells (atom, sort, l, dim):
             shell_entries = ['atom', 'sort', 'l', 'dim']
-            shells = [ {name: int(val) for name, val in zip(shell_entries, R)} for ish in range(n_shells) ]
+            shells = [{name: int(val) for name, val in zip(
+                shell_entries, R)} for ish in range(n_shells)]

-            n_corr_shells = int(R.next()) # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            # number of corr. shells (e.g. Fe d, Ce f) in the unit cell,
+            n_corr_shells = int(R.next())
             # corresponds to index R in formulas
-            # now read the information about the shells (atom, sort, l, dim, SO flag, irep):
+            # now read the information about the shells (atom, sort, l, dim, SO
+            # flag, irep):
             corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
-            corr_shells = [ {name: int(val) for name, val in zip(corr_shell_entries, R)} for icrsh in range(n_corr_shells) ]
+            corr_shells = [{name: int(val) for name, val in zip(
+                corr_shell_entries, R)} for icrsh in range(n_corr_shells)]

-            # determine the number of inequivalent correlated shells and maps, needed for further reading
-            n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(self,corr_shells)
+            # determine the number of inequivalent correlated shells and maps,
+            # needed for further reading
+            n_inequiv_shells, corr_to_inequiv, inequiv_to_corr = ConverterTools.det_shell_equivalence(
+                self, corr_shells)

             use_rotations = 1
-            rot_mat = [numpy.identity(corr_shells[icrsh]['dim'],numpy.complex_) for icrsh in range(n_corr_shells)]
+            rot_mat = [numpy.identity(
+                corr_shells[icrsh]['dim'], numpy.complex_) for icrsh in range(n_corr_shells)]

             # read the matrices
             rot_mat_time_inv = [0 for i in range(n_corr_shells)]
@ -144,7 +162,8 @@ class Wien2kConverter(ConverterTools):
|
|||||||
for i in range(corr_shells[icrsh]['dim']): # read real part:
|
for i in range(corr_shells[icrsh]['dim']): # read real part:
|
||||||
for j in range(corr_shells[icrsh]['dim']):
|
for j in range(corr_shells[icrsh]['dim']):
|
||||||
rot_mat[icrsh][i, j] = R.next()
|
rot_mat[icrsh][i, j] = R.next()
|
||||||
for i in range(corr_shells[icrsh]['dim']): # read imaginary part:
|
# read imaginary part:
|
||||||
|
for i in range(corr_shells[icrsh]['dim']):
|
||||||
for j in range(corr_shells[icrsh]['dim']):
|
for j in range(corr_shells[icrsh]['dim']):
|
||||||
rot_mat[icrsh][i, j] += 1j * R.next()
|
rot_mat[icrsh][i, j] += 1j * R.next()
|
||||||
|
|
||||||
@ -156,8 +175,10 @@ class Wien2kConverter(ConverterTools):
|
|||||||
dim_reps = [0 for i in range(n_inequiv_shells)]
|
dim_reps = [0 for i in range(n_inequiv_shells)]
|
||||||
T = []
|
T = []
|
||||||
for ish in range(n_inequiv_shells):
|
for ish in range(n_inequiv_shells):
|
||||||
n_reps[ish] = int(R.next()) # number of representatives ("subsets"), e.g. t2g and eg
|
# number of representatives ("subsets"), e.g. t2g and eg
|
||||||
dim_reps[ish] = [int(R.next()) for i in range(n_reps[ish])] # dimensions of the subsets
|
n_reps[ish] = int(R.next())
|
||||||
|
dim_reps[ish] = [int(R.next()) for i in range(
|
||||||
|
n_reps[ish])] # dimensions of the subsets
|
||||||
|
|
||||||
# The transformation matrix:
|
# The transformation matrix:
|
||||||
# is of dimension 2l+1 without SO, and 2*(2l+1) with SO!
|
# is of dimension 2l+1 without SO, and 2*(2l+1) with SO!
|
||||||
@@ -183,13 +204,15 @@ class Wien2kConverter(ConverterTools):
                    n_orbitals[ik, isp] = int(R.next())

            # Initialise the projectors:
            proj_mat = numpy.zeros([n_k, n_spin_blocs, n_corr_shells, max(
                [crsh['dim'] for crsh in corr_shells]), numpy.max(n_orbitals)], numpy.complex_)

            # Read the projectors from the file:
            for ik in range(n_k):
                for icrsh in range(n_corr_shells):
                    n_orb = corr_shells[icrsh]['dim']
                    # first Real part for BOTH spins, due to conventions in
                    # dmftproj:
                    for isp in range(n_spin_blocs):
                        for i in range(n_orb):
                            for j in range(n_orbitals[ik][isp]):

@@ -201,18 +224,22 @@ class Wien2kConverter(ConverterTools):
                                proj_mat[ik, isp, icrsh, i, j] += 1j * R.next()

            # now define the arrays for weights and hopping ...
            # w(k_index), default normalisation
            bz_weights = numpy.ones([n_k], numpy.float_) / float(n_k)
            hopping = numpy.zeros([n_k, n_spin_blocs, numpy.max(
                n_orbitals), numpy.max(n_orbitals)], numpy.complex_)

            # weights in the file
            for ik in range(n_k):
                bz_weights[ik] = R.next()

            # if the sum over spins is in the weights, take it out again!!
            sm = sum(bz_weights)
            bz_weights[:] /= sm

            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian -- convention
            # for Wien2K.
            for isp in range(n_spin_blocs):
                for ik in range(n_k):
                    n_orb = n_orbitals[ik, isp]
@@ -220,8 +247,10 @@ class Wien2kConverter(ConverterTools):
                        hopping[ik, isp, i, i] = R.next() * energy_unit

            # keep some things that we need for reading parproj:
            things_to_set = ['n_shells', 'shells', 'n_corr_shells', 'corr_shells',
                             'n_spin_blocs', 'n_orbitals', 'n_k', 'SO', 'SP', 'energy_unit']
            for it in things_to_set:
                setattr(self, it, locals()[it])
        except StopIteration:  # a more explicit error if the file is corrupted.
            raise "Wien2k_converter : reading file %s failed!" % self.dft_file

@@ -230,20 +259,24 @@ class Wien2kConverter(ConverterTools):

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.dft_subgrp in ar):
            ar.create_group(self.dft_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!
        things_to_save = ['energy_unit', 'n_k', 'k_dep_projection', 'SP', 'SO', 'charge_below', 'density_required',
                          'symm_op', 'n_shells', 'shells', 'n_corr_shells', 'corr_shells', 'use_rotations', 'rot_mat',
                          'rot_mat_time_inv', 'n_reps', 'dim_reps', 'T', 'n_orbitals', 'proj_mat', 'bz_weights', 'hopping',
                          'n_inequiv_shells', 'corr_to_inequiv', 'inequiv_to_corr']
        for it in things_to_save:
            ar[self.dft_subgrp][it] = locals()[it]
        del ar

        # Symmetries are used, so now convert symmetry information for
        # *correlated* orbitals:
        self.convert_symmetry_input(orbits=self.corr_shells, symm_file=self.symmcorr_file,
                                    symm_subgrp=self.symmcorr_subgrp, SO=self.SO, SP=self.SP)
        self.convert_misc_input()

    def convert_parproj_input(self):
        """
        Reads the appropriate files and stores the data for the
@@ -255,14 +288,17 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return

        # get needed data from hdf file
        ar = HDFArchive(self.hdf_file, 'a')
        things_to_read = ['SP', 'SO', 'n_shells',
                          'n_k', 'n_orbitals', 'shells']

        for it in things_to_read:
            if not hasattr(self, it):
                setattr(self, it, ar[self.dft_subgrp][it])
        self.n_spin_blocs = self.SP + 1 - self.SO
        del ar

@@ -271,15 +307,18 @@ class Wien2kConverter(ConverterTools):
        dens_mat_below = [[numpy.zeros([self.shells[ish]['dim'], self.shells[ish]['dim']], numpy.complex_) for ish in range(self.n_shells)]
                          for isp in range(self.n_spin_blocs)]

        R = ConverterTools.read_fortran_file(
            self, self.parproj_file, self.fortran_to_replace)

        n_parproj = [int(R.next()) for i in range(self.n_shells)]
        n_parproj = numpy.array(n_parproj)

        # Initialise P, here a double list of matrices:
        proj_mat_all = numpy.zeros([self.n_k, self.n_spin_blocs, self.n_shells, max(
            n_parproj), max([sh['dim'] for sh in self.shells]), max(self.n_orbitals)], numpy.complex_)

        rot_mat_all = [numpy.identity(
            self.shells[ish]['dim'], numpy.complex_) for ish in range(self.n_shells)]
        rot_mat_all_time_inv = [0 for i in range(self.n_shells)]

        for ish in range(self.n_shells):
@@ -288,26 +327,31 @@ class Wien2kConverter(ConverterTools):
                for ir in range(n_parproj[ish]):

                    for isp in range(self.n_spin_blocs):
                        # read real part:
                        for i in range(self.shells[ish]['dim']):
                            for j in range(self.n_orbitals[ik][isp]):
                                proj_mat_all[ik, isp, ish, ir, i, j] = R.next()

                    for isp in range(self.n_spin_blocs):
                        # read imaginary part:
                        for i in range(self.shells[ish]['dim']):
                            for j in range(self.n_orbitals[ik][isp]):
                                proj_mat_all[ik, isp, ish,
                                             ir, i, j] += 1j * R.next()

            # now read the Density Matrix for this orbital below the energy
            # window:
            for isp in range(self.n_spin_blocs):
                for i in range(self.shells[ish]['dim']):  # read real part:
                    for j in range(self.shells[ish]['dim']):
                        dens_mat_below[isp][ish][i, j] = R.next()
            for isp in range(self.n_spin_blocs):
                # read imaginary part:
                for i in range(self.shells[ish]['dim']):
                    for j in range(self.shells[ish]['dim']):
                        dens_mat_below[isp][ish][i, j] += 1j * R.next()
                if (self.SP == 0):
                    dens_mat_below[isp][ish] /= 2.0

            # Global -> local rotation matrix for this shell:
            for i in range(self.shells[ish]['dim']):  # read real part:

@@ -325,15 +369,20 @@ class Wien2kConverter(ConverterTools):

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.parproj_subgrp in ar):
            ar.create_group(self.parproj_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!
        things_to_save = ['dens_mat_below', 'n_parproj',
                          'proj_mat_all', 'rot_mat_all', 'rot_mat_all_time_inv']
        for it in things_to_save:
            ar[self.parproj_subgrp][it] = locals()[it]
        del ar

        # Symmetries are used, so now convert symmetry information for *all*
        # orbitals:
        self.convert_symmetry_input(orbits=self.shells, symm_file=self.symmpar_file,
                                    symm_subgrp=self.symmpar_subgrp, SO=self.SO, SP=self.SP)

    def convert_bands_input(self):
        """
@@ -341,20 +390,24 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return

        try:
            # get needed data from hdf file
            ar = HDFArchive(self.hdf_file, 'a')
            things_to_read = ['SP', 'SO', 'n_corr_shells',
                              'n_shells', 'corr_shells', 'shells', 'energy_unit']

            for it in things_to_read:
                if not hasattr(self, it):
                    setattr(self, it, ar[self.dft_subgrp][it])
            self.n_spin_blocs = self.SP + 1 - self.SO
            del ar

            mpi.report("Reading input from %s..." % self.band_file)
            R = ConverterTools.read_fortran_file(
                self, self.band_file, self.fortran_to_replace)
            n_k = int(R.next())

            # read the list of n_orbitals for all k points

@@ -364,13 +417,15 @@ class Wien2kConverter(ConverterTools):
                    n_orbitals[ik, isp] = int(R.next())

            # Initialise the projectors:
            proj_mat = numpy.zeros([n_k, self.n_spin_blocs, self.n_corr_shells, max(
                [crsh['dim'] for crsh in self.corr_shells]), numpy.max(n_orbitals)], numpy.complex_)

            # Read the projectors from the file:
            for ik in range(n_k):
                for icrsh in range(self.n_corr_shells):
                    n_orb = self.corr_shells[icrsh]['dim']
                    # first Real part for BOTH spins, due to conventions in
                    # dmftproj:
                    for isp in range(self.n_spin_blocs):
                        for i in range(n_orb):
                            for j in range(n_orbitals[ik, isp]):

@@ -381,7 +436,8 @@ class Wien2kConverter(ConverterTools):
                            for j in range(n_orbitals[ik, isp]):
                                proj_mat[ik, isp, icrsh, i, j] += 1j * R.next()

            hopping = numpy.zeros([n_k, self.n_spin_blocs, numpy.max(
                n_orbitals), numpy.max(n_orbitals)], numpy.complex_)

            # Grab the H
            # we use now the convention of a DIAGONAL Hamiltonian!!!!
@@ -396,20 +452,25 @@ class Wien2kConverter(ConverterTools):
            n_parproj = numpy.array(n_parproj)

            # Initialise P, here a double list of matrices:
            proj_mat_all = numpy.zeros([n_k, self.n_spin_blocs, self.n_shells, max(n_parproj), max(
                [sh['dim'] for sh in self.shells]), numpy.max(n_orbitals)], numpy.complex_)

            for ish in range(self.n_shells):
                for ik in range(n_k):
                    for ir in range(n_parproj[ish]):
                        for isp in range(self.n_spin_blocs):

                            # read real part:
                            for i in range(self.shells[ish]['dim']):
                                for j in range(n_orbitals[ik, isp]):
                                    proj_mat_all[ik, isp, ish,
                                                 ir, i, j] = R.next()

                            # read imaginary part:
                            for i in range(self.shells[ish]['dim']):
                                for j in range(n_orbitals[ik, isp]):
                                    proj_mat_all[ik, isp, ish,
                                                 ir, i, j] += 1j * R.next()

            R.close()

@@ -422,13 +483,16 @@ class Wien2kConverter(ConverterTools):

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.bands_subgrp in ar):
            ar.create_group(self.bands_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!
        things_to_save = ['n_k', 'n_orbitals', 'proj_mat',
                          'hopping', 'n_parproj', 'proj_mat_all']
        for it in things_to_save:
            ar[self.bands_subgrp][it] = locals()[it]
        del ar

    def convert_misc_input(self):
        """
        Reads additional information on:
@@ -442,11 +506,13 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return

        # Check if SP, SO and n_k are already in h5
        ar = HDFArchive(self.hdf_file, 'r')
        if not (self.dft_subgrp in ar):
            raise IOError, "convert_misc_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp
        SP = ar[self.dft_subgrp]['SP']
        SO = ar[self.dft_subgrp]['SO']
        n_k = ar[self.dft_subgrp]['n_k']

@@ -470,11 +536,14 @@ class Wien2kConverter(ConverterTools):
        for isp, f in enumerate(files):
            if os.path.exists(f):
                mpi.report("Reading input from %s..." % f)
                R = ConverterTools.read_fortran_file(
                    self, f, self.fortran_to_replace)
                n_k_oubwin = int(R.next())
                if (n_k_oubwin != n_k):
                    mpi.report(
                        "convert_misc_input : WARNING : n_k in case.oubwin is different from n_k in case.klist")
                assert int(
                    R.next()) == SO, "convert_misc_input: SO is inconsistent in oubwin file!"

                band_window[isp] = numpy.zeros((n_k_oubwin, 2), dtype=int)
                for ik in xrange(n_k_oubwin):
@@ -501,15 +570,19 @@ class Wien2kConverter(ConverterTools):
            lattice_type = R.readline().split()[0]
            R.readline()
            temp = R.readline()
            lattice_constants = numpy.array(
                [float(temp[0 + 10 * i:10 + 10 * i].strip()) for i in range(3)])
            lattice_angles = numpy.array(
                [float(temp[30 + 10 * i:40 + 10 * i].strip()) for i in range(3)]) * numpy.pi / 180.0
            things_to_save.extend(
                ['lattice_type', 'lattice_constants', 'lattice_angles'])
        except IOError:
            raise "convert_misc_input: reading file %s failed" % self.struct_file

        # Read relevant data from .outputs file
        #######################################
        # rot_symmetries: matrix representation of all (space group) symmetry
        # operations

        if (os.path.exists(self.outputs_file)):
            mpi.report("Reading input from %s..." % self.outputs_file)

@@ -524,7 +597,8 @@ class Wien2kConverter(ConverterTools):
                    break
            for i in range(n_symmetries):
                while 1:
                    if (R.readline().strip().split()[0] == 'Symmetry'):
                        break
                sym_i = numpy.zeros((3, 3), dtype=float)
                for ir in range(3):
                    temp = R.readline().strip().split()

@@ -539,11 +613,12 @@ class Wien2kConverter(ConverterTools):

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.misc_subgrp in ar):
            ar.create_group(self.misc_subgrp)
        for it in things_to_save:
            ar[self.misc_subgrp][it] = locals()[it]
        del ar

    def convert_transport_input(self):
        """
        Reads the necessary information for transport calculations on:
@@ -554,11 +629,13 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return

        # Check if SP, SO and n_k are already in h5
        ar = HDFArchive(self.hdf_file, 'r')
        if not (self.dft_subgrp in ar):
            raise IOError, "convert_transport_input: No %s subgroup in hdf file found! Call convert_dft_input first." % self.dft_subgrp
        SP = ar[self.dft_subgrp]['SP']
        SO = ar[self.dft_subgrp]['SO']
        n_k = ar[self.dft_subgrp]['n_k']

@@ -581,10 +658,12 @@ class Wien2kConverter(ConverterTools):
        velocities_k = [[] for f in files]
        band_window_optics = []
        for isp, f in enumerate(files):
            if not os.path.exists(f):
                raise IOError, "convert_transport_input: File %s does not exist" % f
            mpi.report("Reading input from %s..." % f)

            R = ConverterTools.read_fortran_file(
                self, f, {'D': 'E', '(': '', ')': '', ',': ' '})
            band_window_optics_isp = []
            for ik in xrange(n_k):
                R.next()

@@ -592,26 +671,34 @@ class Wien2kConverter(ConverterTools):
                nu2 = int(R.next())
                band_window_optics_isp.append((nu1, nu2))
                n_bands = nu2 - nu1 + 1
                for _ in range(4):
                    R.next()
                if n_bands <= 0:
                    velocity_xyz = numpy.zeros((1, 1, 3), dtype=complex)
                else:
                    velocity_xyz = numpy.zeros(
                        (n_bands, n_bands, 3), dtype=complex)
                for nu_i in range(n_bands):
                    for nu_j in range(nu_i, n_bands):
                        for i in range(3):
                            velocity_xyz[nu_i][nu_j][
                                i] = R.next() + R.next() * 1j
                            if (nu_i != nu_j):
                                velocity_xyz[nu_j][nu_i][i] = velocity_xyz[
                                    nu_i][nu_j][i].conjugate()
                velocities_k[isp].append(velocity_xyz)
            band_window_optics.append(numpy.array(band_window_optics_isp))
            R.close()  # Reading done!

        # Put data to HDF5 file
        ar = HDFArchive(self.hdf_file, 'a')
        if not (self.transp_subgrp in ar):
            ar.create_group(self.transp_subgrp)
        # The subgroup containing the data. If it does not exist, it is
        # created. If it exists, the data is overwritten!!!
        things_to_save = ['band_window_optics', 'velocities_k']
        for it in things_to_save:
            ar[self.transp_subgrp][it] = locals()[it]
        del ar
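# The optics file stores only the upper triangle of the band-velocity matrix; the loop
# above mirrors it to the lower triangle by complex conjugation. A self-contained numpy
# sketch of the same bookkeeping (random numbers stand in for the values read from file,
# and a real diagonal is assumed, so each Cartesian component ends up hermitian):
import numpy

def hermitize_upper_triangle(n_bands=3):
    v = numpy.zeros((n_bands, n_bands, 3), dtype=complex)
    rng = numpy.random.RandomState(0)
    for nu_i in range(n_bands):
        for nu_j in range(nu_i, n_bands):
            for i in range(3):
                value = rng.randn() + 1j * rng.randn()
                if nu_i == nu_j:
                    value = value.real          # diagonal of a hermitian matrix is real
                v[nu_i, nu_j, i] = value
                if nu_i != nu_j:
                    v[nu_j, nu_i, i] = value.conjugate()
    # each Cartesian component is hermitian in the band indices
    assert numpy.allclose(v[:, :, 0], v[:, :, 0].conj().T)
    return v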
    def convert_symmetry_input(self, orbits, symm_file, symm_subgrp, SO, SP):
@@ -635,19 +722,23 @@ class Wien2kConverter(ConverterTools):

        """

        if not (mpi.is_master_node()):
            return
        mpi.report("Reading input from %s..." % symm_file)

        n_orbits = len(orbits)

        R = ConverterTools.read_fortran_file(
            self, symm_file, self.fortran_to_replace)

        try:
            n_symm = int(R.next())  # Number of symmetry operations
            n_atoms = int(R.next())  # number of atoms involved
            perm = [[int(R.next()) for i in range(n_atoms)]
                    for j in range(n_symm)]  # list of permutations of the atoms
            if SP:
                # time inversion for SO coupling
                time_inv = [int(R.next()) for j in range(n_symm)]
            else:
                time_inv = [0 for j in range(n_symm)]

@@ -655,29 +746,33 @@ class Wien2kConverter(ConverterTools):
            mat = []
            for i_symm in range(n_symm):

                mat.append([numpy.zeros([orbits[orb]['dim'], orbits[orb][
                           'dim']], numpy.complex_) for orb in range(n_orbits)])
                for orb in range(n_orbits):
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            # real part
                            mat[i_symm][orb][i, j] = R.next()
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            mat[i_symm][orb][i, j] += 1j * \
                                R.next()  # imaginary part

            mat_tinv = [numpy.identity(orbits[orb]['dim'], numpy.complex_)
                        for orb in range(n_orbits)]

            if ((SO == 0) and (SP == 0)):
                # here we need an additional time inversion operation, so read
                # it:
                for orb in range(n_orbits):
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            # real part
                            mat_tinv[orb][i, j] = R.next()
                    for i in range(orbits[orb]['dim']):
                        for j in range(orbits[orb]['dim']):
                            mat_tinv[orb][i, j] += 1j * \
                                R.next()  # imaginary part

        except StopIteration:  # a more explicit error if the file is corrupted.
            raise "Wien2k_converter : reading file symm_file failed!"

@@ -687,7 +782,10 @@ class Wien2kConverter(ConverterTools):

        # Save it to the HDF:
        ar = HDFArchive(self.hdf_file, 'a')
        if not (symm_subgrp in ar):
            ar.create_group(symm_subgrp)
        things_to_save = ['n_symm', 'n_atoms', 'perm',
                          'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
        for it in things_to_save:
            ar[symm_subgrp][it] = locals()[it]
        del ar
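# The permutations `perm`, rotation matrices `mat` and time-inversion flags `time_inv`
# stored above are the ingredients the Symmetry class later uses to symmetrize local
# quantities (see symmcorr.symmetrize / symmpar.symmetrize below). A minimal standalone
# sketch of such an average, for plain numpy matrices only; this is an illustration of
# the idea and NOT the dft_tools Symmetry.symmetrize implementation, and the rotation
# convention is an assumption:
import numpy

def symmetrize_local_matrices(obj, n_symm, perm, mat, time_inv):
    """Return the symmetry average of a list of square numpy matrices `obj` (one per atom)."""
    n_orbits = len(obj)
    result = [numpy.zeros_like(obj[iorb]) for iorb in range(n_orbits)]
    for i_symm in range(n_symm):
        for iorb in range(n_orbits):
            target = perm[i_symm][iorb] - 1          # Fortran -> Python indexing
            tmp = obj[iorb].conjugate() if time_inv[i_symm] else obj[iorb]
            rotated = numpy.dot(numpy.dot(mat[i_symm][iorb], tmp),
                                mat[i_symm][iorb].conjugate().transpose())
            result[target] += rotated / float(n_symm)
    return result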
File diff suppressed because it is too large
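# The groups written by the converter above ('dft_input', 'dft_parproj_input',
# 'dft_bands_input', ...) are what SumkDFT / SumkDFTTools read back via
# read_input_from_hdf below. A minimal read-back sketch using the same HDFArchive
# class used throughout these files (assumed already imported); the file name
# 'SrVO3.h5' and the group name are illustrative assumptions only.
def print_dft_input(hdf_file='SrVO3.h5', subgrp='dft_input'):
    ar = HDFArchive(hdf_file, 'r')        # open read-only
    if subgrp not in ar:
        raise IOError("no group '%s' in %s" % (subgrp, hdf_file))
    n_k = ar[subgrp]['n_k']               # scalars come back as plain numbers
    proj_mat = ar[subgrp]['proj_mat']     # arrays come back as numpy arrays
    del ar                                # close the archive, as done above
    return n_k, proj_mat.shape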
@@ -1,4 +1,4 @@
##########################################################################
#
# TRIQS: a Toolbox for Research in Interacting Quantum Systems
#
@@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License along with
# TRIQS. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
import sys
from types import *
import numpy

@@ -28,12 +28,12 @@ from sumk_dft import SumkDFT
from scipy.integrate import *
from scipy.interpolate import *


class SumkDFTTools(SumkDFT):
    """
    Extends the SumkDFT class with some tools for analysing the data.
    """

    def __init__(self, hdf_file, h_field=0.0, use_dft_blocks=False, dft_data='dft_input', symmcorr_data='dft_symmcorr_input',
                 parproj_data='dft_parproj_input', symmpar_data='dft_symmpar_input', bands_data='dft_bands_input',
                 transp_data='dft_transp_input', misc_data='dft_misc_input'):

@@ -46,7 +46,6 @@ class SumkDFTTools(SumkDFT):
                         symmpar_data=symmpar_data, bands_data=bands_data, transp_data=transp_data,
                         misc_data=misc_data)

    # Uses .data of only GfReFreq objects.
    def dos_wannier_basis(self, mu=None, broadening=None, mesh=None, with_Sigma=True, with_dc=True, save_to_file=True):
        """
@@ -92,78 +91,100 @@ class SumkDFTTools(SumkDFT):
        G_loc = []
        for icrsh in range(self.n_corr_shells):
            spn = self.spin_block_names[self.corr_shells[icrsh]['SO']]
            glist = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om)
                     for block, inner in self.gf_struct_sumk[icrsh]]
            G_loc.append(
                BlockGf(name_list=spn, block_list=glist, make_copies=False))
        for icrsh in range(self.n_corr_shells):
            G_loc[icrsh].zero()

        DOS = {sp: numpy.zeros([n_om], numpy.float_)
               for sp in self.spin_block_names[self.SO]}
        DOSproj = [{} for ish in range(self.n_inequiv_shells)]
        DOSproj_orb = [{} for ish in range(self.n_inequiv_shells)]
        for ish in range(self.n_inequiv_shells):
            for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
                dim = self.corr_shells[self.inequiv_to_corr[ish]]['dim']
                DOSproj[ish][sp] = numpy.zeros([n_om], numpy.float_)
                DOSproj_orb[ish][sp] = numpy.zeros(
                    [n_om, dim, dim], numpy.complex_)

        ikarray = numpy.array(range(self.n_k))
        for ik in mpi.slice_array(ikarray):

            G_latt_w = self.lattice_gf(
                ik=ik, mu=mu, iw_or_w="w", broadening=broadening, mesh=mesh, with_Sigma=with_Sigma, with_dc=with_dc)
            G_latt_w *= self.bz_weights[ik]

            # Non-projected DOS
            for iom in range(n_om):
                for bname, gf in G_latt_w:
                    DOS[bname][iom] -= gf.data[iom, :, :].imag.trace() / \
                        numpy.pi

            # Projected DOS:
            for icrsh in range(self.n_corr_shells):
                tmp = G_loc[icrsh].copy()
                for bname, gf in tmp:
                    tmp[bname] << self.downfold(ik, icrsh, bname, G_latt_w[
                                                bname], gf)  # downfolding G
                G_loc[icrsh] += tmp

        # Collect data from mpi:
        for bname in DOS:
            DOS[bname] = mpi.all_reduce(
                mpi.world, DOS[bname], lambda x, y: x + y)
        for icrsh in range(self.n_corr_shells):
            G_loc[icrsh] << mpi.all_reduce(
                mpi.world, G_loc[icrsh], lambda x, y: x + y)
        mpi.barrier()

        # Symmetrize and rotate to local coord. system if needed:
        if self.symm_op != 0:
            G_loc = self.symmcorr.symmetrize(G_loc)
        if self.use_rotations:
            for icrsh in range(self.n_corr_shells):
                for bname, gf in G_loc[icrsh]:
                    G_loc[icrsh][bname] << self.rotloc(
                        icrsh, gf, direction='toLocal')

        # G_loc can now also be used to look at orbitally-resolved quantities
        for ish in range(self.n_inequiv_shells):
            for bname, gf in G_loc[self.inequiv_to_corr[ish]]:  # loop over spins
                for iom in range(n_om):
                    DOSproj[ish][bname][iom] -= gf.data[iom,
                                                        :, :].imag.trace() / numpy.pi
                DOSproj_orb[ish][bname][:, :, :] += (1.0j * (gf - gf.conjugate().transpose()) /
                                                     2.0 / numpy.pi).data[:, :, :]

        # Write to files
        if save_to_file and mpi.is_master_node():
            for sp in self.spin_block_names[self.SO]:
                f = open('DOS_wann_%s.dat' % sp, 'w')
                for iom in range(n_om):
                    f.write("%s %s\n" % (om_mesh[iom], DOS[sp][iom]))
                f.close()

                # Partial
                for ish in range(self.n_inequiv_shells):
                    f = open('DOS_wann_%s_proj%s.dat' % (sp, ish), 'w')
                    for iom in range(n_om):
                        f.write("%s %s\n" %
                                (om_mesh[iom], DOSproj[ish][sp][iom]))
                    f.close()

                    # Orbitally-resolved
                    for i in range(self.corr_shells[self.inequiv_to_corr[ish]]['dim']):
                        for j in range(i, self.corr_shells[self.inequiv_to_corr[ish]]['dim']):
                            f = open('DOS_wann_' + sp + '_proj' + str(ish) +
                                     '_' + str(i) + '_' + str(j) + '.dat', 'w')
                            for iom in range(n_om):
                                f.write("%s %s %s\n" % (
                                    om_mesh[iom], DOSproj_orb[ish][sp][iom, i, j].real,
                                    DOSproj_orb[ish][sp][iom, i, j].imag))
                            f.close()

        return DOS, DOSproj, DOSproj_orb
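# Relation used above: the total DOS at each frequency is -1/pi times the trace of the
# imaginary part of the lattice Green's function, while i*(G - G^dagger)/(2*pi) is the
# matrix-valued spectral density whose trace gives the same number. A tiny self-contained
# numpy check of that identity (random data, purely illustrative, no TRIQS objects):
import numpy

def spectral_identity_check(dim=4, seed=1):
    rng = numpy.random.RandomState(seed)
    G = rng.randn(dim, dim) + 1j * rng.randn(dim, dim)   # stand-in for gf.data[iom, :, :]
    dos = -G.imag.trace() / numpy.pi
    A = 1.0j * (G - G.conjugate().transpose()) / (2.0 * numpy.pi)
    assert numpy.allclose(dos, A.trace().real)           # same number, two expressions
    return dos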
    # Uses .data of only GfReFreq objects.
    def dos_parproj_basis(self, mu=None, broadening=None, mesh=None, with_Sigma=True, with_dc=True, save_to_file=True):
        """
@@ -196,11 +217,14 @@ class SumkDFTTools(SumkDFT):
            DOS projected to atoms and resolved into orbital contributions.
        """

        things_to_read = ['n_parproj', 'proj_mat_all',
                          'rot_mat_all', 'rot_mat_all_time_inv']
        value_read = self.read_input_from_hdf(
            subgrp=self.parproj_data, things_to_read=things_to_read)
        if not value_read:
            return value_read
        if self.symm_op:
            self.symmpar = Symmetry(self.hdf_file, subgroup=self.symmpar_data)

        if (mesh is None) and (not with_Sigma):
            raise ValueError, "lattice_gf: Give the mesh=(om_min,om_max,n_points) for the lattice GfReFreq."
@@ -219,79 +243,101 @@ class SumkDFTTools(SumkDFT):
        gf_struct_parproj = [[(sp, range(self.shells[ish]['dim'])) for sp in spn]
                             for ish in range(self.n_shells)]
        for ish in range(self.n_shells):
            glist = [GfReFreq(indices=inner, window=(om_min, om_max), n_points=n_om)
                     for block, inner in gf_struct_parproj[ish]]
            G_loc.append(
                BlockGf(name_list=spn, block_list=glist, make_copies=False))
        for ish in range(self.n_shells):
            G_loc[ish].zero()

        DOS = {sp: numpy.zeros([n_om], numpy.float_)
               for sp in self.spin_block_names[self.SO]}
        DOSproj = [{} for ish in range(self.n_shells)]
        DOSproj_orb = [{} for ish in range(self.n_shells)]
        for ish in range(self.n_shells):
            for sp in self.spin_block_names[self.SO]:
                dim = self.shells[ish]['dim']
                DOSproj[ish][sp] = numpy.zeros([n_om], numpy.float_)
                DOSproj_orb[ish][sp] = numpy.zeros(
                    [n_om, dim, dim], numpy.complex_)

        ikarray = numpy.array(range(self.n_k))
        for ik in mpi.slice_array(ikarray):

            G_latt_w = self.lattice_gf(
                ik=ik, mu=mu, iw_or_w="w", broadening=broadening, mesh=mesh, with_Sigma=with_Sigma, with_dc=with_dc)
            G_latt_w *= self.bz_weights[ik]

            # Non-projected DOS
            for iom in range(n_om):
                for bname, gf in G_latt_w:
                    DOS[bname][iom] -= gf.data[iom, :, :].imag.trace() / \
                        numpy.pi

            # Projected DOS:
            for ish in range(self.n_shells):
                tmp = G_loc[ish].copy()
                for ir in range(self.n_parproj[ish]):
                    for bname, gf in tmp:
                        tmp[bname] << self.downfold(ik, ish, bname, G_latt_w[
                                                    bname], gf, shells='all', ir=ir)
                    G_loc[ish] += tmp

        # Collect data from mpi:
        for bname in DOS:
            DOS[bname] = mpi.all_reduce(
                mpi.world, DOS[bname], lambda x, y: x + y)
        for ish in range(self.n_shells):
            G_loc[ish] << mpi.all_reduce(
                mpi.world, G_loc[ish], lambda x, y: x + y)
        mpi.barrier()

        # Symmetrize and rotate to local coord. system if needed:
        if self.symm_op != 0:
            G_loc = self.symmpar.symmetrize(G_loc)
        if self.use_rotations:
            for ish in range(self.n_shells):
                for bname, gf in G_loc[ish]:
                    G_loc[ish][bname] << self.rotloc(
                        ish, gf, direction='toLocal', shells='all')

        # G_loc can now also be used to look at orbitally-resolved quantities
        for ish in range(self.n_shells):
            for bname, gf in G_loc[ish]:
                for iom in range(n_om):
                    DOSproj[ish][bname][iom] -= gf.data[iom,
                                                        :, :].imag.trace() / numpy.pi
                DOSproj_orb[ish][bname][:, :, :] += (1.0j * (gf - gf.conjugate().transpose()) /
                                                     2.0 / numpy.pi).data[:, :, :]

        # Write to files
        if save_to_file and mpi.is_master_node():
            for sp in self.spin_block_names[self.SO]:
                f = open('DOS_parproj_%s.dat' % sp, 'w')
                for iom in range(n_om):
                    f.write("%s %s\n" % (om_mesh[iom], DOS[sp][iom]))
                f.close()

                # Partial
                for ish in range(self.n_shells):
                    f = open('DOS_parproj_%s_proj%s.dat' % (sp, ish), 'w')
                    for iom in range(n_om):
                        f.write("%s %s\n" %
                                (om_mesh[iom], DOSproj[ish][sp][iom]))
                    f.close()

                    # Orbitally-resolved
                    for i in range(self.shells[ish]['dim']):
                        for j in range(i, self.shells[ish]['dim']):
                            f = open('DOS_parproj_' + sp + '_proj' + str(ish) +
                                     '_' + str(i) + '_' + str(j) + '.dat', 'w')
                            for iom in range(n_om):
                                f.write("%s %s %s\n" % (
                                    om_mesh[iom], DOSproj_orb[ish][sp][iom, i, j].real,
                                    DOSproj_orb[ish][sp][iom, i, j].imag))
                            f.close()

        return DOS, DOSproj, DOSproj_orb
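# The orbitally-resolved files written above carry three columns: the frequency and the
# real and imaginary parts of the corresponding matrix element of the spectral density.
# A short sketch of loading one of them back for further processing; the file name below
# is just the naming pattern used above filled with hypothetical spin/orbital indices.
import numpy

def load_orbital_dos(fname='DOS_parproj_up_proj0_0_0.dat'):
    data = numpy.loadtxt(fname)                 # columns: omega, Re, Im
    omega = data[:, 0]
    element = data[:, 1] + 1j * data[:, 2]      # complex matrix element vs. frequency
    return omega, element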
    # Uses .data of only GfReFreq objects.
    def spaghettis(self, broadening=None, plot_shift=0.0, plot_range=None, ishell=None, mu=None, save_to_file='Akw_'):
        """
@@ -318,16 +364,23 @@ class SumkDFTTools(SumkDFT):
            Data as it is also written to the files.
        """

        assert hasattr(
            self, "Sigma_imp_w"), "spaghettis: Set Sigma_imp_w first."
        things_to_read = ['n_k', 'n_orbitals', 'proj_mat',
                          'hopping', 'n_parproj', 'proj_mat_all']
        value_read = self.read_input_from_hdf(
            subgrp=self.bands_data, things_to_read=things_to_read)
        if not value_read:
            return value_read
        if ishell is not None:
            things_to_read = ['rot_mat_all', 'rot_mat_all_time_inv']
            value_read = self.read_input_from_hdf(
                subgrp=self.parproj_data, things_to_read=things_to_read)
            if not value_read:
                return value_read

        if mu is None:
            mu = self.chemical_potential
        spn = self.spin_block_names[self.SO]
        mesh = [x.real for x in self.Sigma_imp_w[0].mesh]
        n_om = len(mesh)

@@ -340,12 +393,15 @@ class SumkDFTTools(SumkDFT):
            om_maxplot = plot_range[1]

        if ishell is None:
            Akw = {sp: numpy.zeros([self.n_k, n_om], numpy.float_)
                   for sp in spn}
        else:
            Akw = {sp: numpy.zeros(
                [self.shells[ishell]['dim'], self.n_k, n_om], numpy.float_) for sp in spn}

        if not ishell is None:
            gf_struct_parproj = [
                (sp, range(self.shells[ishell]['dim'])) for sp in spn]
            G_loc = BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=self.Sigma_imp_w[0].mesh))
                                                  for block, inner in gf_struct_parproj], make_copies=False)
            G_loc.zero()
@@ -353,32 +409,42 @@ class SumkDFTTools(SumkDFT):
        ikarray = numpy.array(range(self.n_k))
        for ik in mpi.slice_array(ikarray):

            G_latt_w = self.lattice_gf(
                ik=ik, mu=mu, iw_or_w="w", broadening=broadening)

            if ishell is None:
                # Non-projected A(k,w)
                for iom in range(n_om):
                    if (mesh[iom] > om_minplot) and (mesh[iom] < om_maxplot):
                        for bname, gf in G_latt_w:
                            Akw[bname][ik, iom] += gf.data[iom, :,
                                                           :].imag.trace() / (-1.0 * numpy.pi)
                        # shift Akw for plotting stacked k-resolved eps(k)
                        # curves
                        Akw[bname][ik, iom] += ik * plot_shift

            else:  # ishell not None
                # Projected A(k,w):
                G_loc.zero()
                tmp = G_loc.copy()
                for ir in range(self.n_parproj[ishell]):
                    for bname, gf in tmp:
                        tmp[bname] << self.downfold(ik, ishell, bname, G_latt_w[
                                                    bname], gf, shells='all', ir=ir)
                    G_loc += tmp

                # Rotate to local frame
                if self.use_rotations:
                    for bname, gf in G_loc:
                        G_loc[bname] << self.rotloc(
                            ishell, gf, direction='toLocal', shells='all')

                for iom in range(n_om):
                    if (mesh[iom] > om_minplot) and (mesh[iom] < om_maxplot):
                        for ish in range(self.shells[ishell]['dim']):
                            for sp in spn:
                                Akw[sp][ish, ik, iom] = G_loc[sp].data[
                                    iom, ish, ish].imag / (-1.0 * numpy.pi)

        # Collect data from mpi
        for sp in spn:
@@ -388,28 +454,35 @@ class SumkDFTTools(SumkDFT):

        if save_to_file and mpi.is_master_node():
            if ishell is None:
                for sp in spn:  # loop over GF blocs:
                    # Open file for storage:
                    f = open(save_to_file + sp + '.dat', 'w')
                    for ik in range(self.n_k):
                        for iom in range(n_om):
                            if (mesh[iom] > om_minplot) and (mesh[iom] < om_maxplot):
                                if plot_shift > 0.0001:
                                    f.write('%s %s\n' % (mesh[iom], Akw[sp][ik, iom]))
                                else:
                                    f.write('%s %s %s\n' % (ik, mesh[iom], Akw[sp][ik, iom]))
                        f.write('\n')
                    f.close()

            else:  # ishell is not None
                for sp in spn:
                    for ish in range(self.shells[ishell]['dim']):
                        # Open file for storage:
                        f = open(save_to_file + str(ishell) + '_' + sp + '_proj' + str(ish) + '.dat', 'w')
                        for ik in range(self.n_k):
                            for iom in range(n_om):
                                if (mesh[iom] > om_minplot) and (mesh[iom] < om_maxplot):
                                    if plot_shift > 0.0001:
                                        f.write('%s %s\n' % (mesh[iom], Akw[sp][ish, ik, iom]))
                                    else:
                                        f.write('%s %s %s\n' % (ik, mesh[iom], Akw[sp][ish, ik, iom]))
                            f.write('\n')
                        f.close()
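The files written above contain one block per k-point: two columns (omega, A) when plot_shift is switched on, otherwise three columns (ik, omega, A). A minimal sketch for reading such a file back, assuming it was produced with plot_shift=0 and save_to_file='Akw_' for a spin block named 'up' (both names are placeholders):

import numpy

# columns: ik, omega, A(k,w); the blank lines between k-points are skipped by loadtxt
data = numpy.loadtxt('Akw_up.dat')
ik, omega, akw = data[:, 0].astype(int), data[:, 1], data[:, 2]
print "k-points: %i, frequency points per k-point: %i" % (ik.max() + 1, (ik == 0).sum())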
@@ -439,10 +512,14 @@ class SumkDFTTools(SumkDFT):
            A list of density matrices projected to all shells provided in the input.
        """

        things_to_read = ['dens_mat_below', 'n_parproj',
                          'proj_mat_all', 'rot_mat_all', 'rot_mat_all_time_inv']
        value_read = self.read_input_from_hdf(subgrp=self.parproj_data, things_to_read=things_to_read)
        if not value_read:
            return value_read
        if self.symm_op:
            self.symmpar = Symmetry(self.hdf_file, subgroup=self.symmpar_data)

        spn = self.spin_block_names[self.SO]
        ntoi = self.spin_names_to_ind[self.SO]
@@ -462,29 +539,37 @@ class SumkDFTTools(SumkDFT):
        G_loc = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, beta=beta))
                                               for block, inner in gf_struct_parproj[ish]], make_copies=False)
                 for ish in range(self.n_shells)]
        for ish in range(self.n_shells):
            G_loc[ish].zero()

        ikarray = numpy.array(range(self.n_k))
        for ik in mpi.slice_array(ikarray):

            G_latt_iw = self.lattice_gf(ik=ik, mu=mu, iw_or_w="iw", beta=beta, with_Sigma=with_Sigma, with_dc=with_dc)
            G_latt_iw *= self.bz_weights[ik]
            for ish in range(self.n_shells):
                tmp = G_loc[ish].copy()
                for ir in range(self.n_parproj[ish]):
                    for bname, gf in tmp:
                        tmp[bname] << self.downfold(ik, ish, bname, G_latt_iw[bname], gf, shells='all', ir=ir)
                    G_loc[ish] += tmp

        # Collect data from mpi:
        for ish in range(self.n_shells):
            G_loc[ish] << mpi.all_reduce(mpi.world, G_loc[ish], lambda x, y: x + y)
        mpi.barrier()

        # Symmetrize and rotate to local coord. system if needed:
        if self.symm_op != 0:
            G_loc = self.symmpar.symmetrize(G_loc)
        if self.use_rotations:
            for ish in range(self.n_shells):
                for bname, gf in G_loc[ish]:
                    G_loc[ish][bname] << self.rotloc(ish, gf, direction='toLocal', shells='all')

        for ish in range(self.n_shells):
            isp = 0
@@ -499,7 +584,6 @@ class SumkDFTTools(SumkDFT):

        return dens_mat

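The loop above accumulates shell-projected Matsubara Green functions and reduces them to density matrices. A usage sketch, assuming an already converted archive 'case.h5' and that the routine shown here is SumkDFTTools.partial_charges -- the method name itself lies outside the displayed hunks, so it is an assumption, not a quote:

from pytriqs.applications.dft.sumk_dft_tools import SumkDFTTools

SK = SumkDFTTools(hdf_file='case.h5')                  # placeholder archive name
dm = SK.partial_charges(beta=40.0, with_Sigma=False)   # assumed method name, see above
for ish, mat in enumerate(dm):
    # occupation of each shell = trace of the projected density matrix per spin block
    print "shell", ish, ":", dict((sp, mat[sp].real.trace()) for sp in mat)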
    def print_hamiltonian(self):
        """
        Prints the Kohn-Sham Hamiltonian to the text files hamup.dat and hamdn.dat (no spin-orbit coupling), or to ham.dat (with spin-orbit coupling).
@@ -510,9 +594,11 @@ class SumkDFTTools(SumkDFT):
            f2 = open('hamdn.dat', 'w')
            for ik in range(self.n_k):
                for i in range(self.n_orbitals[ik, 0]):
                    f1.write('%s %s\n' % (ik, self.hopping[ik, 0, i, i].real))
                for i in range(self.n_orbitals[ik, 1]):
                    f2.write('%s %s\n' % (ik, self.hopping[ik, 1, i, i].real))
                f1.write('\n')
                f2.write('\n')
            f1.close()
@@ -521,7 +607,8 @@ class SumkDFTTools(SumkDFT):
            f = open('ham.dat', 'w')
            for ik in range(self.n_k):
                for i in range(self.n_orbitals[ik, 0]):
                    f.write('%s %s\n' % (ik, self.hopping[ik, 0, i, i].real))
                f.write('\n')
            f.close()

@@ -533,10 +620,12 @@ class SumkDFTTools(SumkDFT):
        Reads the data for transport calculations from the hdf5 archive.
        """
        thingstoread = ['band_window_optics', 'velocities_k']
        self.read_input_from_hdf(subgrp=self.transp_data, things_to_read=thingstoread)
        thingstoread = ['band_window', 'lattice_angles', 'lattice_constants',
                        'lattice_type', 'n_symmetries', 'rot_symmetries']
        self.read_input_from_hdf(subgrp=self.misc_data, things_to_read=thingstoread)

    def cellvolume(self, lattice_type, lattice_constants, latticeangle):
        r"""
@@ -565,14 +654,16 @@ class SumkDFTTools(SumkDFT):
        c_al = numpy.cos(latticeangle[0])
        c_be = numpy.cos(latticeangle[1])
        c_ga = numpy.cos(latticeangle[2])
        vol_c = a * b * c * numpy.sqrt(1 + 2 * c_al * c_be * c_ga - c_al ** 2 - c_be ** 2 - c_ga ** 2)

        det = {"P": 1, "F": 4, "B": 2, "R": 3, "H": 1, "CXY": 2, "CYZ": 2, "CXZ": 2}
        vol_p = vol_c / det[lattice_type]

        return vol_c, vol_p

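The conventional volume above is the standard triclinic expression a*b*c*sqrt(1 + 2*cos(al)*cos(be)*cos(ga) - cos(al)**2 - cos(be)**2 - cos(ga)**2), and the primitive volume divides out the centering factor taken from the lattice-type table. A self-contained sketch of the same arithmetic with made-up lattice parameters (angles in radians, as the cosines above imply):

import numpy

a, b, c = 7.26, 7.26, 7.26                               # hypothetical lattice constants (a.u.)
al, be, ga = numpy.pi / 2, numpy.pi / 2, numpy.pi / 2    # 90 degrees each
vol_c = a * b * c * numpy.sqrt(1 + 2 * numpy.cos(al) * numpy.cos(be) * numpy.cos(ga)
                               - numpy.cos(al)**2 - numpy.cos(be)**2 - numpy.cos(ga)**2)
vol_p = vol_c / 1        # divisor 1 for a primitive ("P") lattice
print vol_c, vol_p       # both equal a**3 for this simple cubic cell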
    # Uses .data of only GfReFreq objects.
    def transport_distribution(self, beta, directions=['xx'], energy_window=None, Om_mesh=[0.0], with_Sigma=False, n_om=None, broadening=0.0):
        r"""
@@ -607,17 +698,24 @@ class SumkDFTTools(SumkDFT):
            Lorentzian broadening. It is necessary to specify the broadening if with_Sigma = False, otherwise this parameter can be set to 0.0.
        """

        # Check if the wien converter was called and read the transport subgroup from the hdf file
        if mpi.is_master_node():
            ar = HDFArchive(self.hdf_file, 'r')
            if not (self.transp_data in ar):
                raise IOError, "transport_distribution: No %s subgroup in hdf file found! Call convert_transp_input first." % self.transp_data
            # check if the outputs file was converted
            if not ('n_symmetries' in ar['dft_misc_input']):
                raise IOError, "transport_distribution: n_symmetries missing. Check if case.outputs file is present and call convert_misc_input() or convert_dft_input()."

        self.read_transport_input_from_hdf()

        if mpi.is_master_node():
            # k-dependent-projections.
            assert self.k_dep_projection == 1, "transport_distribution: k dependent projection is not implemented!"
            # positive Om_mesh
            assert all(Om >= 0.0 for Om in Om_mesh), "transport_distribution: Om_mesh should not contain negative values!"

        # Check if energy_window is sufficiently large and correct

@@ -626,11 +724,15 @@ class SumkDFTTools(SumkDFT):

        if (abs(self.fermi_dis(energy_window[0], beta) * self.fermi_dis(-energy_window[0], beta)) > 1e-5
                or abs(self.fermi_dis(energy_window[1], beta) * self.fermi_dis(-energy_window[1], beta)) > 1e-5):
            mpi.report("\n####################################################################")
            mpi.report("transport_distribution: WARNING - energy window might be too narrow!")
            mpi.report("####################################################################\n")

        # up and down are equivalent if SP = 0
        n_inequiv_spin_blocks = self.SP + 1 - self.SO
        self.directions = directions
        dir_to_int = {'x': 0, 'y': 1, 'z': 2}
@@ -639,7 +741,8 @@ class SumkDFTTools(SumkDFT):

        # Define the mesh for the Green's function in the specified energy window
        if (with_Sigma == True):
            self.omega = numpy.array([round(x.real, 12) for x in self.Sigma_imp_w[0].mesh])
            mesh = None
            mu = self.chemical_potential
            n_om = len(self.omega)
@@ -647,8 +750,10 @@ class SumkDFTTools(SumkDFT):

            if energy_window is not None:
                # Find the corresponding window in the Sigma mesh
                ioffset = numpy.sum(self.omega < energy_window[0] - max(Om_mesh))
                self.omega = self.omega[numpy.logical_and(self.omega >= energy_window[0] - max(Om_mesh),
                                                          self.omega <= energy_window[1] + max(Om_mesh))]
                n_om = len(self.omega)

                # Truncate Sigma to the given omega window
@@ -657,19 +762,24 @@ class SumkDFTTools(SumkDFT):
                for icrsh in range(self.n_corr_shells):
                    Sigma_save = self.Sigma_imp_w[icrsh].copy()
                    spn = self.spin_block_names[self.corr_shells[icrsh]['SO']]
                    glist = lambda: [GfReFreq(indices=inner, window=(self.omega[0], self.omega[-1]), n_points=n_om)
                                     for block, inner in self.gf_struct_sumk[icrsh]]
                    self.Sigma_imp_w[icrsh] = BlockGf(name_list=spn, block_list=glist(), make_copies=False)
                    for i, g in self.Sigma_imp_w[icrsh]:
                        for iL in g.indices:
                            for iR in g.indices:
                                for iom in xrange(n_om):
                                    g.data[iom, iL, iR] = Sigma_save[i].data[ioffset + iom, iL, iR]
        else:
            assert n_om is not None, "transport_distribution: Number of omega points (n_om) needed to calculate transport distribution!"
            assert energy_window is not None, "transport_distribution: Energy window needed to calculate transport distribution!"
            assert broadening != 0.0 and broadening is not None, "transport_distribution: Broadening necessary to calculate transport distribution!"
            self.omega = numpy.linspace(energy_window[0] - max(Om_mesh), energy_window[1] + max(Om_mesh), n_om)
            mesh = [energy_window[0] - max(Om_mesh), energy_window[1] + max(Om_mesh), n_om]
            mu = 0.0

        # Define mesh for optic conductivity
@@ -685,27 +795,36 @@ class SumkDFTTools(SumkDFT):
            print "Calculation requested for Omega mesh: ", numpy.array(Om_mesh)
            print "Omega mesh automatically repinned to: ", self.Om_mesh

        self.Gamma_w = {direction: numpy.zeros((len(self.Om_mesh), n_om), dtype=numpy.float_)
                        for direction in self.directions}

        # Sum over all k-points
        ikarray = numpy.array(range(self.n_k))
        for ik in mpi.slice_array(ikarray):
            # Calculate G_w for ik and initialize A_kw
            G_w = self.lattice_gf(ik, mu, iw_or_w="w", beta=beta, broadening=broadening, mesh=mesh, with_Sigma=with_Sigma)
            A_kw = [numpy.zeros((self.n_orbitals[ik][isp], self.n_orbitals[ik][isp], n_om), dtype=numpy.complex_)
                    for isp in range(n_inequiv_spin_blocks)]

            for isp in range(n_inequiv_spin_blocks):
                # copy data from G_w (swapaxes is used to have omega in the 3rd dimension)
                A_kw[isp] = copy.deepcopy(G_w[self.spin_block_names[self.SO][isp]].data.swapaxes(0, 1).swapaxes(1, 2))
                # calculate A(k,w) for each frequency
                for iw in xrange(n_om):
                    A_kw[isp][:, :, iw] = -1.0 / (2.0 * numpy.pi * 1j) * (
                        A_kw[isp][:, :, iw] - numpy.conjugate(numpy.transpose(A_kw[isp][:, :, iw])))

                b_min = max(self.band_window[isp][ik, 0], self.band_window_optics[isp][ik, 0])
                b_max = min(self.band_window[isp][ik, 1], self.band_window_optics[isp][ik, 1])
                A_i = slice(b_min - self.band_window[isp][ik, 0], b_max - self.band_window[isp][ik, 0] + 1)
                v_i = slice(b_min - self.band_window_optics[isp][ik, 0], b_max - self.band_window_optics[isp][ik, 0] + 1)

                # loop over all symmetries
                for R in self.rot_symmetries:
@@ -713,13 +832,16 @@ class SumkDFTTools(SumkDFT):
                    vel_R = copy.deepcopy(self.velocities_k[isp][ik])
                    for nu1 in range(self.band_window_optics[isp][ik, 1] - self.band_window_optics[isp][ik, 0] + 1):
                        for nu2 in range(self.band_window_optics[isp][ik, 1] - self.band_window_optics[isp][ik, 0] + 1):
                            vel_R[nu1][nu2][:] = numpy.dot(R, vel_R[nu1][nu2][:])

                    # calculate Gamma_w for each direction from the velocities vel_R and the spectral function A_kw
                    for direction in self.directions:
                        for iw in xrange(n_om):
                            for iq in range(len(self.Om_mesh)):
                                if(iw + iOm_mesh[iq] >= n_om or self.omega[iw] < -self.Om_mesh[iq] + energy_window[0] or self.omega[iw] > self.Om_mesh[iq] + energy_window[1]):
                                    continue

                                self.Gamma_w[direction][iq, iw] += (numpy.dot(numpy.dot(numpy.dot(vel_R[v_i, v_i, dir_to_int[direction[0]]],
                                                                    A_kw[isp][A_i, A_i, iw + iOm_mesh[iq]]), vel_R[v_i, v_i, dir_to_int[direction[1]]]),
@@ -729,11 +851,10 @@ class SumkDFTTools(SumkDFT):
            self.Gamma_w[direction] = (mpi.all_reduce(mpi.world, self.Gamma_w[direction], lambda x, y: x + y)
                                       / self.cellvolume(self.lattice_type, self.lattice_constants, self.lattice_angles)[1] / self.n_symmetries)

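The signature shown above is enough for a minimal driver. A sketch of a typical call, assuming a converted archive 'case.h5' whose transport subgroup has already been written by the Wien2k converter (file name and numbers are illustrative):

from pytriqs.applications.dft.sumk_dft_tools import SumkDFTTools

SK = SumkDFTTools(hdf_file='case.h5')    # placeholder archive
SK.transport_distribution(beta=40.0, directions=['xx'],
                          energy_window=[-0.3, 0.3], Om_mesh=[0.0, 0.1],
                          with_Sigma=False, n_om=501, broadening=0.01)
# SK.Gamma_w now holds the transport distribution on SK.omega for every direction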
    def transport_coefficient(self, direction, iq, n, beta, method=None):
        r"""
        Calculates the transport coefficient A_n in a given direction for a given :math:`\Omega`. The required members (Gamma_w, directions, Om_mesh) have to be obtained first
        by calling the function :meth:`transport_distribution <dft.sumk_dft_tools.SumkDFTTools.transport_distribution>`. For n>0 A is set to NaN if :math:`\Omega` is not 0.0.

        Parameters
        ----------
@@ -755,23 +876,28 @@ class SumkDFTTools(SumkDFT):
            Transport coefficient.
        """

        if not (mpi.is_master_node()):
            return

        assert hasattr(self, 'Gamma_w'), "transport_coefficient: Run transport_distribution first or load data from h5!"

        if (self.Om_mesh[iq] == 0.0 or n == 0.0):
            A = 0.0
            # set up the integrand
            if (self.Om_mesh[iq] == 0.0):
                A_int = self.Gamma_w[direction][iq] * (self.fermi_dis(self.omega, beta) * self.fermi_dis(-self.omega, beta)) * (self.omega * beta)**n
            elif (n == 0.0):
                A_int = self.Gamma_w[direction][iq] * (self.fermi_dis(self.omega, beta) - self.fermi_dis(self.omega + self.Om_mesh[iq], beta)) / (self.Om_mesh[iq] * beta)

            # w-integration
            if method == 'quad':
                # quad on interpolated w-points with cubic spline
                A_int_interp = interp1d(self.omega, A_int, kind='cubic')
                A = quad(A_int_interp, min(self.omega), max(self.omega), epsabs=1.0e-12, epsrel=1.0e-12, limit=500)
                A = A[0]
            elif method == 'simps':
                # Simpson rule on the w-grid
@@ -789,13 +915,12 @@ class SumkDFTTools(SumkDFT):
            A = numpy.nan
        return A

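For Omega = 0 the integrand assembled above reduces to Gamma(w) * (beta*w)**n * f(w) * f(-w). A self-contained toy evaluation of A_0 and A_1 with Simpson's rule, using a constant Gamma and a Fermi factor of the assumed standard form 1/(exp(beta*w) + 1) (nothing here is read from an archive):

import numpy
from scipy.integrate import simps

beta = 40.0
omega = numpy.linspace(-0.5, 0.5, 2001)
gamma = numpy.ones_like(omega)                        # hypothetical Gamma_w at Omega = 0
fermi = lambda w: 1.0 / (numpy.exp(w * beta) + 1.0)
A0 = simps(gamma * fermi(omega) * fermi(-omega) * (omega * beta)**0, omega)
A1 = simps(gamma * fermi(omega) * fermi(-omega) * (omega * beta)**1, omega)
print A0, A1    # A1 vanishes for a particle-hole symmetric Gamma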
    def conductivity_and_seebeck(self, beta, method=None):
        r"""
        Calculates the Seebeck coefficient and the optical conductivity by calling
        :meth:`transport_coefficient <dft.sumk_dft_tools.SumkDFTTools.transport_coefficient>`.
        The required members (Gamma_w, directions, Om_mesh) have to be obtained first by calling the function
        :meth:`transport_distribution <dft.sumk_dft_tools.SumkDFTTools.transport_distribution>`.

        Parameters
        ----------
@@ -811,26 +936,36 @@ class SumkDFTTools(SumkDFT):
            Seebeck coefficient in each direction. If zero is not present in Om_mesh the Seebeck coefficient is set to NaN.
        """

        if not (mpi.is_master_node()):
            return

        assert hasattr(self, 'Gamma_w'), "conductivity_and_seebeck: Run transport_distribution first or load data from h5!"
        n_q = self.Gamma_w[self.directions[0]].shape[0]

        A0 = {direction: numpy.full((n_q,), numpy.nan) for direction in self.directions}
        A1 = {direction: numpy.full((n_q,), numpy.nan) for direction in self.directions}
        self.seebeck = {direction: numpy.nan for direction in self.directions}
        self.optic_cond = {direction: numpy.full((n_q,), numpy.nan) for direction in self.directions}

        for direction in self.directions:
            for iq in xrange(n_q):
                A0[direction][iq] = self.transport_coefficient(direction, iq=iq, n=0, beta=beta, method=method)
                A1[direction][iq] = self.transport_coefficient(direction, iq=iq, n=1, beta=beta, method=method)
                print "A_0 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A0[direction][iq])
                print "A_1 in direction %s for Omega = %.2f %e a.u." % (direction, self.Om_mesh[iq], A1[direction][iq])
                if ~numpy.isnan(A1[direction][iq]):
                    # Seebeck is overwritten if there is more than one Omega = 0 in Om_mesh
                    self.seebeck[direction] = - A1[direction][iq] / A0[direction][iq] * 86.17
            self.optic_cond[direction] = beta * A0[direction] * 10700.0 / numpy.pi
            for iq in xrange(n_q):
                print "Conductivity in direction %s for Omega = %.2f %f x 10^4 Ohm^-1 cm^-1" % (direction, self.Om_mesh[iq], self.optic_cond[direction][iq])
                if not (numpy.isnan(A1[direction][iq])):
@@ -838,7 +973,6 @@ class SumkDFTTools(SumkDFT):

        return self.optic_cond, self.seebeck

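A sketch of the complete transport chain, continuing the hypothetical 'case.h5' example; the conversion factors 86.17 (muV/K) and 10700/pi (in units of 10^4 / (Ohm cm)) are the ones hard-coded above:

from pytriqs.applications.dft.sumk_dft_tools import SumkDFTTools

SK = SumkDFTTools(hdf_file='case.h5')    # placeholder archive
SK.transport_distribution(beta=40.0, directions=['xx'], energy_window=[-0.3, 0.3],
                          Om_mesh=[0.0], with_Sigma=False, n_om=501, broadening=0.01)
optic_cond, seebeck = SK.conductivity_and_seebeck(beta=40.0, method='simps')
print "sigma(Omega=0) =", optic_cond['xx'][0], "x 10^4 / (Ohm cm)"
print "Seebeck        =", seebeck['xx'], "muV/K"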
    def fermi_dis(self, w, beta):
        r"""
        Fermi distribution.
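The body of fermi_dis lies outside the displayed hunks; the window checks and integrands above use it as the ordinary Fermi function, presumably f(w) = 1/(exp(beta*w) + 1). A standalone equivalent (an assumption, not a quote of the method):

import numpy

def fermi_dis(w, beta):
    # assumed standard Fermi factor, with w measured from the chemical potential
    return 1.0 / (numpy.exp(w * beta) + 1.0)

print fermi_dis(0.0, 40.0)    # 0.5 exactly at the Fermi level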
@@ -1,5 +1,5 @@

##########################################################################
#
# TRIQS: a Toolbox for Research in Interacting Quantum Systems
#
@@ -18,14 +18,16 @@
# You should have received a copy of the GNU General Public License along with
# TRIQS. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################

import copy
import numpy
from types import *
from pytriqs.gf.local import *
from pytriqs.archive import *
import pytriqs.utility.mpi as mpi


class Symmetry:
    """
    This class provides the routines for applying symmetry operations for the k sums.
@@ -46,10 +48,13 @@ class Symmetry:
        the data is stored at the root of the hdf5 archive.
        """

        assert type(hdf_file) == StringType, "Symmetry: hdf_file must be a filename."
        self.hdf_file = hdf_file
        things_to_read = ['n_symm', 'n_atoms', 'perm',
                          'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
        for it in things_to_read:
            setattr(self, it, 0)

        if mpi.is_master_node():
            # Read the stuff on master:
@@ -59,25 +64,27 @@ class Symmetry:
            else:
                ar2 = ar[subgroup]

            for it in things_to_read:
                setattr(self, it, ar2[it])
            del ar2
            del ar

        # Broadcasting
        for it in things_to_read:
            setattr(self, it, mpi.bcast(getattr(self, it)))

        # now define the mapping of orbitals:
        # self.orb_map[iorb] = jorb gives the permutation of the orbitals as given in the list, when the
        # permutation of the atoms is done:
        self.n_orbits = len(self.orbits)
        self.orb_map = [[0 for iorb in range(self.n_orbits)] for i_symm in range(self.n_symm)]
        for i_symm in range(self.n_symm):
            for iorb in range(self.n_orbits):
                srch = copy.deepcopy(self.orbits[iorb])
                srch['atom'] = self.perm[i_symm][self.orbits[iorb]['atom'] - 1]
                self.orb_map[i_symm][iorb] = self.orbits.index(srch)

    def symmetrize(self, obj):
        """
        Symmetrizes a given object.
@@ -97,18 +104,24 @@ class Symmetry:
            Symmetrized object, of the same type as the input object.
        """

        assert isinstance(obj, list), "symmetrize: obj has to be a list of objects."
        assert len(obj) == self.n_orbits, "symmetrize: obj has to be a list of the same length as defined in the init."

        if isinstance(obj[0], BlockGf):
            # here the result is stored, it is a BlockGf!
            symm_obj = [obj[i].copy() for i in range(len(obj))]
            for iorb in range(self.n_orbits):
                symm_obj[iorb].zero()  # set to zero
        else:
            # if not a BlockGf, we assume it is a matrix (density matrix); it has
            # to be complex since self.mat is complex!
            symm_obj = [copy.deepcopy(obj[i]) for i in range(len(obj))]
            for iorb in range(self.n_orbits):
                if type(symm_obj[iorb]) == DictType:
                    for ii in symm_obj[iorb]:
                        symm_obj[iorb][ii] *= 0.0
                else:
                    symm_obj[iorb] *= 0.0

@@ -121,8 +134,11 @@ class Symmetry:
                if isinstance(obj[0], BlockGf):

                    tmp = obj[iorb].copy()
                    if self.time_inv[i_symm]:
                        tmp << tmp.transpose()
                    for bname, gf in tmp:
                        tmp[bname].from_L_G_R(self.mat[i_symm][iorb], tmp[bname],
                                              self.mat[i_symm][iorb].conjugate().transpose())
                    tmp *= 1.0 / self.n_symm
                    symm_obj[jorb] += tmp

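The accumulation above averages each object over all symmetry operations, symm_obj[jorb] = (1/n_symm) * sum over S of M_S G M_S^dagger, with an extra transpose when the operation involves time inversion. A plain-numpy toy version of that averaging for a single 2x2 matrix and two made-up symmetry matrices:

import numpy

mats = [numpy.eye(2), numpy.array([[0., 1.], [1., 0.]])]   # hypothetical symmetry matrices
dm = numpy.array([[1.0, 0.2], [0.2, 0.5]])                 # hypothetical density matrix
dm_symm = sum(numpy.dot(m, numpy.dot(dm, m.conjugate().transpose())) for m in mats) / len(mats)
print dm_symm    # the two diagonal entries are averaged, the off-diagonal ones survive here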
@@ -6,6 +6,7 @@ import pytriqs.utility.mpi as mpi
import numpy
import copy


class TransBasis:
    """
    Computes rotations into a new basis, using the condition that a given property is diagonal in the new basis.
@@ -39,14 +40,14 @@ class TransBasis:
            Converter.convert_dft_input()
            del Converter

            self.SK = SumkDFT(hdf_file=hdf_datafile + '.h5', use_dft_blocks=False)
        else:
            self.SK = SK

        self.T = copy.deepcopy(self.SK.T[0])
        self.w = numpy.identity(SK.corr_shells[0]['dim'])

    def calculate_diagonalisation_matrix(self, prop_to_be_diagonal='eal'):
        """
        Calculates the diagonalisation matrix w, and stores it as a member of the class.
@@ -71,23 +72,25 @@ class TransBasis:
        elif prop_to_be_diagonal == 'dm':
            prop = self.SK.density_matrix(method='using_point_integration')[0]
        else:
            mpi.report("trans_basis: not a valid quantity to be diagonal. Choices are 'eal' or 'dm'.")
            return 0

        if self.SK.SO == 0:
            self.eig, self.w = numpy.linalg.eigh(prop['up'])
            # calculate the new transformation matrix
            self.T = numpy.dot(self.T.transpose().conjugate(), self.w).conjugate().transpose()
        else:
            self.eig, self.w = numpy.linalg.eigh(prop['ud'])
            # calculate the new transformation matrix
            self.T = numpy.dot(self.T.transpose().conjugate(), self.w).conjugate().transpose()

        # measure for the 'unity' of the transformation:
        wsqr = sum(abs(self.w.diagonal())**2) / self.w.diagonal().size
        return wsqr

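A sketch of how this class is typically driven, assuming an existing SumkDFT object SK built from a converted archive; the import path and the output file name are illustrative:

from pytriqs.applications.dft.trans_basis import TransBasis   # assumed module path

TB = TransBasis(SK=SK)                  # SK: pre-existing SumkDFT instance (assumed)
wsqr = TB.calculate_diagonalisation_matrix(prop_to_be_diagonal='eal')
print "unity measure of the transformation:", wsqr
TB.write_trans_file('case.transfile')   # placeholder file name, readable by dmftproj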
    def rotate_gf(self, gf_to_rot):
        """
        Uses the diagonalisation matrix w to rotate a given GF into the new basis.
@@ -104,29 +107,33 @@ class TransBasis:
        """

        # build a full GF
        gfrotated = BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=gf_to_rot.mesh))
                                                  for block, inner in self.SK.gf_struct_sumk[0]], make_copies=False)

        # transform the CTQMC blocks to the full matrix:
        # ish is the index of the inequivalent shell corresponding to icrsh
        ish = self.SK.corr_to_inequiv[0]
        for block, inner in self.gf_struct_solver[ish].iteritems():
            for ind1 in inner:
                for ind2 in inner:
                    gfrotated[self.SK.solver_to_sumk_block[ish][block]][ind1, ind2] << gf_to_rot[block][ind1, ind2]

        # Rotate using the matrix w
        for bname, gf in gfrotated:
            gfrotated[bname].from_L_G_R(self.w.transpose().conjugate(), gfrotated[bname], self.w)

        gfreturn = gf_to_rot.copy()
        # Put back into CTQMC basis:
        for block, inner in self.gf_struct_solver[ish].iteritems():
            for ind1 in inner:
                for ind2 in inner:
                    gfreturn[block][ind1, ind2] << gfrotated[self.SK.solver_to_sumk_block[0][block]][ind1, ind2]

        return gfreturn

    def write_trans_file(self, filename):
        """
        Writes the new transformation T into a file readable by dmftproj. By that, the requested quantity is
@@ -15,14 +15,17 @@ Please keep a copy of your old archive as this script is
If you encounter any problem please report it on github!
"""


def convert_shells(shells):
    shell_entries = ['atom', 'sort', 'l', 'dim']
    return [{name: int(val) for name, val in zip(shell_entries, shells[ish])} for ish in range(len(shells))]


def convert_corr_shells(corr_shells):
    corr_shell_entries = ['atom', 'sort', 'l', 'dim', 'SO', 'irep']
    return [{name: int(val) for name, val in zip(corr_shell_entries, corr_shells[icrsh])} for icrsh in range(len(corr_shells))]

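These helpers turn the old array-style shell lists into the list-of-dicts layout used by the current converters. A standalone sketch with a made-up two-shell input:

# hypothetical old-style shells: one row per shell with columns atom, sort, l, dim
old_shells = [[1, 1, 2, 5], [2, 2, 2, 5]]
new_shells = convert_shells(old_shells)
# each row becomes a dict, e.g. {'atom': 1, 'sort': 1, 'l': 2, 'dim': 5}
print new_shells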
def det_shell_equivalence(corr_shells):
    corr_to_inequiv = [0 for i in range(len(corr_shells))]
    inequiv_to_corr = [0]
@@ -61,7 +64,8 @@ old_to_new = {'SumK_LDA': 'dft_input', 'SumK_LDA_ParProj': 'dft_parproj_input',
              'SymmCorr': 'dft_symmcorr_input', 'SymmPar': 'dft_symmpar_input', 'SumK_LDA_Bands': 'dft_bands_input'}

for old, new in old_to_new.iteritems():
    if old not in A.keys():
        continue
    print "Changing %s to %s ..." % (old, new)
    A.copy(old, new)
    del(A[old])
@@ -70,7 +74,8 @@ for old, new in old_to_new.iteritems():
move_to_output = ['chemical_potential', 'dc_imp', 'dc_energ']
for obj in move_to_output:
    if obj in A['dft_input'].keys():
        if 'user_data' not in A:
            A.create_group('user_data')
        print "Moving %s to user_data ..." % obj
        A.copy('dft_input/' + obj, 'user_data/' + obj)
        del(A['dft_input'][obj])
@@ -104,8 +109,10 @@ if 'n_inequiv_shells' not in A['dft_input']:
# Rename variables
groups = ['dft_symmcorr_input', 'dft_symmpar_input']
for group in groups:
    if group not in A.keys():
        continue
    if 'n_s' not in A[group]:
        continue
    print "Changing n_s to n_symm ..."
    A[group].move('n_s', 'n_symm')
    # Convert orbits to list of dicts
@@ -118,8 +125,10 @@ for group in groups:

groups = ['dft_parproj_input']
for group in groups:
    if group not in A.keys():
        continue
    if 'proj_mat_pc' not in A[group]:
        continue
    print "Changing proj_mat_pc to proj_mat_all ..."
    A[group].move('proj_mat_pc', 'proj_mat_all')
|
@ -21,10 +21,10 @@
|
|||||||
|
|
||||||
version = "@DFT_TOOLS_VERSION@"
|
version = "@DFT_TOOLS_VERSION@"
|
||||||
triqs_hash = "@TRIQS_GIT_HASH@"
|
triqs_hash = "@TRIQS_GIT_HASH@"
|
||||||
cthyb_hash = "@CTHYB_GIT_HASH@"
|
dft_tools_hash = "@DFT_TOOLS_GIT_HASH@"
|
||||||
|
|
||||||
def show_version():
|
def show_version():
|
||||||
print "\nYou are using the dft_tools version %s\n"%version
|
print "\nYou are using the dft_tools version %s\n"%version
|
||||||
|
|
||||||
def show_git_hash():
|
def show_git_hash():
|
||||||
print "\nYou are using the dft_tools git hash %s based on triqs git hash %s\n"%(cthyb_hash, triqs_hash)
|
print "\nYou are using the dft_tools git hash %s based on triqs git hash %s\n"%(dft_tools_hash, triqs_hash)
|
||||||
|
@@ -14,5 +14,8 @@ triqs_add_python_test(sumkdft_basic)
triqs_add_python_test(srvo3_Gloc)
triqs_add_python_test(srvo3_transp)
triqs_add_python_test(sigma_from_file)
+triqs_add_python_test(blockstructure)

+# VASP converter tests
add_subdirectory(plovasp)
BIN   test/blockstructure.in.h5   (new file, binary file not shown)
83    test/blockstructure.py      (new file)
@@ -0,0 +1,83 @@
from pytriqs.applications.dft.sumk_dft import *
from pytriqs.utility.h5diff import h5diff
from pytriqs.gf.local import *
from pytriqs.utility.comparison_tests import assert_block_gfs_are_close
from pytriqs.applications.dft import BlockStructure

SK = SumkDFT('blockstructure.in.h5', use_dft_blocks=True)

original_bs = SK.block_structure

# check pick_gf_struct_solver
pick1 = original_bs.copy()
pick1.pick_gf_struct_solver([{'up_0': [1], 'up_1': [0], 'down_1': [0]}])

# check loading a block_structure from file
SK.block_structure = SK.load(['block_structure'], 'mod')[0]
assert SK.block_structure == pick1, 'loading SK block structure from file failed'

# check SumkDFT backward compatibility
sk_pick1 = BlockStructure(gf_struct_sumk=SK.gf_struct_sumk,
                          gf_struct_solver=SK.gf_struct_solver,
                          solver_to_sumk=SK.solver_to_sumk,
                          sumk_to_solver=SK.sumk_to_solver,
                          solver_to_sumk_block=SK.solver_to_sumk_block)
assert sk_pick1 == pick1, 'constructing block structure from SumkDFT properties failed'

# check pick_gf_struct_sumk
pick2 = original_bs.copy()
pick2.pick_gf_struct_sumk([{'up': [1, 2], 'down': [0, 1]}])

# check map_gf_struct_solver
mapping = [{('down_0', 0): ('down', 0),
            ('down_0', 1): ('down', 2),
            ('down_1', 0): ('down', 1),
            ('up_0', 0): ('down_1', 0),
            ('up_0', 1): ('up_0', 0)}]
map1 = original_bs.copy()
map1.map_gf_struct_solver(mapping)

# check create_gf
G1 = original_bs.create_gf(beta=40, n_points=3)
i = 1
for block, gf in G1:
    gf << SemiCircular(i)
    i += 1

# check approximate_as_diagonal
offd = original_bs.copy()
offd.approximate_as_diagonal()

# check map_gf_struct_solver
G2 = map1.convert_gf(G1, original_bs, beta=40, n_points=3, show_warnings=False)

# check full_structure
full = BlockStructure.full_structure([{'up_0': [0, 1], 'up_1': [0], 'down_1': [0], 'down_0': [0, 1]}], None)

# check __eq__
assert full == full, 'equality not correct (equal structures not equal)'
assert pick1 == pick1, 'equality not correct (equal structures not equal)'
assert pick1 != pick2, 'equality not correct (different structures not different)'
assert original_bs != offd, 'equality not correct (different structures not different)'

if mpi.is_master_node():
    with HDFArchive('blockstructure.out.h5', 'w') as ar:
        ar['original_bs'] = original_bs
        ar['pick1'] = pick1
        ar['pick2'] = pick2
        ar['map1'] = map1
        ar['offd'] = offd
        ar['G1'] = G1
        ar['G2'] = G2
        ar['full'] = full

    # cannot use h5diff because BlockStructure testing is not implemented
    # there (and seems difficult to implement because it would mix triqs
    # and dft_tools)
    with HDFArchive('blockstructure.out.h5', 'r') as ar,\
            HDFArchive('blockstructure.ref.h5', 'r') as ar2:
        for k in ar2:
            if isinstance(ar[k], BlockGf):
                assert_block_gfs_are_close(ar[k], ar2[k], 1.e-6)
            else:
                assert ar[k] == ar2[k], '{} not equal'.format(k)
BIN   test/blockstructure.ref.h5   (new file, binary file not shown)
@@ -51,5 +51,4 @@ SK.set_Sigma([Sigma_txt])
SK.hdf_file = 'sigma_from_file.out.h5'
SK.save(['Sigma_imp_w'])

-if ((Sigma_txt - Sigma_hdf).real < 1e-6) & ((Sigma_txt - Sigma_hdf).imag < 1e-6):
-    print 'Conversion: HDF -> TRIQS -> TXT -> TRIQS successful!'
+assert_block_gfs_are_close(Sigma_txt, Sigma_hdf)