Mirror of https://github.com/triqs/dft_tools, synced 2024-11-07 06:33:48 +01:00

Merge tag '1.5': Release 1.5

commit 1bab92c721
.clang-format (new file, 45 lines)

@@ -0,0 +1,45 @@
BasedOnStyle: LLVM

AccessModifierOffset: 0
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: false
AlignOperands: false
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: true
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BreakBeforeBinaryOperators: NonAssignment
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
BreakStringLiterals: false
ColumnLimit: 150
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 3
ContinuationIndentWidth: 3
Cpp11BracedListStyle: true
DerivePointerBinding : false
IndentCaseLabels: true
IndentWidth: 2
Language: Cpp
MaxEmptyLinesToKeep: 1
NamespaceIndentation : All
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
SpaceAfterControlStatementKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceInEmptyParentheses: false
SpacesInParentheses: false
Standard: Cpp11
TabWidth: 2
UseTab: Never
.dockerignore (new file, 3 lines)

@@ -0,0 +1,3 @@
.git
Dockerfile
Jenkinsfile
.travis.yml (new file, 48 lines)

@@ -0,0 +1,48 @@

language: cpp
sudo: required
dist: trusty

compiler:
  - gcc
# - clang

before_install:
  - sudo add-apt-repository 'deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main' -y
  - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
  - sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y
  - sudo apt-get update
  - sudo apt-get install -y --allow-unauthenticated g++-7 clang-5.0
  - export LIBRARY_PATH=/usr/lib/llvm-5.0/lib:$LIBRARY_PATH
  - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/g++ g++ /usr/bin/g++-7
  - sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-5.0 60 --slave /usr/bin/clang++ clang++ /usr/bin/clang++-5.0
  - sudo apt-get install -y --allow-unauthenticated libboost-all-dev cmake git libgfortran3 gfortran openmpi-bin openmpi-common openmpi-doc libopenmpi-dev libblas-dev liblapack-dev libfftw3-dev libgmp-dev hdf5-tools libhdf5-serial-dev python-h5py python-dev python-numpy python-scipy python-jinja2 python-virtualenv python-matplotlib python-tornado python-zmq python-mpi4py python-mako clang-format-5.0 libclang-5.0-dev python-clang-5.0 python-sphinx libjs-mathjax valgrind libnfft3-dev

install: true

script:
  # ===== Set up Cpp2Py
  - git clone https://github.com/triqs/cpp2py
  - mkdir cpp2py/build && cd cpp2py/build
  - git checkout master
  - cmake .. -DCMAKE_CXX_COMPILER=/usr/bin/${CXX} -DPYTHON_INTERPRETER=/usr/bin/python -DCMAKE_INSTALL_PREFIX=$TRAVIS_BUILD_DIR/root_install
  - make -j8 install
  - cd $TRAVIS_BUILD_DIR
  - source root_install/share/cpp2pyvars.sh
  # ===== Set up TRIQS
  - git clone https://github.com/TRIQS/triqs --branch unstable
  - mkdir triqs/build && cd triqs/build
  - git checkout unstable
  - cmake .. -DCMAKE_CXX_COMPILER=/usr/bin/${CXX} -DBuild_Tests=OFF -DCMAKE_INSTALL_PREFIX=$TRAVIS_BUILD_DIR/root_install -DCMAKE_BUILD_TYPE=Debug
  - make -j8 install
  - cd $TRAVIS_BUILD_DIR
  - source root_install/share/triqsvars.sh
  # ===== Set up dft_tools and Test using fsanitize=address
  - mkdir build && cd build
  - cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=/usr/bin/${CXX} -DCMAKE_CXX_FLAGS='-fsanitize=address -fno-omit-frame-pointer -fuse-ld=gold'
  - make -j8
  - export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-5.0/bin/llvm-symbolizer
  - export ASAN_OPTIONS=symbolize=1:detect_leaks=0
  - export CTEST_OUTPUT_ON_FAILURE=1
  - if [ "$CXX" = g++ ]; then export LD_PRELOAD=/usr/lib/gcc/x86_64-linux-gnu/7/libasan.so; elif [ "$CXX" = clang++ ]; then export LD_PRELOAD=/usr/lib/llvm-5.0/lib/clang/5.0.1/lib/linux/libclang_rt.asan-x86_64.so; fi
  - cd test && ctest
CMakeLists.txt

@@ -1,50 +1,83 @@
 # Version number of the application
-set (DFT_TOOLS_VERSION "1.4")
-set (DFT_TOOLS_RELEASE "1.4.0")
+set (DFT_TOOLS_VERSION "1.5")
+set (DFT_TOOLS_RELEASE "1.5.0")

-# Append triqs installed files to the cmake load path
-list(APPEND CMAKE_MODULE_PATH ${TRIQS_PATH}/share/triqs/cmake)
-
-# start configuration
-cmake_minimum_required(VERSION 2.8)
-project(dft_tools CXX Fortran)
-set(CMAKE_BUILD_TYPE Release)
-enable_testing()
-
-# Load TRIQS, including all predefined variables from TRIQS installation
-find_package(TRIQS REQUIRED)
-
-# Check that versions are compatible
-if(NOT DFT_TOOLS_VERSION EQUAL TRIQS_VERSION)
-  message(FATAL_ERROR "The application version is not compatible with the TRIQS library (TRIQS library version: ${TRIQS_VERSION} while this application version: ${DFT_TOOLS_VERSION})")
-endif()
+# Default to Release build type
+if(NOT CMAKE_BUILD_TYPE)
+  set(CMAKE_BUILD_TYPE Release CACHE STRING "Type of build" FORCE)
+endif()
+message( STATUS "-------- BUILD-TYPE: ${CMAKE_BUILD_TYPE} -------------")

+# start configuration
+cmake_minimum_required(VERSION 2.8)
+project(dft_tools C CXX Fortran)
+
+# Use shared libraries
+set(BUILD_SHARED_LIBS ON)
+
+# Load TRIQS and Cpp2Py
+find_package(TRIQS 1.5 EXACT REQUIRED)
+find_package(Cpp2Py REQUIRED)
+
 if (NOT ${TRIQS_WITH_PYTHON_SUPPORT})
   MESSAGE(FATAL_ERROR "dft_tools require Python support in TRIQS")
 endif()

-# Get hash
-triqs_get_git_hash(${CMAKE_SOURCE_DIR} "DFT_TOOLS")
-if(${GIT_RESULT} EQUAL 0)
-  message(STATUS "Hash: ${DFT_TOOLS_GIT_HASH}")
-endif(${GIT_RESULT} EQUAL 0)
+# Default Install directory to TRIQS_ROOT if not given. Checks an absolute name is given.
+if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT OR (NOT IS_ABSOLUTE ${CMAKE_INSTALL_PREFIX}))
+  message(STATUS " No install prefix given (or invalid). Defaulting to TRIQS_ROOT")
+  set(CMAKE_INSTALL_PREFIX ${TRIQS_ROOT} CACHE PATH "default install path" FORCE)
+endif()
+message(STATUS "-------- CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX} -------------")

-# We want to be installed in the TRIQS tree
-set(CMAKE_INSTALL_PREFIX ${TRIQS_PATH})
+# Macro defined in TRIQS which picks the hash of repo.
+triqs_get_git_hash_of_source_dir(DFT_TOOLS_GIT_HASH)
+message(STATUS "Git hash: ${DFT_TOOLS_GIT_HASH}")

 add_subdirectory(fortran/dmftproj)

 # Add the compiling options (-D... ) for C++
 message(STATUS "TRIQS : Adding compilation flags detected by the library (C++11/14, libc++, etc...) ")
-add_definitions(${TRIQS_CXX_DEFINITIONS})

 add_subdirectory(c++)
 add_subdirectory(python)
 add_subdirectory(shells)
-add_subdirectory(test)

-option(BUILD_DOC "Build documentation" OFF)
-if(${BUILD_DOC})
-  if(NOT TRIQS_WITH_DOCUMENTATION)
+#------------------------
+# tests
+#------------------------
+
+enable_testing()
+
+option(Build_Tests "Build the tests of the library " ON)
+if (Build_Tests)
+  message(STATUS "-------- Preparing tests -------------")
+  add_subdirectory(test)
+endif()
+
+#------------------------
+# Documentation
+#------------------------
+option(Build_Documentation "Build documentation" OFF)
+if(${Build_Documentation})
+  if(NOT ${TRIQS_WITH_DOCUMENTATION})
    message("Error: TRIQS library has not been compiled with its documentation")
  endif()
  add_subdirectory(doc)
-endif(${BUILD_DOC})
+endif()
+
+#--------------------------------------------------------
+# Packaging
+#--------------------------------------------------------
+option(BUILD_DEBIAN_PACKAGE "Build a deb package" OFF)
+if(BUILD_DEBIAN_PACKAGE)
+  if(NOT CMAKE_INSTALL_PREFIX STREQUAL "/usr")
+    message(FATAL_ERROR "CMAKE_INSTALL_PREFIX must be /usr for packaging")
+  endif()
+  SET(CPACK_GENERATOR "DEB")
+  SET(CPACK_PACKAGE_VERSION ${DFT_TOOLS_VERSION})
+  SET(CPACK_PACKAGE_CONTACT "https://github.com/TRIQS/dft_tools")
+  EXECUTE_PROCESS(COMMAND dpkg --print-architecture OUTPUT_VARIABLE CMAKE_DEBIAN_PACKAGE_ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
+  SET(CPACK_DEBIAN_PACKAGE_DEPENDS "libc6 (>= 2.23), libgcc1 (>= 1:6), libstdc++6, python, libpython2.7, libopenmpi1.10, libhdf5-10, libgmp10, libfftw3-double3, libibverbs1, libgfortran3, zlib1g, libsz2, libhwloc5, libquadmath0, libaec0, libnuma1, libltdl7, libblas3, liblapack3, python-numpy, python-h5py, python-jinja2, python-mako, python-mpi4py, python-matplotlib, python-scipy, cpp2py (= ${DFT_TOOLS_VERSION}), triqs (= ${DFT_TOOLS_VERSION})")
+  INCLUDE(CPack)
+endif()
Dockerfile (new file, 12 lines)

@@ -0,0 +1,12 @@
# See ../triqs/packaging for other options
FROM flatironinstitute/triqs:master-ubuntu-clang

ARG APPNAME=dft_tools
COPY . $SRC/$APPNAME
WORKDIR $BUILD/$APPNAME
RUN chown build .
USER build
ARG BUILD_DOC=0
RUN cmake $SRC/$APPNAME -DTRIQS_ROOT=${INSTALL} -DBuild_Documentation=${BUILD_DOC} && make -j2 && make test
USER root
RUN make install
Jenkinsfile (new file, vendored, 140 lines)

@@ -0,0 +1,140 @@
def projectName = "dft_tools"
def documentationPlatform = "ubuntu-clang"
def triqsBranch = env.CHANGE_TARGET ?: env.BRANCH_NAME
def triqsProject = '/TRIQS/triqs/' + triqsBranch.replaceAll('/', '%2F')
def publish = !env.BRANCH_NAME.startsWith("PR-")

properties([
  disableConcurrentBuilds(),
  buildDiscarder(logRotator(numToKeepStr: '10', daysToKeepStr: '30')),
  pipelineTriggers([
    upstream(
      threshold: 'SUCCESS',
      upstreamProjects: triqsProject
    )
  ])
])

/* map of all builds to run, populated below */
def platforms = [:]

def dockerPlatforms = ["ubuntu-clang", "ubuntu-gcc", "centos-gcc"]
/* .each is currently broken in jenkins */
for (int i = 0; i < dockerPlatforms.size(); i++) {
  def platform = dockerPlatforms[i]
  platforms[platform] = { -> node('docker') {
    stage(platform) { timeout(time: 1, unit: 'HOURS') {
      checkout scm
      /* construct a Dockerfile for this base */
      sh """
        ( echo "FROM flatironinstitute/triqs:${triqsBranch}-${env.STAGE_NAME}" ; sed '0,/^FROM /d' Dockerfile ) > Dockerfile.jenkins
        mv -f Dockerfile.jenkins Dockerfile
      """
      /* build and tag */
      def img = docker.build("flatironinstitute/${projectName}:${env.BRANCH_NAME}-${env.STAGE_NAME}", "--build-arg BUILD_DOC=${platform==documentationPlatform} .")
      if (!publish || platform != documentationPlatform) {
        /* but we don't need the tag so clean it up (except for documentation) */
        sh "docker rmi --no-prune ${img.imageName()}"
      }
    } }
  } }
}

def osxPlatforms = [
  ["gcc", ['CC=gcc-7', 'CXX=g++-7']],
  ["clang", ['CC=/usr/local/opt/llvm/bin/clang', 'CXX=/usr/local/opt/llvm/bin/clang++', 'CXXFLAGS=-I/usr/local/opt/llvm/include', 'LDFLAGS=-L/usr/local/opt/llvm/lib']]
]
for (int i = 0; i < osxPlatforms.size(); i++) {
  def platformEnv = osxPlatforms[i]
  def platform = platformEnv[0]
  platforms["osx-$platform"] = { -> node('osx && triqs') {
    stage("osx-$platform") { timeout(time: 1, unit: 'HOURS') {
      def srcDir = pwd()
      def tmpDir = pwd(tmp:true)
      def buildDir = "$tmpDir/build"
      def installDir = "$tmpDir/install"
      def triqsDir = "${env.HOME}/install/triqs/${triqsBranch}/${platform}"
      dir(installDir) {
        deleteDir()
      }

      checkout scm
      dir(buildDir) { withEnv(platformEnv[1]+[
          "PATH=$triqsDir/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin",
          "CPATH=$triqsDir/include",
          "LIBRARY_PATH=$triqsDir/lib",
          "CMAKE_PREFIX_PATH=$triqsDir/share/cmake"]) {
        deleteDir()
        sh "cmake $srcDir -DCMAKE_INSTALL_PREFIX=$installDir -DTRIQS_ROOT=$triqsDir"
        sh "make -j3"
        try {
          sh "make test"
        } catch (exc) {
          archiveArtifacts(artifacts: 'Testing/Temporary/LastTest.log')
          throw exc
        }
        sh "make install"
      } }
    } }
  } }
}

try {
  parallel platforms
  if (publish) { node("docker") {
    stage("publish") { timeout(time: 1, unit: 'HOURS') {
      def commit = sh(returnStdout: true, script: "git rev-parse HEAD").trim()
      def workDir = pwd()
      dir("$workDir/gh-pages") {
        def subdir = env.BRANCH_NAME
        git(url: "ssh://git@github.com/TRIQS/${projectName}.git", branch: "gh-pages", credentialsId: "ssh", changelog: false)
        sh "rm -rf ${subdir}"
        docker.image("flatironinstitute/${projectName}:${env.BRANCH_NAME}-${documentationPlatform}").inside() {
          sh "cp -rp \$INSTALL/share/doc/${projectName} ${subdir}"
        }
        sh "git add -A ${subdir}"
        sh """
          git commit --author='Flatiron Jenkins <jenkins@flatironinstitute.org>' --allow-empty -m 'Generated documentation for ${env.BRANCH_NAME}' -m '${env.BUILD_TAG} ${commit}'
        """
        // note: credentials used above don't work (need JENKINS-28335)
        sh "git push origin gh-pages"
      }
      dir("$workDir/docker") { try {
        git(url: "ssh://git@github.com/TRIQS/docker.git", branch: env.BRANCH_NAME, credentialsId: "ssh", changelog: false)
        sh "echo '160000 commit ${commit}\t${projectName}' | git update-index --index-info"
        sh """
          git commit --author='Flatiron Jenkins <jenkins@flatironinstitute.org>' --allow-empty -m 'Autoupdate ${projectName}' -m '${env.BUILD_TAG}'
        """
        // note: credentials used above don't work (need JENKINS-28335)
        sh "git push origin ${env.BRANCH_NAME}"
      } catch (err) {
        echo "Failed to update docker repo"
      } }
    } }
  } }
} catch (err) {
  if (env.BRANCH_NAME != "jenkins") emailext(
    subject: "\$PROJECT_NAME - Build # \$BUILD_NUMBER - FAILED",
    body: """\$PROJECT_NAME - Build # \$BUILD_NUMBER - FAILED

$err

Check console output at \$BUILD_URL to view full results.

Building \$BRANCH_NAME for \$CAUSE
\$JOB_DESCRIPTION

Chages:
\$CHANGES

End of build log:
\${BUILD_LOG,maxLines=60}
""",
    to: 'mzingl@flatironinstitute.org, hstrand@flatironinstitute.org, nils.wentzell@gmail.com, dsimon@flatironinstitute.org',
    recipientProviders: [
      [$class: 'DevelopersRecipientProvider'],
    ],
    replyTo: '$DEFAULT_REPLYTO'
  )
  throw err
}
@@ -1,7 +1,6 @@
-# Linking and include info
 add_library(atm_c dos_tetra3d.hpp dos_tetra3d.cpp argsort.hpp argsort.cpp)
-set_target_properties(atm_c PROPERTIES LINKER_LANGUAGE CXX)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/c++/plovasp/atm ${TRIQS_INCLUDE_ALL})
+target_link_libraries(atm_c triqs)
+target_compile_options(atm_c PRIVATE -std=c++17)

 install(TARGETS atm_c DESTINATION lib)

@@ -1,24 +1,13 @@
-find_package(TriqsTest)
 enable_testing()

-# Linking and include info
-#add_library(atm_c dos_tetra3d.hpp dos_tetra3d.cpp argsort.h argsort.c)
-#set_target_properties(atm_c PROPERTIES LINKER_LANGUAGE CXX)
-#include_directories(${CMAKE_CURRENT_SOURCE_DIR}/c++/plovasp/atm ${TRIQS_INCLUDE_ALL})
-
 FILE(GLOB TestList RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp)
 FOREACH( TestName1 ${TestList} )
  STRING(REPLACE ".cpp" "" TestName ${TestName1})
- add_executable( ${TestName} ${CMAKE_CURRENT_SOURCE_DIR}/${TestName}.cpp )
- target_link_libraries( ${TestName} atm_c ${TRIQS_LIBRARY_ALL} )
+ add_executable( ${TestName} ${TestName}.cpp )
+ target_link_libraries( ${TestName} atm_c triqs)
  triqs_set_rpath_for_target( ${TestName} )
- triqs_add_cpp_test( ${TestName} )
- if (TESTS_C_WITH_VALGRIND)
-  add_test ( ${TestName}_valgrind valgrind --error-exitcode=1 ${CMAKE_CURRENT_BINARY_DIR}/${TestName})
- endif()
+ add_test(NAME ${TestName} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${t})
 ENDFOREACH( TestName1 ${TestList} )

-#add_executable(test_atm test2py.cpp)
-#target_link_libraries(test_atm atm_c)
-
-#add_subdirectory(test)
-
@@ -1,10 +1,10 @@
 import pytriqs.utility.mpi as mpi
 from pytriqs.operators.util import *
 from pytriqs.archive import HDFArchive
-from pytriqs.applications.impurity_solvers.cthyb import *
-from pytriqs.gf.local import *
-from pytriqs.applications.dft.sumk_dft import *
-from pytriqs.applications.dft.converters.wien2k_converter import *
+from triqs_cthyb import *
+from pytriqs.gf import *
+from triqs_dft_tools.sumk_dft import *
+from triqs_dft_tools.converters.wien2k_converter import *

 dft_filename='Gd_fcc'
 U = 9.6
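The same renaming pattern recurs throughout the release: the application package moves from ``pytriqs.applications.dft`` to ``triqs_dft_tools``, the solver from ``pytriqs.applications.impurity_solvers.cthyb`` to ``triqs_cthyb``, and ``pytriqs.gf.local`` collapses into ``pytriqs.gf``. A minimal sketch of the correspondence for a script header, assuming a TRIQS/dft_tools 1.5 installation (the explicit class names are illustrative; the scripts above use star imports)::

    # old (1.4) module paths, kept as comments for reference
    # from pytriqs.applications.dft.sumk_dft import SumkDFT
    # from pytriqs.applications.impurity_solvers.cthyb import Solver
    # from pytriqs.gf.local import GfImFreq, BlockGf, inverse

    # new (1.5) module paths
    from triqs_dft_tools.sumk_dft import SumkDFT
    from triqs_cthyb import Solver
    from pytriqs.gf import GfImFreq, BlockGf, inverse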
@@ -52,7 +52,7 @@ spin_names = ["up","down"]
 orb_names = [i for i in range(n_orb)]

 # Use GF structure determined by DFT blocks
-gf_struct = SK.gf_struct_solver[0]
+gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()]
 # Construct U matrix for density-density calculations
 Umat, Upmat = U_matrix_kanamori(n_orb=n_orb, U_int=U, J_hund=J)
 # Construct Hamiltonian and solver
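The new ``gf_struct`` line adapts the script to the solver interface used by triqs_cthyb 1.5, which expects a list of (block name, index list) pairs instead of the plain dictionary stored in ``SK.gf_struct_solver``. A minimal sketch of the conversion with hypothetical block names (Python 2, matching the scripts in this release)::

    # dictionary as stored by SumkDFT for one inequivalent shell
    gf_struct_solver = {'up_0': [0, 1, 2], 'down_0': [0, 1, 2]}

    # list of (block, indices) pairs as expected by Solver(beta=..., gf_struct=...)
    gf_struct = [(block, indices) for block, indices in gf_struct_solver.iteritems()]
    # e.g. [('up_0', [0, 1, 2]), ('down_0', [0, 1, 2])]; order follows dict iteration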
@@ -1,29 +1,23 @@
 # generate the conf.py
-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in ${CMAKE_CURRENT_BINARY_DIR}/conf.py)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in ${CMAKE_CURRENT_BINARY_DIR}/conf.py @ONLY)

-# all rst files of the documentation
-file(GLOB_RECURSE doc_sources *.rst)
+# ---------------------------------
+# Top Sphinx target
+# ---------------------------------
+# Sources
+file(GLOB_RECURSE sources *.rst)

 # create documentation target
 set(sphinx_top ${CMAKE_CURRENT_BINARY_DIR}/html/index.html)
-add_custom_command(OUTPUT ${sphinx_top} DEPENDS ${doc_sources} py_copy
- COMMAND ${CMAKE_BINARY_DIR}/build_pytriqs ${TRIQS_SPHINXBUILD_EXECUTABLE} -c . -b html ${CMAKE_CURRENT_SOURCE_DIR} html)
-add_custom_target(doc_sphinx ALL DEPENDS ${sphinx_top})
+add_custom_command(OUTPUT ${sphinx_top} DEPENDS ${sources}
+ COMMAND ${TRIQS_SPHINXBUILD_EXECUTABLE} -c . -j8 -b html ${CMAKE_CURRENT_SOURCE_DIR} html)
+add_custom_target(doc_sphinx ALL DEPENDS ${sphinx_top} ${CMAKE_CURRENT_BINARY_DIR})

-# install
+# ---------------------------------
+# Install
+# ---------------------------------
 install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html/ COMPONENT documentation DESTINATION share/doc/dft_tools
  FILES_MATCHING
- PATTERN "*.html"
- PATTERN "*.png"
- PATTERN "*.js"
+ REGEX "\\.(html|pdf|png|gif|jpg|js|xsl|css|py|txt|inv|bib)$"
  PATTERN "_*"
- PATTERN "*.jpg"
- PATTERN "*.gif"
- PATTERN "*.xsl"
- PATTERN "*.css"
- PATTERN "*.pdf"
- PATTERN "*.py"
- PATTERN "*.txt"
- PATTERN "*.inv"
- PATTERN "*.bib"
 )
@@ -1,4 +1,4 @@
-.. module:: pytriqs.applications.dft
+.. module:: triqs_dft_tools

 .. _documentation:

@@ -23,11 +23,11 @@ Loading modules

 First, we load the necessary modules::

-    from pytriqs.applications.dft.sumk_dft import *
-    from pytriqs.gf.local import *
+    from triqs_dft_tools.sumk_dft import *
+    from pytriqs.gf import *
     from pytriqs.archive import HDFArchive
     from pytriqs.operators.util import *
-    from pytriqs.applications.impurity_solvers.cthyb import *
+    from triqs_cthyb import *

 The last two lines load the modules for the construction of the
 :ref:`CTHYB solver <triqscthyb:welcome>`.

@@ -56,7 +56,7 @@ Initializing the solver
 -----------------------

 We also have to specify the :ref:`CTHYB solver <triqscthyb:welcome>` related settings.
 We assume that the DMFT script for SrVO3 is executed on 16 cores. A sufficient set
 of parameters for a first guess is::

     p = {}

@@ -80,7 +80,7 @@ each material individually. A guide on how to set the tail fit parameters is giv


 The next step is to initialize the
-:class:`solver class <pytriqs.applications.impurity_solvers.cthyb.Solver>`.
+:class:`solver class <triqs_cthyb.Solver>`.
 It consist of two parts:

 #. Calculating the multi-band interaction matrix, and constructing the

@@ -94,7 +94,7 @@ The first step is done using methods of the :ref:`TRIQS <triqslibs:welcome>` lib
     spin_names = ["up","down"]
     orb_names = [i for i in range(n_orb)]
     # Use GF structure determined by DFT blocks:
-    gf_struct = SK.gf_struct_solver[0]
+    gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()]
     # Construct U matrix for density-density calculations:
     Umat, Upmat = U_matrix_kanamori(n_orb=n_orb, U_int=U, J_hund=J)

@@ -102,7 +102,7 @@ We assumed here that we want to use an interaction matrix with
 Kanamori definitions of :math:`U` and :math:`J`.

 Next, we construct the Hamiltonian and the solver::

     h_int = h_int_density(spin_names, orb_names, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat)
     S = Solver(beta=beta, gf_struct=gf_struct)

@@ -125,7 +125,7 @@ some additional refinements::

     for iteration_number in range(1,loops+1):
         if mpi.is_master_node(): print "Iteration = ", iteration_number

         SK.symm_deg_gf(S.Sigma_iw,orb=0)                        # symmetrizing Sigma
         SK.set_Sigma([ S.Sigma_iw ])                            # put Sigma into the SumK class
         chemical_potential = SK.calc_mu( precision = prec_mu )  # find the chemical potential for given density

@@ -137,17 +137,17 @@ some additional refinements::
         dm = S.G_iw.density()
         SK.calc_dc(dm, U_interact = U, J_hund = J, orb = 0, use_dc_formula = dc_type)
         S.Sigma_iw << SK.dc_imp[0]['up'][0,0]

         # Calculate new G0_iw to input into the solver:
         S.G0_iw << S.Sigma_iw + inverse(S.G_iw)
         S.G0_iw << inverse(S.G0_iw)

         # Solve the impurity problem:
         S.solve(h_int=h_int, **p)

         # Solved. Now do post-solution stuff:
         mpi.report("Total charge of impurity problem : %.6f"%S.G_iw.total_density())

         # Now mix Sigma and G with factor mix, if wanted:
         if (iteration_number>1 or previous_present):
             if mpi.is_master_node():

@@ -158,7 +158,7 @@ some additional refinements::
             del ar
             S.G_iw << mpi.bcast(S.G_iw)
             S.Sigma_iw << mpi.bcast(S.Sigma_iw)

         # Write the final Sigma and G to the hdf5 archive:
         if mpi.is_master_node():
             ar = HDFArchive(dft_filename+'.h5','a')

@@ -188,8 +188,8 @@ to start with a lower statistics (less measurements), but then increase it at a
 point close to converged results (e.g. after a few initial iterations). This helps
 to keep computational costs low during the first iterations.

 Using the Kanamori Hamiltonian and the parameters above (but on 16 cores),
 your self energy after the **first iteration** should look like the
 self energy shown below.

 .. image:: images_scripts/SrVO3_Sigma_iw_it1.png

@@ -208,12 +208,12 @@ Therefore disabled the tail fitting first::
     p["perform_tail_fit"] = False

 and perform only one DMFT iteration. The resulting self energy can be tail fitted by hand::

     Sigma_iw_fit = S.Sigma_iw.copy()
     Sigma_iw_fit << tail_fit(S.Sigma_iw, fit_max_moment = 4, fit_min_n = 40, fit_max_n = 160)[0]

 Plot the self energy and adjust the tail fit parameters such that you obtain a
-proper fit. The :meth:`tail_fit function <pytriqs.gf.local.tools.tail_fit>` is part
+proper fit. The :meth:`fit_tail function <pytriqs.gf.tools.tail_fit>` is part
 of the :ref:`TRIQS <triqslibs:welcome>` library.

 For a self energy which is going to zero for :math:`i\omega \rightarrow 0` our suggestion is
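A short sketch of the by-hand procedure described in this section, assuming a single iteration has produced ``S.Sigma_iw`` and that ``tail_fit`` is taken from ``pytriqs.gf.tools`` as referenced above; the block name and fit window are placeholders to be adjusted per material::

    from pytriqs.gf.tools import tail_fit
    from pytriqs.plot.mpl_interface import oplot

    Sigma_iw_fit = S.Sigma_iw.copy()
    Sigma_iw_fit << tail_fit(S.Sigma_iw, fit_max_moment=4, fit_min_n=40, fit_max_n=160)[0]

    # compare raw and fitted self energies for one block ('up_0' is a placeholder name)
    oplot(S.Sigma_iw['up_0'][0, 0], '-o', name='raw')
    oplot(Sigma_iw_fit['up_0'][0, 0], '-x', name='fit')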
@@ -27,7 +27,7 @@ Initialisation
 All tools described below are collected in an extension of the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class and are
 loaded by importing the module :class:`SumkDFTTools <dft.sumk_dft_tools.SumkDFTTools>`::

-    from pytriqs.applications.dft.sumk_dft_tools import *
+    from triqs_dft_tools.sumk_dft_tools import *

 The initialisation of the class is equivalent to that of the :class:`SumkDFT <dft.sumk_dft.SumkDFT>`
 class::

@@ -37,7 +37,7 @@ class::
 Note that all routines available in :class:`SumkDFT <dft.sumk_dft.SumkDFT>` are also available here.

 If required, we have to load and initialise the real frequency self energy. Most conveniently,
-you have your self energy already stored as a real frequency :class:`BlockGf <pytriqs.gf.local.BlockGf>` object
+you have your self energy already stored as a real frequency :class:`BlockGf <pytriqs.gf.BlockGf>` object
 in a hdf5 file::

     ar = HDFArchive('case.h5', 'a')

@@ -45,10 +45,10 @@ in a hdf5 file::

 You may also have your self energy stored in text files. For this case the :ref:`TRIQS <triqslibs:welcome>` library offers
 the function :meth:`read_gf_from_txt`, which is able to load the data from text files of one Greens function block
-into a real frequency :class:`ReFreqGf <pytriqs.gf.local.ReFreqGf>` object. Loading each block separately and
-building up a :class:´BlockGf <pytriqs.gf.local.BlockGf>´ is done with::
+into a real frequency :class:`ReFreqGf <pytriqs.gf.ReFreqGf>` object. Loading each block separately and
+building up a :class:´BlockGf <pytriqs.gf.BlockGf>´ is done with::

-    from pytriqs.gf.local.tools import *
+    from pytriqs.gf.tools import *
     # get block names
     n_list = [n for n,nl in SK.gf_struct_solver[0].iteritems()]
     # load sigma for each block - in this example sigma is composed of 1x1 blocks
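A sketch of the block-by-block loading loop that the paragraph describes, assuming one text file per 1x1 block with a hypothetical ``sigma_<block>.dat`` naming scheme and the ``read_gf_from_txt`` helper imported above::

    from pytriqs.gf import BlockGf

    # one real-frequency Gf per block, read from its own text file
    g_list = [read_gf_from_txt(block_txtfiles=[['sigma_' + name + '.dat']], block_name=name)
              for name in n_list]

    # assemble the blocks into the real-frequency self energy
    SigmaReFreq = BlockGf(name_list=n_list, block_list=g_list)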
@@ -107,7 +107,7 @@ Now we convert these files into an hdf5 file that can be used for the
 DMFT calculations. For this purpose we
 use the python module :class:`Wien2kConverter <dft.converters.wien2k_converter.Wien2kConverter>`. It is initialized as::

-    from pytriqs.applications.dft.converters.wien2k_converter import *
+    from triqs_dft_tools.converters.wien2k_converter import *
     Converter = Wien2kConverter(filename = case)

 The only necessary parameter to this construction is the parameter `filename`.

@@ -338,7 +338,7 @@ matrix of the imaginary part, and then move on to the next :math:`\mathbf{k}`-po

 The converter itself is used as::

-    from pytriqs.applications.dft.converters.hk_converter import *
+    from triqs_dft_tools.converters.hk_converter import *
     Converter = HkConverter(filename = hkinputfile)
     Converter.convert_dft_input()

@@ -372,7 +372,7 @@ as a placeholder for the actual prefix chosen by the user when creating the
 input for :program:`wannier90`.
 Once these two files are available, one can use the converter as follows::

-    from pytriqs.applications.dft.converters import Wannier90Converter
+    from triqs_dft_tools.converters import Wannier90Converter
     Converter = Wannier90Converter(seedname='seedname')
     Converter.convert_dft_input()

@@ -22,7 +22,7 @@ The first thing is the :class:`SumkDFT <dft.sumk_dft.SumkDFT>` class.
 It contains all basic routines that are necessary to perform a summation in k-space
 to get the local quantities used in DMFT. It is initialized by::

-    from pytriqs.applications.dft.sumk_dft import *
+    from triqs_dft_tools.sumk_dft import *
     SK = SumkDFT(hdf_file = filename + '.h5')


@@ -1,5 +1,5 @@
-from pytriqs.applications.dft.sumk_dft import *
-from pytriqs.applications.dft.converters.wien2k_converter import *
+from triqs_dft_tools.sumk_dft import *
+from triqs_dft_tools.converters.wien2k_converter import *
 from pytriqs.applications.impurity_solvers.hubbard_I.hubbard_solver import Solver

 import os

@@ -1,5 +1,5 @@
-from pytriqs.applications.dft.sumk_dft_tools import *
-from pytriqs.applications.dft.converters.wien2k_converter import *
+from triqs_dft_tools.sumk_dft_tools import *
+from triqs_dft_tools.converters.wien2k_converter import *
 from pytriqs.applications.impurity_solvers.hubbard_I.hubbard_solver import Solver

 # Creates the data directory, cd into it:
@@ -1,9 +1,9 @@
 import pytriqs.utility.mpi as mpi
 from pytriqs.operators.util import *
 from pytriqs.archive import HDFArchive
-from pytriqs.applications.impurity_solvers.cthyb import *
-from pytriqs.gf.local import *
-from pytriqs.applications.dft.sumk_dft import *
+from triqs_cthyb import *
+from pytriqs.gf import *
+from triqs_dft_tools.sumk_dft import *

 dft_filename='SrVO3'
 U = 4.0

@@ -30,7 +30,7 @@ p["fit_min_n"] = 30
 p["fit_max_n"] = 60

 # If conversion step was not done, we could do it here. Uncomment the lines it you want to do this.
-#from pytriqs.applications.dft.converters.wien2k_converter import *
+#from triqs_dft_tools.converters.wien2k_converter import *
 #Converter = Wien2kConverter(filename=dft_filename, repacking=True)
 #Converter.convert_dft_input()
 #mpi.barrier()

@@ -58,7 +58,7 @@ spin_names = ["up","down"]
 orb_names = [i for i in range(n_orb)]

 # Use GF structure determined by DFT blocks
-gf_struct = SK.gf_struct_solver[0]
+gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()]

 # Construct U matrix for density-density calculations
 Umat, Upmat = U_matrix_kanamori(n_orb=n_orb, U_int=U, J_hund=J)
@@ -1,10 +1,10 @@
 import pytriqs.utility.mpi as mpi
 from pytriqs.operators.util import *
 from pytriqs.archive import HDFArchive
-from pytriqs.applications.impurity_solvers.cthyb import *
-from pytriqs.gf.local import *
-from pytriqs.applications.dft.sumk_dft import *
-from pytriqs.applications.dft.converters.wien2k_converter import *
+from triqs_cthyb import *
+from pytriqs.gf import *
+from triqs_dft_tools.sumk_dft import *
+from triqs_dft_tools.converters.wien2k_converter import *

 dft_filename='SrVO3'
 U = 9.6

@@ -31,7 +31,7 @@ p["fit_min_n"] = 30
 p["fit_max_n"] = 60

 # If conversion step was not done, we could do it here. Uncomment the lines it you want to do this.
-#from pytriqs.applications.dft.converters.wien2k_converter import *
+#from triqs_dft_tools.converters.wien2k_converter import *
 #Converter = Wien2kConverter(filename=dft_filename, repacking=True)
 #Converter.convert_dft_input()
 #mpi.barrier()

@@ -59,7 +59,7 @@ spin_names = ["up","down"]
 orb_names = [i for i in range(n_orb)]

 # Use GF structure determined by DFT blocks
-gf_struct = SK.gf_struct_solver[0]
+gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()]

 # Construct Slater U matrix
 Umat = U_matrix(n_orb=n_orb, U_int=U, J_hund=J, basis='cubic',)
@@ -84,8 +84,8 @@ Using the transport code

 First we have to read the Wien2k files and store the relevant information in the hdf5 archive::

-    from pytriqs.applications.dft.converters.wien2k_converter import *
-    from pytriqs.applications.dft.sumk_dft_tools import *
+    from triqs_dft_tools.converters.wien2k_converter import *
+    from triqs_dft_tools.sumk_dft_tools import *

     Converter = Wien2kConverter(filename='case', repacking=True)
     Converter.convert_transport_input()

@@ -1,6 +1,6 @@
 .. index:: DFTTools

-.. module:: pytriqs.applications.dft
+.. module:: triqs_dft_tools

 .. _dft:

@@ -83,7 +83,7 @@ Finally, you will have to change the calls to :program:`python_with_DMFT` to


 Version compatibility
 ---------------------

 Be careful that the version of the TRIQS library and of the dft tools must be
 compatible (more information on the :ref:`TRIQS website <triqslibs:welcome>`.

@@ -97,3 +97,18 @@ Checkout the version of the code that you want, for instance::
     $ git co 1.2

 Then follow the steps 2 to 5 described above to compile the code.
+
+Custom CMake options
+--------------------
+
+Functionality of ``dft_tools`` can be tweaked using extra compile-time options passed to CMake::
+
+    cmake -DOPTION1=value1 -DOPTION2=value2 ... ../cthyb.src
+
++-----------------------------------+-------------------------+
+| Options                           | Syntax                  |
++===================================+=========================+
+| Disable testing (not recommended) | -DBuild_Tests=OFF       |
++-----------------------------------+-------------------------+
+| Build the documentation locally   | -DBuild_Documentation=ON|
++-----------------------------------+-------------------------+
@@ -6,7 +6,8 @@ set(SOURCES modules.f dmftproj.f readcomline.f set_ang_trans.f setsym.f

 # The main target and what to link with...
 add_executable(dmftproj ${SOURCES})
-target_link_libraries(dmftproj ${TRIQS_LIBRARY_LAPACK})
+find_package(LAPACK)
+target_link_libraries(dmftproj ${LAPACK_LIBRARIES})

 # where to install
 install (TARGETS dmftproj DESTINATION bin)
@@ -1,16 +1,19 @@
 # where will the python end up in triqs?
-set(python_destination pytriqs/applications/dft)
+set(PYTHON_LIB_DEST ${CPP2PY_PYTHON_LIB_DEST_ROOT}/triqs_dft_tools)

 # site_customize for build
-set(package_name "pytriqs.applications")
-configure_file(${CMAKE_SOURCE_DIR}/cmake/sitecustomize.py ${CMAKE_CURRENT_BINARY_DIR}/sitecustomize.py @ONLY)
+set(package_name "triqs_dft_tools")

-# make a local pytriqs copy
-triqs_prepare_local_pytriqs(${python_destination})
-
-# VASP converter
-add_subdirectory(converters/plovasp)
+# Create a temporary copy of the python modules so that we can run before installation with the test
+FILE(GLOB PYTHON_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.py )
+foreach(f ${PYTHON_SOURCES})
+ configure_file(${f} ${f} COPYONLY)
+endforeach()

 # add version file
-configure_file(version.py.in version.py)
-install(FILES ${CMAKE_CURRENT_BINARY_DIR}/version.py DESTINATION ${TRIQS_PYTHON_LIB_DEST_ROOT}/${python_destination})
+configure_file(version.py.in version.py @ONLY)
+
+# install files
+install(FILES ${PYTHON_SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/version.py DESTINATION ${PYTHON_LIB_DEST})
+
+add_subdirectory(converters)
@@ -1,13 +1,14 @@
 import copy
 import numpy as np
-from pytriqs.gf.local import GfImFreq, BlockGf
+from pytriqs.gf import GfImFreq, BlockGf
 from ast import literal_eval
+import pytriqs.utility.mpi as mpi
 from warnings import warn

 class BlockStructure(object):
     """ Contains information about the Green function structure.

     This class contains information about the structure of the solver
     and sumk Green functions and the mapping between them.

     Parameters
@@ -33,19 +34,21 @@
     solver_to_sumk_block : list of dict
         solver_to_sumk_block[ish][from_block] = to_block

         maps from the solver block to the sumk block
         for *inequivalent* correlated shell ish
     """
     def __init__(self,gf_struct_sumk=None,
                  gf_struct_solver=None,
                  solver_to_sumk=None,
                  sumk_to_solver=None,
-                 solver_to_sumk_block=None):
+                 solver_to_sumk_block=None,
+                 deg_shells=None):
         self.gf_struct_sumk = gf_struct_sumk
         self.gf_struct_solver = gf_struct_solver
         self.solver_to_sumk = solver_to_sumk
         self.sumk_to_solver = sumk_to_solver
         self.solver_to_sumk_block = solver_to_sumk_block
+        self.deg_shells = deg_shells

     @classmethod
     def full_structure(cls,gf_struct,corr_to_inequiv):
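With the extra argument the constructor carries the degenerate-shell bookkeeping alongside the mapping dictionaries. A minimal sketch of building a one-shell structure by hand, with illustrative block names and the module path assumed to be ``triqs_dft_tools.block_structure``::

    from triqs_dft_tools.block_structure import BlockStructure

    bs = BlockStructure(
        gf_struct_sumk=[[('up', [0]), ('down', [0])]],
        gf_struct_solver=[{'up_0': [0], 'down_0': [0]}],
        solver_to_sumk=[{('up_0', 0): ('up', 0), ('down_0', 0): ('down', 0)}],
        sumk_to_solver=[{('up', 0): ('up_0', 0), ('down', 0): ('down_0', 0)}],
        solver_to_sumk_block=[{'up_0': 'up', 'down_0': 'down'}],
        deg_shells=[[]])  # no degenerate blocks declared for this shell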
@@ -99,20 +102,21 @@
             gf_struct_sumk = gs_sumk_all,
             solver_to_sumk = copy.deepcopy(solver_to_sumk),
             sumk_to_solver = solver_to_sumk,
-            solver_to_sumk_block = s2sblock)
+            solver_to_sumk_block = s2sblock,
+            deg_shells = [[] for ish in range(len(gf_struct))])

    def pick_gf_struct_solver(self,new_gf_struct):
        """ Pick selected orbitals within blocks.

        This throws away parts of the Green's function that (for some
        reason - be sure that you know what you're doing) shouldn't be
        included in the calculation.

        To drop an entire block, just don't include it.
        To drop a certain index within a block, just don't include it.

        If it was before:

        [{'up':[0,1],'down':[0,1],'left':[0,1]}]

        to choose the 0th index of the up block and the 1st index of

@@ -130,11 +134,11 @@
        Parameters
        ----------
        new_gf_struct : list of dict
            formatted the same as gf_struct_solver:

            new_gf_struct[ish][block]=list of indices in that block.
        """

        for ish in range(len(self.gf_struct_solver)):
            gf_struct = new_gf_struct[ish]

@@ -154,24 +158,24 @@
                    new_ind = gf_struct[blk].index(ind)
                    self.sumk_to_solver[ish][k]=(blk,new_ind)
                else:
                    self.sumk_to_solver[ish][k]=(None,None)
            # reindexing gf_struct so that it starts with 0
            for k in gf_struct:
                gf_struct[k]=range(len(gf_struct[k]))
            self.gf_struct_solver[ish]=gf_struct

    def pick_gf_struct_sumk(self,new_gf_struct):
        """ Pick selected orbitals within blocks.

        This throws away parts of the Green's function that (for some
        reason - be sure that you know what you're doing) shouldn't be
        included in the calculation.

        To drop an entire block, just don't include it.
        To drop a certain index within a block, just don't include it.

        If it was before:

        [{'up':[0,1],'down':[0,1],'left':[0,1]}]

        to choose the 0th index of the up block and the 1st index of

@@ -188,11 +192,11 @@
        Parameters
        ----------
        new_gf_struct : list of dict
            formatted the same as gf_struct_solver:

            new_gf_struct[ish][block]=list of indices in that block.

            However, the indices are not according to the solver Gf
            but the sumk Gf.
        """

@@ -218,7 +222,7 @@
        Parameters
        ----------
        mapping : list of dict
            the dict consists of elements
            (from_block,from_index) : (to_block,to_index)
            that maps from one structure to the other
        """

@@ -254,7 +258,7 @@
    def create_gf(self,ish=0,gf_function=GfImFreq,**kwargs):
        """ Create a zero BlockGf having the gf_struct_solver structure.

        When using GfImFreq as gf_function, typically you have to
        supply beta as keyword argument.

        Parameters

@@ -284,7 +288,7 @@
        .. warning::

            Elements that are zero in the new structure due to
            the new block structure will be just ignored, thus
            approximated to zero.

        Parameters
@@ -292,15 +296,24 @@ class BlockStructure(object):
         G : BlockGf
             the Gf that should be converted
         G_struct : GfStructure
-            the structure ofthat G
+            the structure of that G
         ish : int
             shell index
-        show_warnings : bool
+        show_warnings : bool or float
             whether to show warnings when elements of the Green's
             function get thrown away
+            if float, set the threshold for the magnitude of an element
+            about to be thrown away to trigger a warning
+            (default: 1.e-10)
         **kwargs :
             options passed to the constructor for the new Gf
         """
 
+        warning_threshold = 1.e-10
+        if isinstance(show_warnings, float):
+            warning_threshold = show_warnings
+            show_warnings = True
 
         G_new = self.create_gf(ish=ish,**kwargs)
         for block in G_struct.gf_struct_solver[ish].keys():
             for i1 in G_struct.gf_struct_solver[ish][block]:
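For context, a minimal usage sketch (not part of this commit) of the new float form of show_warnings; here bs, old_bs and G_old are hypothetical placeholders for an existing BlockStructure, the structure that G_old currently follows, and the Green's function to be converted:

# Hedged sketch only -- 'bs', 'old_bs' and 'G_old' are assumed to exist already.
# Convert G_old (laid out according to old_bs) into the solver structure of bs,
# warning only about dropped elements whose magnitude exceeds 1e-8.
G_solver = bs.convert_gf(G_old, old_bs, ish=0, show_warnings=1.e-8, beta=40.0)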
@@ -311,22 +324,24 @@ class BlockStructure(object):
                     i2_sol = self.sumk_to_solver[ish][i2_sumk]
                     if i1_sol[0] is None or i2_sol[0] is None:
                         if show_warnings:
-                            warn(('Element {},{} of block {} of G is not present '+
-                                'in the new structure').format(i1,i2,block))
+                            if mpi.is_master_node():
+                                warn(('Element {},{} of block {} of G is not present '+
+                                    'in the new structure').format(i1,i2,block))
                         continue
                     if i1_sol[0]!=i2_sol[0]:
-                        if show_warnings:
-                            warn(('Element {},{} of block {} of G is approximated '+
-                                'to zero to match the new structure.').format(
-                                i1,i2,block))
+                        if show_warnings and np.max(np.abs(G[block][i1,i2].data)) > warning_threshold:
+                            if mpi.is_master_node():
+                                warn(('Element {},{} of block {} of G is approximated '+
+                                    'to zero to match the new structure. Max abs value: {}').format(
+                                    i1,i2,block,np.max(np.abs(G[block][i1,i2].data))))
                         continue
                     G_new[i1_sol[0]][i1_sol[1],i2_sol[1]] = \
                         G[block][i1,i2]
         return G_new
 
     def approximate_as_diagonal(self):
         """ Create a structure for a GF with zero off-diagonal elements.
 
         .. warning::
 
             In general, this will throw away non-zero elements of the
@@ -351,7 +366,8 @@ class BlockStructure(object):
     def __eq__(self,other):
         def compare(one,two):
             if type(one)!=type(two):
-                return False
+                if not (isinstance(one, (bool, np.bool_)) and isinstance(two, (bool, np.bool_))):
+                    return False
             if one is None and two is None:
                 return True
             if isinstance(one,list) or isinstance(one,tuple):
@@ -361,10 +377,10 @@ class BlockStructure(object):
                     if not compare(x,y):
                         return False
                 return True
-            elif isinstance(one,int):
-                return one==two
-            elif isinstance(one,str):
+            elif isinstance(one,(int,bool, str, np.bool_)):
                 return one==two
+            elif isinstance(one,np.ndarray):
+                return np.all(one==two)
             elif isinstance(one,dict):
                 if set(one.keys()) != set(two.keys()):
                     return False
@@ -375,8 +391,9 @@ class BlockStructure(object):
             warn('Cannot compare {}'.format(type(one)))
             return False
 
         for prop in [ "gf_struct_sumk", "gf_struct_solver",
-                "solver_to_sumk", "sumk_to_solver", "solver_to_sumk_block"]:
+                "solver_to_sumk", "sumk_to_solver", "solver_to_sumk_block",
+                "deg_shells"]:
             if not compare(getattr(self,prop),getattr(other,prop)):
                 return False
         return True
@@ -388,8 +405,8 @@ class BlockStructure(object):
         """ Reduce to dict for HDF5 export."""
 
         ret = {}
         for element in [ "gf_struct_sumk", "gf_struct_solver",
-                "solver_to_sumk_block"]:
+                "solver_to_sumk_block","deg_shells"]:
             ret[element] = getattr(self,element)
 
         def construct_mapping(mapping):
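For context, a hedged sketch (not part of this commit) of what including deg_shells in __reduce_to_dict__ and __eq__ buys: a BlockStructure written to an archive and read back should compare equal. Here bs is a hypothetical existing BlockStructure and the file name is purely illustrative:

# Hedged sketch only -- 'bs' is an existing BlockStructure; the file name is illustrative.
from pytriqs.archive import HDFArchive

with HDFArchive('bs_roundtrip.h5', 'w') as ar:
    ar['bs'] = bs                 # uses __reduce_to_dict__, now including deg_shells

with HDFArchive('bs_roundtrip.h5', 'r') as ar:
    bs_read = ar['bs']            # reconstructed via the registered scheme

assert bs_read == bs              # __eq__ now also compares deg_shells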
@@ -436,6 +453,18 @@ class BlockStructure(object):
             keys = sorted(element[ish].keys(),key=keyfun)
             for k in keys:
                 s+='  '+str(k)+str(element[ish][k])+'\n'
+        s += "deg_shells\n"
+        for ish in range(len(self.deg_shells)):
+            s+=' shell '+str(ish)+'\n'
+            for l in range(len(self.deg_shells[ish])):
+                s+='  equivalent group '+str(l)+'\n'
+                if isinstance(self.deg_shells[ish][l],dict):
+                    for key, val in self.deg_shells[ish][l].iteritems():
+                        s+='   '+key+('*' if val[1] else '')+':\n'
+                        s+='    '+str(val[0]).replace('\n','\n    ')+'\n'
+                else:
+                    for key in self.deg_shells[ish][l]:
+                        s+='   '+key+'\n'
         return s
 
 from pytriqs.archive.hdf_archive_schemes import register_class
new file: python/converters/CMakeLists.txt (10 lines)
@@ -0,0 +1,10 @@
+# Create a temporary copy of the python modules so that we can run before installation with the test
+FILE(GLOB PYTHON_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.py)
+foreach(f ${PYTHON_SOURCES})
+ configure_file(${f} ${f} COPYONLY)
+endforeach()
+
+# install files
+install(FILES ${PYTHON_SOURCES} DESTINATION ${PYTHON_LIB_DEST}/converters)
+
+add_subdirectory(plovasp)
@@ -19,10 +19,8 @@
 # TRIQS. If not, see <http://www.gnu.org/licenses/>.
 #
 ##########################################################################
-from pytriqs.cmake_info import hdf5_command_path
 import pytriqs.utility.mpi as mpi
 
 
 class ConverterTools:
 
     def __init__(self):
@@ -73,7 +71,7 @@ class ConverterTools:
             mpi.report("Repacking the file %s" % self.hdf_file)
 
         retcode = subprocess.call(
-            [hdf5_command_path + "/h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
+            ["h5repack", "-i%s" % self.hdf_file, "-otemphgfrt.h5"])
         if retcode != 0:
             mpi.report("h5repack failed!")
         else:
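For context, a hedged standalone sketch of the repacking pattern after this change, which relies on h5repack being found on the PATH; the file name is illustrative and shutil.move stands in for the move done elsewhere in the class:

# Hedged sketch only; 'my_data.h5' is an illustrative file name.
import subprocess, shutil

retcode = subprocess.call(["h5repack", "-imy_data.h5", "-otemphgfrt.h5"])
if retcode == 0:
    shutil.move("temphgfrt.h5", "my_data.h5")   # replace the original with the repacked copy
else:
    print("h5repack failed!")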
@@ -1,11 +1,19 @@
-set(python_destination pytriqs/applications/dft/converters/plovasp)
-
-include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${TRIQS_INCLUDE_ALL})
-triqs_python_extension(atm ${python_destination})
-target_link_libraries(atm atm_c ${TRIQS_LIBRARY_ALL})
-triqs_set_rpath_for_target(atm)
+# === Build and install atm module
+add_cpp2py_module(atm)
+target_link_libraries(atm atm_c triqs)
+target_compile_options(atm PRIVATE -std=c++17)
+target_include_directories(atm PRIVATE ${CMAKE_SOURCE_DIR}/c++)
+
+install(TARGETS atm DESTINATION ${PYTHON_LIB_DEST}/converters/plovasp)
+
+# === Copy Python files to current build directory and register for install
+set(PYTHON_SOURCES __init__.py converter.py elstruct.py inpconf.py plotools.py proj_group.py proj_shell.py sc_dmft.py vaspio.py)
+foreach(f ${PYTHON_SOURCES})
+ configure_file(${f} ${f} COPYONLY)
+endforeach()
+
+# install files
+install(FILES ${PYTHON_SOURCES} DESTINATION ${PYTHON_LIB_DEST}/converters/plovasp)
 
 # This we need in order for tests to work
-add_custom_command(TARGET atm POST_BUILD COMMAND ln -fs ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}/atm.so ${CMAKE_BINARY_DIR}/python/dft/converters/plovasp)
-
-install (TARGETS atm DESTINATION ${TRIQS_PYTHON_LIB_DEST_ROOT}/${python_destination})
+#add_custom_command(TARGET atm POST_BUILD COMMAND ln -fs ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}/atm.so ${CMAKE_BINARY_DIR}/python/dft/converters/plovasp)
@@ -1,6 +1,6 @@
 # Generated automatically using the command :
 # c++2py.py -m atm -o atm --moduledoc "Analytical Tetrahedron Method for DOS" ../../../c++/plovasp/atm/dos_tetra3d.hpp
-from wrap_generator import *
+from cpp2py.wrap_generator import *
 
 # The module
 module = module_(full_name = "atm", doc = "Analytical Tetrahedron Method for calculating DOS", app_name = "atm")
@@ -8,11 +8,11 @@ module = module_(full_name = "atm", doc = "Analytical Tetrahedron Method for cal
 # All the triqs C++/Python modules
 
 # Add here all includes beyond what is automatically included by the triqs modules
-module.add_include("../../../c++/plovasp/atm/dos_tetra3d.hpp")
+module.add_include("plovasp/atm/dos_tetra3d.hpp")
 
 # Add here anything to add in the C++ code at the start, e.g. namespace using
 module.add_preamble("""
-#include <triqs/python_tools/converters/arrays.hpp>
+#include <triqs/cpp2py_converters/arrays.hpp>
 """)
 
 module.add_function ("array_view<double,2> dos_tetra_weights_3d (array_view<double,1> eigk, double en, array_view<long,2> itt)", doc = """DOS of a band by analytical tetrahedron method\n\n Returns corner weights for all tetrahedra for a given band and real energy.""")
@@ -21,9 +21,9 @@
 ################################################################################
 
 from types import *
-#from pytriqs.applications.dft.U_matrix import *
+#from triqs_dft_tools.U_matrix import *
 from U_matrix import *
-from pytriqs.gf.local import *
+from pytriqs.gf import *
 #from hubbard_I import gf_hi_fullu, sigma_atomic_fullu
 import pytriqs.utility.mpi as mpi
 from itertools import izip
@@ -1,6 +1,6 @@
-#from pytriqs.applications.dft.sumk_dft import *
+#from triqs_dft_tools.sumk_dft import *
 from sumk_dft import *
-#from pytriqs.applications.dft.converters.wien2k_converter import *
+#from triqs_dft_tools.converters.wien2k_converter import *
 from converters.vasp_converter import *
 #from pytriqs.applications.impurity_solvers.hubbard_I.hubbard_solver import Solver
 from hf_solver import Solver
@@ -1,6 +1,6 @@
 
 import numpy as np
-from pytriqs.gf.local import *
+from pytriqs.gf import *
 #from sumk_dft import SumkDFT
 from sumk_dft_tools import SumkDFTTools
 from converters.vasp_converter import VaspConverter
@@ -502,7 +502,7 @@ class Wien2kConverter(ConverterTools):
         - symmetries from :file:`case.outputs`,
 
         if those Wien2k files are present and stores the data in the hdf5 archive.
-        This function is automatically called by :meth:`convert_dft_input <pytriqs.applications.dft.converters.wien2k_converter.Wien2kConverter.convert_dft_input>`.
+        This function is automatically called by :meth:`convert_dft_input <triqs_dft_tools.converters.wien2k_converter.Wien2kConverter.convert_dft_input>`.
 
         """
 
@@ -23,14 +23,17 @@
 from types import *
 import numpy
 import pytriqs.utility.dichotomy as dichotomy
-from pytriqs.gf.local import *
+from pytriqs.gf import *
 import pytriqs.utility.mpi as mpi
+from pytriqs.utility.comparison_tests import assert_arrays_are_close
 from pytriqs.archive import *
 from symmetry import *
 from block_structure import BlockStructure
 from sets import Set
 from itertools import product
 from warnings import warn
+from scipy import compress
+from scipy.optimize import minimize
 
 
 class SumkDFT(object):
@@ -521,7 +524,7 @@ class SumkDFT(object):
                 set_up_G_latt = True
             else:  # Check that existing GF is consistent
                 G_latt = getattr(self, "G_latt_" + iw_or_w)
-                GFsize = [gf.N1 for bname, gf in G_latt]
+                GFsize = [gf.target_shape[0] for bname, gf in G_latt]
                 unchangedsize = all([self.n_orbitals[ik, ntoi[spn[isp]]] == GFsize[
                                     isp] for isp in range(self.n_spin_blocks[self.SO])])
                 if not unchangedsize:
|
|||||||
Sigma_imp) == self.n_inequiv_shells, "put_Sigma: give exactly one Sigma for each inequivalent corr. shell!"
|
Sigma_imp) == self.n_inequiv_shells, "put_Sigma: give exactly one Sigma for each inequivalent corr. shell!"
|
||||||
|
|
||||||
# init self.Sigma_imp_(i)w:
|
# init self.Sigma_imp_(i)w:
|
||||||
if all(type(gf) == GfImFreq for bname, gf in Sigma_imp[0]):
|
if all( (isinstance(gf, Gf) and isinstance (gf.mesh, MeshImFreq)) for bname, gf in Sigma_imp[0]):
|
||||||
# Imaginary frequency Sigma:
|
# Imaginary frequency Sigma:
|
||||||
self.Sigma_imp_iw = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=Sigma_imp[0].mesh))
|
self.Sigma_imp_iw = [BlockGf(name_block_generator=[(block, GfImFreq(indices=inner, mesh=Sigma_imp[0].mesh))
|
||||||
for block, inner in self.gf_struct_sumk[icrsh]], make_copies=False)
|
for block, inner in self.gf_struct_sumk[icrsh]], make_copies=False)
|
||||||
for icrsh in range(self.n_corr_shells)]
|
for icrsh in range(self.n_corr_shells)]
|
||||||
SK_Sigma_imp = self.Sigma_imp_iw
|
SK_Sigma_imp = self.Sigma_imp_iw
|
||||||
elif all(type(gf) == GfReFreq for bname, gf in Sigma_imp[0]):
|
elif all( isinstance(gf, Gf) and isinstance (gf.mesh, MeshReFreq) for bname, gf in Sigma_imp[0]):
|
||||||
# Real frequency Sigma:
|
# Real frequency Sigma:
|
||||||
self.Sigma_imp_w = [BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=Sigma_imp[0].mesh))
|
self.Sigma_imp_w = [BlockGf(name_block_generator=[(block, GfReFreq(indices=inner, mesh=Sigma_imp[0].mesh))
|
||||||
for block, inner in self.gf_struct_sumk[icrsh]], make_copies=False)
|
for block, inner in self.gf_struct_sumk[icrsh]], make_copies=False)
|
||||||
@ -848,6 +851,412 @@ class SumkDFT(object):
|
|||||||
elif (ind1 < 0) and (ind2 < 0):
|
elif (ind1 < 0) and (ind2 < 0):
|
||||||
self.deg_shells[ish].append([block1, block2])
|
self.deg_shells[ish].append([block1, block2])
|
||||||
|
|
||||||
|
def _get_hermitian_quantity_from_gf(self, G):
|
||||||
|
""" Convert G to a Hermitian quantity
|
||||||
|
|
||||||
|
For G(tau) and G(iw), G(tau) is returned.
|
||||||
|
For G(t) and G(w), the spectral function is returned.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
G : list of BlockGf of GfImFreq, GfImTime, GfReFreq or GfReTime
|
||||||
|
the input Green's function
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
gf : list of BlockGf of GfImTime or GfReFreq
|
||||||
|
the output G(tau) or A(w)
|
||||||
|
"""
|
||||||
|
# make a GfImTime from the supplied GfImFreq
|
||||||
|
if all(isinstance(g_sh._first(), GfImFreq) for g_sh in G):
|
||||||
|
gf = [BlockGf(name_block_generator = [(name, GfImTime(beta=block.mesh.beta,
|
||||||
|
indices=block.indices,n_points=len(block.mesh)+1)) for name, block in g_sh],
|
||||||
|
make_copies=False) for g_sh in G]
|
||||||
|
for ish in range(len(gf)):
|
||||||
|
for name, g in gf[ish]:
|
||||||
|
g.set_from_inverse_fourier(G[ish][name])
|
||||||
|
# keep a GfImTime from the supplied GfImTime
|
||||||
|
elif all(isinstance(g_sh._first(), GfImTime) for g_sh in G):
|
||||||
|
gf = G
|
||||||
|
# make a spectral function from the supplied GfReFreq
|
||||||
|
elif all(isinstance(g_sh._first(), GfReFreq) for g_sh in G):
|
||||||
|
gf = [g_sh.copy() for g_sh in G]
|
||||||
|
for ish in range(len(gf)):
|
||||||
|
for name, g in gf[ish]:
|
||||||
|
g << 1.0j*(g-g.conjugate().transpose())/2.0/numpy.pi
|
||||||
|
elif all(isinstance(g_sh._first(), GfReTime) for g_sh in G):
|
||||||
|
def get_delta_from_mesh(mesh):
|
||||||
|
w0 = None
|
||||||
|
for w in mesh:
|
||||||
|
if w0 is None:
|
||||||
|
w0 = w
|
||||||
|
else:
|
||||||
|
return w-w0
|
||||||
|
gf = [BlockGf(name_block_generator = [(name, GfReFreq(
|
||||||
|
window=(-numpy.pi*(len(block.mesh)-1) / (len(block.mesh)*get_delta_from_mesh(block.mesh)),
|
||||||
|
numpy.pi*(len(block.mesh)-1) / (len(block.mesh)*get_delta_from_mesh(block.mesh))),
|
||||||
|
n_points=len(block.mesh), indices=block.indices)) for name, block in g_sh], make_copies=False)
|
||||||
|
for g_sh in G]
|
||||||
|
|
||||||
|
for ish in range(len(gf)):
|
||||||
|
for name, g in gf[ish]:
|
||||||
|
g.set_from_fourier(G[ish][name])
|
||||||
|
g << 1.0j*(g-g.conjugate().transpose())/2.0/numpy.pi
|
||||||
|
else:
|
||||||
|
raise Exception("G must be a list of BlockGf of either GfImFreq, GfImTime, GfReFreq or GfReTime")
|
||||||
|
return gf
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
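For context, a hedged numpy-only illustration (not part of this commit) of the Hermitian quantity built in the real-frequency branch above, A(w) = i[G(w) - G(w)^dagger]/(2 pi); the matrix is made up:

# Hedged numpy-only illustration; G_w is a made-up 2x2 matrix at a single frequency point.
import numpy as np

G_w = np.array([[0.1 - 0.3j, 0.02 + 0.01j],
                [0.03 - 0.02j, 0.2 - 0.5j]])          # one frequency point of a retarded G
A_w = 1.0j * (G_w - G_w.conjugate().transpose()) / (2.0 * np.pi)

assert np.allclose(A_w, A_w.conjugate().transpose())  # A(w) is Hermitian by construction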
+    def analyse_block_structure_from_gf(self, G, threshold=1.e-5, include_shells=None, analyse_deg_shells = True):
+        r"""
+        Determines the block structure of local Green's functions by analysing
+        the structure of the corresponding non-interacting Green's function.
+        The resulting block structures for correlated shells are
+        stored in the :class:`SumkDFT.block_structure <dft.block_structure.BlockStructure>`
+        attribute.
+
+        This is a safer alternative to analyse_block_structure, because
+        the full non-interacting Green's function is taken into account
+        and not just the density matrix and Hloc.
+
+        Parameters
+        ----------
+        G : list of BlockGf of GfImFreq, GfImTime, GfReFreq or GfReTime
+            the non-interacting Green's function for each inequivalent correlated shell
+        threshold : real, optional
+            If the difference between matrix elements is below threshold,
+            they are considered to be equal.
+        include_shells : list of integers, optional
+            List of correlated shells to be analysed.
+            If include_shells is not provided all correlated shells will be analysed.
+        analyse_deg_shells : bool
+            Whether to call the analyse_deg_shells function
+            after having finished the block structure analysis
+
+        Returns
+        -------
+        G : list of BlockGf of GfImFreq or GfImTime
+            the Green's function transformed into the new block structure
+        """
+
+        gf = self._get_hermitian_quantity_from_gf(G)
+
+        # initialize the variables
+        self.gf_struct_solver = [{} for ish in range(self.n_inequiv_shells)]
+        self.sumk_to_solver = [{} for ish in range(self.n_inequiv_shells)]
+        self.solver_to_sumk = [{} for ish in range(self.n_inequiv_shells)]
+        self.solver_to_sumk_block = [{}
+                                     for ish in range(self.n_inequiv_shells)]
+
+        # the maximum value of each matrix element of each block and shell
+        max_gf = [{name:numpy.max(numpy.abs(g.data),0) for name, g in gf[ish]} for ish in range(self.n_inequiv_shells)]
+
+        if include_shells is None:
+            # include all shells
+            include_shells = range(self.n_inequiv_shells)
+
+        for ish in include_shells:
+            for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]:
+                n_orb = self.corr_shells[self.inequiv_to_corr[ish]]['dim']
+
+                # gives an index list of entries larger that threshold
+                maxgf_bool = (abs(max_gf[ish][sp]) > threshold)
+
+                # Determine off-diagonal entries in upper triangular part of the
+                # Green's function
+                offdiag = Set([])
+                for i in range(n_orb):
+                    for j in range(i + 1, n_orb):
+                        if maxgf_bool[i, j]:
+                            offdiag.add((i, j))
+
+                # Determine the number of non-hybridising blocks in the gf
+                blocs = [[i] for i in range(n_orb)]
+                while len(offdiag) != 0:
+                    pair = offdiag.pop()
+                    for b1, b2 in product(blocs, blocs):
+                        if (pair[0] in b1) and (pair[1] in b2):
+                            if blocs.index(b1) != blocs.index(b2):  # In separate blocks?
+                                # Merge two blocks
+                                b1.extend(blocs.pop(blocs.index(b2)))
+                            break  # Move on to next pair in offdiag
+
+                # Set the gf_struct for the solver accordingly
+                num_blocs = len(blocs)
+                for i in range(num_blocs):
+                    blocs[i].sort()
+                    self.gf_struct_solver[ish].update(
+                        [('%s_%s' % (sp, i), range(len(blocs[i])))])
+
+                # Construct sumk_to_solver taking (sumk_block, sumk_index) --> (solver_block, solver_inner)
+                # and solver_to_sumk taking (solver_block, solver_inner) -->
+                # (sumk_block, sumk_index)
+                for i in range(num_blocs):
+                    for j in range(len(blocs[i])):
+                        block_sumk = sp
+                        inner_sumk = blocs[i][j]
+                        block_solv = '%s_%s' % (sp, i)
+                        inner_solv = j
+                        self.sumk_to_solver[ish][(block_sumk, inner_sumk)] = (
+                            block_solv, inner_solv)
+                        self.solver_to_sumk[ish][(block_solv, inner_solv)] = (
+                            block_sumk, inner_sumk)
+                        self.solver_to_sumk_block[ish][block_solv] = block_sumk
+
+        # transform G to the new structure
+        full_structure = BlockStructure.full_structure(
+            [{sp:range(self.corr_shells[self.inequiv_to_corr[ish]]['dim'])
+              for sp in self.spin_block_names[self.corr_shells[self.inequiv_to_corr[ish]]['SO']]}
+             for ish in range(self.n_inequiv_shells)],None)
+        G_transformed = [
+            self.block_structure.convert_gf(G[ish],
+                full_structure, ish, mesh=G[ish].mesh.copy(), show_warnings=threshold,
+                gf_function=type(G[ish]._first()))
+            for ish in range(self.n_inequiv_shells)]
+
+        if analyse_deg_shells:
+            self.analyse_deg_shells(G_transformed, threshold, include_shells)
+        return G_transformed
+
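For context, a hedged, self-contained illustration (not part of this commit) of the block-merging step above, which repeatedly fuses any two blocks connected by an above-threshold off-diagonal element; the matrix is made up:

# Hedged, self-contained illustration of the block-merging logic; the matrix is made up.
import numpy as np
from itertools import product

max_gf = np.array([[1.0, 0.0, 0.3, 0.0],
                   [0.0, 1.0, 0.0, 0.0],
                   [0.3, 0.0, 1.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0]])      # max |G_ij| over frequencies, 4 orbitals
threshold = 1.e-5

offdiag = set((i, j) for i in range(4) for j in range(i + 1, 4) if abs(max_gf[i, j]) > threshold)
blocs = [[i] for i in range(4)]
while offdiag:
    pair = offdiag.pop()
    for b1, b2 in product(blocs, blocs):
        if pair[0] in b1 and pair[1] in b2:
            if blocs.index(b1) != blocs.index(b2):
                b1.extend(blocs.pop(blocs.index(b2)))  # merge the two connected blocks
            break

print(sorted(sorted(b) for b in blocs))   # -> [[0, 2], [1], [3]]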
+    def analyse_deg_shells(self, G, threshold=1.e-5, include_shells=None):
+        r"""
+        Determines the degenerate shells of local Green's functions by analysing
+        the structure of the corresponding non-interacting Green's function.
+        The results are stored in the
+        :class:`SumkDFT.block_structure <dft.block_structure.BlockStructure>`
+        attribute.
+
+        Due to the implementation and numerics, the maximum difference between
+        two matrix elements that are detected as equal can be a bit higher
+        (e.g. a factor of two) than the actual threshold.
+
+        Parameters
+        ----------
+        G : list of BlockGf of GfImFreq or GfImTime
+            the non-interacting Green's function for each inequivalent correlated shell
+        threshold : real, optional
+            If the difference between matrix elements is below threshold,
+            they are considered to be equal.
+        include_shells : list of integers, optional
+            List of correlated shells to be analysed.
+            If include_shells is not provided all correlated shells will be analysed.
+        """
+
+        # initialize
+        self.deg_shells = [[] for ish in range(self.n_inequiv_shells)]
+
+        # helper function
+        def null(A, eps=1e-15):
+            """ Calculate the null-space of matrix A """
+            u, s, vh = numpy.linalg.svd(A)
+            null_mask = (s <= eps)
+            null_space = compress(null_mask, vh, axis=0)
+            return null_space.conjugate().transpose()
+
+        gf = self._get_hermitian_quantity_from_gf(G)
+
+        if include_shells is None:
+            # include all shells
+            include_shells = range(self.n_inequiv_shells)
+
+        # We consider two blocks equal, if their Green's functions obey
+        # maybe_conjugate1( v1^dagger G1 v1 ) = maybe_conjugate2( v2^dagger G2 v2 )
+        # where maybe_conjugate is a function that conjugates the Green's
+        # function if the flag 'conjugate' is set and the v are unitary
+        # matrices
+        #
+        # for each pair of blocks, we check whether there is a transformation
+        # maybe_conjugate( T G1 T^dagger ) = G2
+        # where our goal is to find T
+        # we just try whether there is such a T with and without conjugation
+        for ish in include_shells:
+            for block1 in self.gf_struct_solver[ish].iterkeys():
+                for block2 in self.gf_struct_solver[ish].iterkeys():
+                    if block1==block2: continue
+
+                    # check if the blocks are already present in the deg_shells
+                    ind1 = -1
+                    ind2 = -2
+                    for n, ind in enumerate(self.deg_shells[ish]):
+                        if block1 in ind:
+                            ind1 = n
+                            v1 = ind[block1]
+                        if block2 in ind:
+                            ind2 = n
+                            v2 = ind[block2]
+
+                    # if both are already present, go to the next pair of blocks
+                    if ind1 >= 0 and ind2 >= 0:
+                        continue
+
+                    gf1 = gf[ish][block1]
+                    gf2 = gf[ish][block2]
+
+                    # the two blocks have to have the same shape
+                    if gf1.target_shape != gf2.target_shape:
+                        continue
+
+                    # Instead of directly comparing the two blocks, we
+                    # compare its eigenvalues. As G(tau) is Hermitian,
+                    # they are real and the eigenvector matrix is unitary.
+                    # Thus, if the eigenvalues are equal we can transform
+                    # one block to make it equal to the other (at least
+                    # for tau=0).
+
+                    e1 = numpy.linalg.eigvalsh(gf1.data[0])
+                    e2 = numpy.linalg.eigvalsh(gf2.data[0])
+                    if numpy.any(abs(e1-e2) > threshold): continue
+
+                    for conjugate in [False,True]:
+                        if conjugate:
+                            gf2 = gf2.conjugate()
+
+                        # we want T gf1 T^dagger = gf2
+                        # while for a given tau, T could be calculated
+                        # by diagonalizing gf1 and gf2, this does not
+                        # work for all taus simultaneously because of
+                        # numerical imprecisions
+
+                        # rather, we rewrite the equation to
+                        # T gf1 = gf2 T
+                        # which is the Sylvester equation.
+                        # For that equation, one can use the Kronecker
+                        # product to get a linear problem, which consists
+                        # of finding the null space of M vec T = 0.
+
+                        M = numpy.kron(numpy.eye(*gf1.target_shape),gf2.data[0])-numpy.kron(gf1.data[0].transpose(),numpy.eye(*gf1.target_shape))
+                        N = null(M, threshold)
+
+                        # now we get the intersection of the null spaces
+                        # of all values of tau
+                        for i in range(1,len(gf1.data)):
+                            M = numpy.kron(numpy.eye(*gf1.target_shape),gf2.data[i])-numpy.kron(gf1.data[i].transpose(),numpy.eye(*gf1.target_shape))
+                            # transform M into current null space
+                            M = numpy.dot(M, N)
+                            N = numpy.dot(N, null(M, threshold))
+                            if numpy.size(N) == 0:
+                                break
+
+                        # no intersection of the null spaces -> no symmetry
+                        if numpy.size(N) == 0: continue
+
+                        # reshape N: it then has the indices matrix, matrix, number of basis vectors of the null space
+                        N = N.reshape(gf1.target_shape[0], gf1.target_shape[1], -1).transpose([1, 0, 2])
+
+                        """
+                        any matrix in the null space can now be constructed as
+                        M = 0
+                        for i in range(N.shape[-1]):
+                            M += y[i]*N[:,:,i]
+                        with coefficients (complex numbers) y[i].
+
+                        We want to get a set of coefficients y so that M is unitary.
+                        Unitary means M M^dagger = 1.
+                        Thus,
+                        sum y[i] N[:,:,i] y[j].conjugate() N[:,:,j].conjugate().transpose() = eye.
+                        The object N[:,:,i] N[:,:,j] is a four-index object which we call Z.
+                        """
+                        Z = numpy.einsum('aci,bcj->abij', N, N.conjugate())
+
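For context, a hedged numpy-only illustration (not part of this commit) of the vectorized Sylvester condition T G1 = G2 T used above; G1 is made up and G2 is built from it with a known rotation, so a solution must exist (sign and vectorization conventions may differ slightly from the code above):

# Hedged numpy-only illustration of the vectorized Sylvester condition T G1 = G2 T.
import numpy as np

def null_space(A, eps=1e-10):
    u, s, vh = np.linalg.svd(A)
    return vh[s <= eps].conjugate().transpose()

G1 = np.array([[0.5, 0.1 + 0.2j], [0.1 - 0.2j, -0.3]])
theta = 0.7
U = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
G2 = U.dot(G1).dot(U.conjugate().transpose())          # related to G1 by a known unitary

# vec(T G1 - G2 T) = 0 becomes a linear system in vec(T), built with Kronecker products
M = np.kron(np.eye(2), G2) - np.kron(G1.transpose(), np.eye(2))
N = null_space(M)
print(N.shape[1] >= 1)   # at least one null vector -> a transformation T exists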
+                        """
+                        function chi2
+                        This function takes a real parameter vector y and reinterprets it as complex.
+                        Then, it calculates the chi2 of
+                        sum y[i] N[:,:,i] y[j].conjugate() N[:,:,j].conjugate().transpose() - eye.
+                        """
+                        def chi2(y):
+                            # reinterpret y as complex number
+                            y = y.view(numpy.complex_)
+                            ret = 0.0
+                            for a in range(Z.shape[0]):
+                                for b in range(Z.shape[1]):
+                                    ret += numpy.abs(numpy.dot(y, numpy.dot(Z[a, b], y.conjugate()))
+                                                     - (1.0 if a == b else 0.0))**2
+                            return ret
+
+                        # use the minimization routine from scipy
+                        res = minimize(chi2, numpy.ones(2 * N.shape[-1]))
+
+                        # if the minimization fails, there is probably no symmetry
+                        if not res.success: continue
+                        # check if the minimization returned zero within the tolerance
+                        if res.fun > threshold: continue
+
+                        # reinterpret the solution as a complex number
+                        y = res.x.view(numpy.complex_)
+
+                        # reconstruct the T matrix
+                        T = numpy.zeros(N.shape[:-1], dtype=numpy.complex_)
+                        for i in range(len(y)):
+                            T += N[:, :, i] * y[i]
+
+                        # transform gf1 using T
+                        G_transformed = gf1.copy()
+                        G_transformed.from_L_G_R(T, gf1, T.conjugate().transpose())
+
+                        # it does not make sense to check the tails for an
+                        # absolute error because it will usually not hold;
+                        # we could just check the relative error
+                        # (here, we ignore it, reasoning that if the data
+                        # is the same, the tails have to coincide as well)
+                        try:
+                            assert_arrays_are_close(G_transformed.data, gf2.data, threshold)
+                        except (RuntimeError, AssertionError):
+                            # the symmetry does not hold
+                            continue
+
+                        # Now that we have found a valid T, we have to
+                        # rewrite it to match the convention that
+                        # C1(v1^dagger G1 v1) = C2(v2^dagger G2 v2),
+                        # where C conjugates if the flag is True
+
+                        # For each group of degenerate shells, the list
+                        # SK.deg_shells[ish] contains a dict. The keys
+                        # of the dict are the block names, the values
+                        # are tuples. The first entry of the tuple is
+                        # the transformation matrix v, the second entry
+                        # is the conjugation flag
+
+                        # the second block is already present
+                        # set v1 and C1 so that they are compatible with
+                        # C(T gf1 T^dagger) = gf2
+                        # and with
+                        # C1(v1^dagger G1 v1) = C2(v2^dagger G2 v2)
+                        if (ind1 < 0) and (ind2 >= 0):
+                            if conjugate:
+                                self.deg_shells[ish][ind2][block1] = numpy.dot(T.conjugate().transpose(), v2[0].conjugate()), not v2[1]
+                            else:
+                                self.deg_shells[ish][ind2][block1] = numpy.dot(T.conjugate().transpose(), v2[0]), v2[1]
+                        # the first block is already present
+                        # set v2 and C2 so that they are compatible with
+                        # C(T gf1 T^dagger) = gf2
+                        # and with
+                        # C1(v1^dagger G1 v1) = C2(v2^dagger G2 v2)
+                        elif (ind1 >= 0) and (ind2 < 0):
+                            if conjugate:
+                                self.deg_shells[ish][ind1][block2] = numpy.dot(T.conjugate(), v1[0].conjugate()), not v1[1]
+                            else:
+                                self.deg_shells[ish][ind1][block2] = numpy.dot(T, v1[0]), v1[1]
+                        # the blocks are not already present
+                        # we arbitrarily choose v1=eye and C1=False and
+                        # set v2 and C2 so that they are compatible with
+                        # C(T gf1 T^dagger) = gf2
+                        # and with
+                        # C1(v1^dagger G1 v1) = C2(v2^dagger G2 v2)
+                        elif (ind1 < 0) and (ind2 < 0):
+                            d = dict()
+                            d[block1] = numpy.eye(*gf1.target_shape), False
+                            if conjugate:
+                                d[block2] = T.conjugate(), True
+                            else:
+                                d[block2] = T, False
+                            self.deg_shells[ish].append(d)
+
+                        # a block was found, break out of the loop
+                        break
+
+
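For context, a hedged illustration (not part of this commit) of the real-view trick and the unitarity residual that chi2 minimizes above; the single-matrix "null-space basis" is made up, so the optimum is just a rescaling:

# Hedged illustration of the real-view trick and the unitarity residual.
import numpy as np
from scipy.optimize import minimize

N = np.array([[2.0, 0.0], [0.0, 2.0j]]).reshape(2, 2, 1)   # one made-up basis matrix, not yet unitary

def chi2(y_real):
    y = y_real.view(np.complex_)                 # reinterpret pairs of reals as complex numbers
    T = np.einsum('abi,i->ab', N, y)             # T = sum_i y[i] * N[:,:,i]
    R = T.dot(T.conjugate().transpose()) - np.eye(2)
    return np.sum(np.abs(R)**2)                  # how far T is from being unitary

res = minimize(chi2, np.ones(2 * N.shape[-1]))   # two reals per complex coefficient
ok = (res.fun < 1e-8)                            # T becomes unitary for |y| = 0.5
print(ok)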
     def density_matrix(self, method='using_gf', beta=40.0):
         """Calculate density matrices in one of two ways.
 
@@ -1212,20 +1621,52 @@ class SumkDFT(object):
         Parameters
         ----------
         gf_to_symm : gf_struct_solver like
-            Input GF.
+            Input and output GF (i.e., it gets overwritten)
         orb : int
             Index of an inequivalent shell.
 
         """
 
+        # when reading block_structures written with older versions from
+        # an h5 file, self.deg_shells might be None
+        if self.deg_shells is None: return
+
         for degsh in self.deg_shells[orb]:
-            ss = gf_to_symm[degsh[0]].copy()
-            ss.zero()
+            # ss will hold the averaged orbitals in the basis where the
+            # blocks are all equal
+            # i.e. maybe_conjugate(v^dagger gf v)
+            ss = None
             n_deg = len(degsh)
-            for bl in degsh:
-                ss += gf_to_symm[bl] / (1.0 * n_deg)
-            for bl in degsh:
-                gf_to_symm[bl] << ss
+            for key in degsh:
+                if ss is None:
+                    ss = gf_to_symm[key].copy()
+                    ss.zero()
+                    helper = ss.copy()
+                # get the transformation matrix
+                if isinstance(degsh, dict):
+                    v, C = degsh[key]
+                else:
+                    # for backward compatibility, allow degsh to be a list
+                    v = numpy.eye(*ss.target_shape)
+                    C = False
+                # the helper is in the basis where the blocks are all equal
+                helper.from_L_G_R(v.conjugate().transpose(), gf_to_symm[key], v)
+                if C:
+                    helper << helper.transpose()
+                # average over all shells
+                ss += helper / (1.0 * n_deg)
+            # now put back the averaged gf to all shells
+            for key in degsh:
+                if isinstance(degsh, dict):
+                    v, C = degsh[key]
+                else:
+                    # for backward compatibility, allow degsh to be a list
+                    v = numpy.eye(*ss.target_shape)
+                    C = False
+                if C:
+                    gf_to_symm[key].from_L_G_R(v, ss.transpose(), v.conjugate().transpose())
+                else:
+                    gf_to_symm[key].from_L_G_R(v, ss, v.conjugate().transpose())
 
     def total_density(self, mu=None, iw_or_w="iw", with_Sigma=True, with_dc=True, broadening=None):
         r"""
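For context, a hedged numpy-only sketch (not part of this commit) of the averaging convention the new symm_deg_gf implements: average the degenerate blocks in the common basis v^dagger G v, then map the average back through each block's transformation (conjugate flags omitted for brevity); all matrices are made up:

# Hedged numpy-only sketch of the averaging convention; matrices are made up.
import numpy as np

G_common = np.array([[0.4, 0.1], [0.1, -0.2]])      # the "true" degenerate block in the common basis
noise = np.array([[1e-3, 0.0], [0.0, -1e-3]])

theta = 0.3
v = {'up_0': np.eye(2),
     'up_1': np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])}

# two noisy copies of the same block, each expressed in its own basis: v (G +- noise) v^dagger
gf = {'up_0': v['up_0'].dot(G_common + noise).dot(v['up_0'].conjugate().transpose()),
      'up_1': v['up_1'].dot(G_common - noise).dot(v['up_1'].conjugate().transpose())}

# average in the common basis (v^dagger gf v), then push the average back into each block's basis
ss = sum(v[k].conjugate().transpose().dot(gf[k]).dot(v[k]) for k in gf) / float(len(gf))
for k in gf:
    gf[k] = v[k].dot(ss).dot(v[k].conjugate().transpose())

assert np.allclose(ss, G_common)                    # the opposite noise contributions cancel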
@@ -1610,3 +2051,9 @@ class SumkDFT(object):
     def __set_solver_to_sumk_block(self,value):
         self.block_structure.solver_to_sumk_block = value
     solver_to_sumk_block = property(__get_solver_to_sumk_block,__set_solver_to_sumk_block)
+
+    def __get_deg_shells(self):
+        return self.block_structure.deg_shells
+    def __set_deg_shells(self,value):
+        self.block_structure.deg_shells = value
+    deg_shells = property(__get_deg_shells,__set_deg_shells)
@@ -21,13 +21,16 @@
 import sys
 from types import *
 import numpy
-from pytriqs.gf.local import *
+from pytriqs.gf import *
 import pytriqs.utility.mpi as mpi
 from symmetry import *
 from sumk_dft import SumkDFT
 from scipy.integrate import *
 from scipy.interpolate import *
 
+if not hasattr(numpy, 'full'):
+    # polyfill full for older numpy:
+    numpy.full = lambda a, f: numpy.zeros(a) + f
 
 class SumkDFTTools(SumkDFT):
     """
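For context, a hedged quick check (not part of this commit) that the polyfill above matches numpy.full for the simple fill-with-a-constant use it stands in for; shape and value are arbitrary:

# Hedged check of the polyfill's behaviour; shape and fill value are arbitrary.
import numpy

full = lambda a, f: numpy.zeros(a) + f          # same lambda as the polyfill above
print(numpy.allclose(full((2, 3), 1.5), numpy.full((2, 3), 1.5)))   # True on a numpy that has full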
@@ -767,8 +770,8 @@ class SumkDFTTools(SumkDFT):
                 self.Sigma_imp_w[icrsh] = BlockGf(
                     name_list=spn, block_list=glist(), make_copies=False)
                 for i, g in self.Sigma_imp_w[icrsh]:
-                    for iL in g.indices:
-                        for iR in g.indices:
+                    for iL in g.indices[0]:
+                        for iR in g.indices[0]:
                             for iom in xrange(n_om):
                                 g.data[iom, int(iL), int(iR)] = Sigma_save[
                                     i].data[ioffset + iom, int(iL), int(iR)]
@@ -23,7 +23,7 @@
 import copy
 import numpy
 from types import *
-from pytriqs.gf.local import *
+from pytriqs.gf import *
 from pytriqs.archive import *
 import pytriqs.utility.mpi as mpi
 
@@ -1,6 +1,6 @@
-from pytriqs.applications.dft.sumk_dft import *
-from pytriqs.applications.dft.converters import Wien2kConverter
-from pytriqs.gf.local import *
+from triqs_dft_tools.sumk_dft import *
+from triqs_dft_tools.converters import Wien2kConverter
+from pytriqs.gf import *
 from pytriqs.archive import *
 import pytriqs.utility.mpi as mpi
 import numpy
@@ -1,4 +1,4 @@
 #!/bin/bash
 
-@CMAKE_INSTALL_PREFIX@/bin/pytriqs -m pytriqs.applications.dft.converters.plovasp.converter $@
+@CMAKE_INSTALL_PREFIX@/bin/pytriqs -m triqs_dft_tools.converters.plovasp.converter $@
 
@@ -83,5 +83,5 @@ stdbuf -o 0 $MPIRUN_CMD -np $NPROC "$VASP_DIR" &
 
 PYTRIQS=@CMAKE_INSTALL_PREFIX@/bin/pytriqs
 
-$MPIRUN_CMD -np $NPROC $PYTRIQS -m pytriqs.applications.dft.converters.plovasp.sc_dmft $(jobs -p) $NITER $DMFT_SCRIPT 'plo.cfg' || kill %1
+$MPIRUN_CMD -np $NPROC $PYTRIQS -m triqs_dft_tools.converters.plovasp.sc_dmft $(jobs -p) $NITER $DMFT_SCRIPT 'plo.cfg' || kill %1
 
@@ -1,20 +1,19 @@
-# load triqs helper to set up tests
-find_package(TriqsTest)
 
 # Copy h5 files to binary dir
 FILE(GLOB all_h5_files RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.h5)
 file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/${all_h5_files} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 # Copy other files
-FILE(COPY SrVO3.pmat SrVO3.struct SrVO3.outputs SrVO3.oubwin SrVO3.ctqmcout SrVO3.symqmc SrVO3.sympar SrVO3.parproj hk_convert_hamiltonian.hk LaVO3-Pnma_hr.dat LaVO3-Pnma.inp DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
+FILE(COPY SrVO3.pmat SrVO3.struct SrVO3.outputs SrVO3.oubwin SrVO3.ctqmcout SrVO3.symqmc SrVO3.sympar SrVO3.parproj SrIrO3_rot.h5 hk_convert_hamiltonian.hk LaVO3-Pnma_hr.dat LaVO3-Pnma.inp DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 
-triqs_add_python_test(wien2k_convert)
-triqs_add_python_test(hk_convert)
-triqs_add_python_test(w90_convert)
-triqs_add_python_test(sumkdft_basic)
-triqs_add_python_test(srvo3_Gloc)
-triqs_add_python_test(srvo3_transp)
-triqs_add_python_test(sigma_from_file)
-triqs_add_python_test(blockstructure)
+# List all tests
+set(all_tests wien2k_convert hk_convert w90_convert sumkdft_basic srvo3_Gloc srvo3_transp sigma_from_file blockstructure analyze_block_structure_from_gf analyze_block_structure_from_gf2)
+
+foreach(t ${all_tests})
+  add_test(NAME ${t} COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/${t}.py)
+endforeach()
+
+# Set the PythonPath : put the build dir first (in case there is an installed version).
+set_property(TEST ${all_tests} PROPERTY ENVIRONMENT PYTHONPATH=${CMAKE_BINARY_DIR}/python:$ENV{PYTHONPATH} )
 
 # VASP converter tests
 add_subdirectory(plovasp)
new file (binary, not shown): test/SrIrO3_rot.h5

new file: test/analyze_block_structure_from_gf.py (232 lines)
@@ -0,0 +1,232 @@
from pytriqs.gf import *
|
||||||
|
from sumk_dft import SumkDFT
|
||||||
|
from scipy.linalg import expm
|
||||||
|
import numpy as np
|
||||||
|
from pytriqs.utility.comparison_tests import assert_gfs_are_close, assert_arrays_are_close, assert_block_gfs_are_close
|
||||||
|
from pytriqs.archive import *
|
||||||
|
import itertools
|
||||||
|
|
||||||
|
# The full test checks all different possible combinations of conjugated
|
||||||
|
# blocks. This takes a few minutes. For a quick test, just checking one
|
||||||
|
# random value suffices.
|
||||||
|
# (this parameter affects the second test)
|
||||||
|
full_test = False
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# First test #
|
||||||
|
# where we check the analyse_block_structure_from_gf function #
|
||||||
|
# for the SrIrO3_rot.h5 file #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
beta = 40
|
||||||
|
SK = SumkDFT(hdf_file = 'SrIrO3_rot.h5')
|
||||||
|
Sigma = SK.block_structure.create_gf(beta=beta)
|
||||||
|
SK.put_Sigma([Sigma])
|
||||||
|
G = SK.extract_G_loc()
|
||||||
|
|
||||||
|
# the original block structure
|
||||||
|
block_structure1 = SK.block_structure.copy()
|
||||||
|
|
||||||
|
G_new = SK.analyse_block_structure_from_gf(G)
|
||||||
|
|
||||||
|
# the new block structure
|
||||||
|
block_structure2 = SK.block_structure.copy()
|
||||||
|
|
||||||
|
with HDFArchive('analyze_block_structure_from_gf.out.h5','w') as ar:
|
||||||
|
ar['bs1'] = block_structure1
|
||||||
|
ar['bs2'] = block_structure2
|
||||||
|
|
||||||
|
# check whether the block structure is the same as in the reference
|
||||||
|
with HDFArchive('analyze_block_structure_from_gf.out.h5','r') as ar,\
|
||||||
|
HDFArchive('analyze_block_structure_from_gf.ref.h5','r') as ar2:
|
||||||
|
assert ar['bs1'] == ar2['bs1'], 'bs1 not equal'
|
||||||
|
a1 = ar['bs2']
|
||||||
|
a2 = ar2['bs2']
|
||||||
|
assert a1==block_structure2, "writing/reading block structure incorrect"
|
||||||
|
# we set the deg_shells to None because the transformation matrices
|
||||||
|
# have a phase freedom and will, therefore, not be equal in general
|
||||||
|
a1.deg_shells = None
|
||||||
|
a2.deg_shells = None
|
||||||
|
assert a1==a2, 'bs2 not equal'
|
||||||
|
|
||||||
|
# check if deg shells are correct
|
||||||
|
assert len(SK.deg_shells[0])==1, "wrong number of equivalent groups"
|
||||||
|
|
||||||
|
# check if the Green's functions that are found to be equal in the
|
||||||
|
# routine are indeed equal
|
||||||
|
for d in SK.deg_shells[0]:
|
||||||
|
assert len(d)==2, "wrong number of shells in equivalent group"
|
||||||
|
# the convention is that for every degenerate shell, the transformation
|
||||||
|
# matrix v and the conjugate bool is saved
|
||||||
|
# then,
|
||||||
|
# maybe_conjugate1( v1^dagger G1 v1 ) = maybe_conjugate2( v2^dagger G2 v2 )
|
||||||
|
# therefore, to test, we calculate
|
||||||
|
# maybe_conjugate( v^dagger G v )
|
||||||
|
# for all degenerate shells and check that they are all equal
|
||||||
|
normalized_gfs = []
|
||||||
|
for key in d:
|
||||||
|
normalized_gf = G_new[0][key].copy()
|
||||||
|
normalized_gf.from_L_G_R(d[key][0].conjugate().transpose(), G_new[0][key], d[key][0])
|
||||||
|
if d[key][1]:
|
||||||
|
normalized_gf << normalized_gf.transpose()
|
||||||
|
normalized_gfs.append(normalized_gf)
|
||||||
|
for i in range(len(normalized_gfs)):
|
||||||
|
for j in range(i+1,len(normalized_gfs)):
|
||||||
|
assert_arrays_are_close(normalized_gfs[i].data, normalized_gfs[j].data, 1.e-5)
|
||||||
|
# the tails have to be compared using a relative error
|
||||||
|
for o in range(normalized_gfs[i].tail.order_min,normalized_gfs[i].tail.order_max+1):
|
||||||
|
if np.abs(normalized_gfs[i].tail[o][0,0]) < 1.e-10:
|
||||||
|
continue
|
||||||
|
assert np.max(np.abs((normalized_gfs[i].tail[o]-normalized_gfs[j].tail[o])/(normalized_gfs[i].tail[o][0,0]))) < 1.e-5, \
|
||||||
|
"tails are different"
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# Second test #
|
||||||
|
# where a Green's function is constructed from a random model #
|
||||||
|
# and the analyse_block_structure_from_gf function is tested for that #
|
||||||
|
# model #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
# helper function to get random Hermitian matrix
|
||||||
|
def get_random_hermitian(dim):
|
||||||
|
herm = np.random.rand(dim,dim)+1.0j*np.random.rand(dim,dim)
|
||||||
|
herm = herm + herm.conjugate().transpose()
|
||||||
|
return herm
|
||||||
|
|
||||||
|
# helper function to get random unitary matrix
|
||||||
|
def get_random_transformation(dim):
|
||||||
|
herm = get_random_hermitian(dim)
|
||||||
|
T = expm(1.0j*herm)
|
||||||
|
return T
|
||||||
|
|
||||||
|
# we will conjugate the Green's function blocks according to the entries
|
||||||
|
# of conjugate_values
|
||||||
|
# for each of the 5 blocks that will be constructed, there is an entry
|
||||||
|
# True or False that says whether it will be conjugated
|
||||||
|
if full_test:
|
||||||
|
# in the full test we check all combinations
|
||||||
|
conjugate_values = list(itertools.product([False, True], repeat=5))
|
||||||
|
else:
|
||||||
|
# in the quick test we check a random combination
|
||||||
|
conjugate_values = [np.random.rand(5)>0.5]
|
||||||
|
|
||||||
|
for conjugate in conjugate_values:
|
||||||
|
# construct a random block-diagonal Hloc
|
||||||
|
Hloc = np.zeros((10,10), dtype=np.complex_)
|
||||||
|
# the Hloc of the first three 2x2 blocks is equal
|
||||||
|
Hloc0 = get_random_hermitian(2)
|
||||||
|
Hloc[:2,:2] = Hloc0
|
||||||
|
Hloc[2:4,2:4] = Hloc0
|
||||||
|
Hloc[4:6,4:6] = Hloc0
|
||||||
|
# the Hloc of the last two 2x2 blocks is equal
|
||||||
|
Hloc1 = get_random_hermitian(2)
|
||||||
|
Hloc[6:8,6:8] = Hloc1
|
||||||
|
Hloc[8:,8:] = Hloc1
|
||||||
|
# construct the hybridization delta
|
||||||
|
# this is equal for all 2x2 blocks
|
||||||
|
V = get_random_hermitian(2) # the hopping elements from impurity to bath
|
||||||
|
b1 = np.random.rand() # the bath energy of the first bath level
|
||||||
|
b2 = np.random.rand() # the bath energy of the second bath level
|
||||||
|
delta = G[0]['ud'][:2,:2].copy()
|
||||||
|
delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2))/2.0
|
||||||
|
delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2))/2.0
|
||||||
|
delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2))/2.0
|
||||||
|
delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2))/2.0
|
||||||
|
# construct G
|
||||||
|
G[0].zero()
|
||||||
|
for i in range(0,10,2):
|
||||||
|
G[0]['ud'][i:i+2,i:i+2] << inverse(Omega-delta)
|
||||||
|
G[0]['ud'] << inverse(inverse(G[0]['ud']) - Hloc)
|
||||||
|
|
||||||
|
# for testing symm_deg_gf below, we need this
|
||||||
|
# we construct it so that for every group of degenerate blocks of G[0], the
|
||||||
|
# mean of the blocks of G_noisy is equal to G[0]
|
||||||
|
G_noisy = G[0].copy()
|
||||||
|
noise1 = np.random.randn(*delta.target_shape)
|
||||||
|
G_noisy['ud'][:2,:2].data[:,:,:] += noise1
|
||||||
|
G_noisy['ud'][2:4,2:4].data[:,:,:] -= noise1/2.0
|
||||||
|
G_noisy['ud'][4:6,4:6].data[:,:,:] -= noise1/2.0
|
||||||
|
noise2 = np.random.randn(*delta.target_shape)
|
||||||
|
G_noisy['ud'][6:8,6:8].data[:,:,:] += noise2
|
||||||
|
G_noisy['ud'][8:,8:].data[:,:,:] -= noise2
|
||||||
|
|
||||||
|
# for testing backward-compatibility in symm_deg_gf, we need the
|
||||||
|
# un-transformed Green's functions
|
||||||
|
G_pre_transform = G[0].copy()
|
||||||
|
G_noisy_pre_transform = G_noisy.copy()
|
||||||
|
|
||||||
|
# transform each block using a random transformation matrix
|
||||||
|
for i in range(0,10,2):
|
||||||
|
T = get_random_transformation(2)
|
||||||
|
G[0]['ud'][i:i+2,i:i+2].from_L_G_R(T, G[0]['ud'][i:i+2,i:i+2], T.conjugate().transpose())
|
||||||
|
G_noisy['ud'][i:i+2,i:i+2].from_L_G_R(T, G_noisy['ud'][i:i+2,i:i+2], T.conjugate().transpose())
|
||||||
|
# if that block shall be conjugated, go ahead and do it
|
||||||
|
if conjugate[i//2]:
|
||||||
|
G[0]['ud'][i:i+2,i:i+2] << G[0]['ud'][i:i+2,i:i+2].transpose()
|
||||||
|
G_noisy['ud'][i:i+2,i:i+2] << G_noisy['ud'][i:i+2,i:i+2].transpose()
|
||||||
|
|
||||||
|
# analyse the block structure
|
||||||
|
G_new = SK.analyse_block_structure_from_gf(G, 1.e-7)
|
||||||
|
|
||||||
|
# transform G_noisy etc. to the new block structure
|
||||||
|
G_noisy = SK.block_structure.convert_gf(G_noisy, block_structure1, beta = G_noisy.mesh.beta)
|
||||||
|
G_pre_transform = SK.block_structure.convert_gf(G_pre_transform, block_structure1, beta = G_noisy.mesh.beta)
|
||||||
|
G_noisy_pre_transform = SK.block_structure.convert_gf(G_noisy_pre_transform, block_structure1, beta = G_noisy.mesh.beta)
|
||||||
|
|
||||||
|
assert len(SK.deg_shells[0]) == 2, "wrong number of equivalent groups found"
|
||||||
|
assert sorted([len(d) for d in SK.deg_shells[0]]) == [2,3], "wrong number of members in the equivalent groups found"
|
||||||
|
for d in SK.deg_shells[0]:
|
||||||
|
if len(d)==2:
|
||||||
|
assert 'ud_3' in d, "shell ud_3 missing"
|
||||||
|
assert 'ud_4' in d, "shell ud_4 missing"
|
||||||
|
if len(d)==3:
|
||||||
|
assert 'ud_0' in d, "shell ud_0 missing"
|
||||||
|
assert 'ud_1' in d, "shell ud_1 missing"
|
||||||
|
assert 'ud_2' in d, "shell ud_2 missing"
|
||||||
|
|
||||||
|
# the convention is that for every degenerate shell, the transformation
|
||||||
|
# matrix v and the conjugate bool is saved
|
||||||
|
# then,
|
||||||
|
# maybe_conjugate1( v1^dagger G1 v1 ) = maybe_conjugate2( v2^dagger G2 v2 )
|
||||||
|
# therefore, to test, we calculate
|
||||||
|
# maybe_conjugate( v^dagger G v )
|
||||||
|
# for all degenerate shells and check that they are all equal
|
||||||
|
normalized_gfs = []
|
||||||
|
for key in d:
|
||||||
|
normalized_gf = G_new[0][key].copy()
|
||||||
|
normalized_gf.from_L_G_R(d[key][0].conjugate().transpose(), G_new[0][key], d[key][0])
|
||||||
|
if d[key][1]:
|
||||||
|
normalized_gf << normalized_gf.transpose()
|
||||||
|
normalized_gfs.append(normalized_gf)
|
||||||
|
for i in range(len(normalized_gfs)):
|
||||||
|
for j in range(i+1,len(normalized_gfs)):
|
||||||
|
# here, we use a threshold that is 1 order of magnitude less strict
|
||||||
|
# because of numerics
|
||||||
|
assert_gfs_are_close(normalized_gfs[i], normalized_gfs[j], 1.e-6)
|
||||||
|
|
# now we check symm_deg_gf
# symmetrizing the GF as is has to leave it unchanged
G_new_symm = G_new[0].copy()
SK.symm_deg_gf(G_new_symm, 0)
assert_block_gfs_are_close(G_new[0], G_new_symm, 1.e-6)

# symmetrizing the noisy GF, which was carefully constructed,
# has to give the same result as G_new[0]
SK.symm_deg_gf(G_noisy, 0)
assert_block_gfs_are_close(G_new[0], G_noisy, 1.e-6)

# check backward compatibility of symm_deg_gf
# first, construct the old format of the deg shells
for ish in range(len(SK.deg_shells)):
    for gr in range(len(SK.deg_shells[ish])):
        SK.deg_shells[ish][gr] = SK.deg_shells[ish][gr].keys()

# symmetrizing the GF as is has to leave it unchanged
G_new_symm << G_pre_transform
SK.symm_deg_gf(G_new_symm, 0)
assert_block_gfs_are_close(G_new_symm, G_pre_transform, 1.e-6)

# symmetrizing the noisy GF pre transform, which was carefully constructed,
# has to give the same result as G_pre_transform
SK.symm_deg_gf(G_noisy_pre_transform, 0)
assert_block_gfs_are_close(G_noisy_pre_transform, G_pre_transform, 1.e-6)
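A minimal sketch of the deg_shells bookkeeping the test above relies on (illustrative only, not part of the test file): in the new format each entry of SK.deg_shells[ish] maps a solver block name to a pair (v, conjugate), whereas the old format, reconstructed in the backward-compatibility check, was just a list of block names. The helper below mirrors exactly the normalization done in the loop above.

def normalize_deg_block(g, v, conj):
    # return maybe_conjugate(v^dagger g v), as used in the equivalence check
    out = g.copy()
    out.from_L_G_R(v.conjugate().transpose(), g, v)
    if conj:
        out << out.transpose()
    return out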
BIN
test/analyze_block_structure_from_gf.ref.h5
Normal file
Binary file not shown.
115
test/analyze_block_structure_from_gf2.py
Normal file
@ -0,0 +1,115 @@
from pytriqs.gf import *
from sumk_dft import SumkDFT
import numpy as np
from scipy.linalg import expm   # needed by get_random_transformation below
from pytriqs.utility.comparison_tests import assert_block_gfs_are_close

# here we test the SK.analyse_block_structure_from_gf function
# with GfReFreq, GfReTime


# helper function to get a random Hermitian matrix
def get_random_hermitian(dim):
    herm = np.random.rand(dim,dim)+1.0j*np.random.rand(dim,dim)
    herm = herm + herm.conjugate().transpose()
    return herm

# helper function to get a random unitary matrix
def get_random_transformation(dim):
    herm = get_random_hermitian(dim)
    T = expm(1.0j*herm)
    return T
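A quick aside (not part of the test): for a Hermitian H, expm(1j*H) is unitary, which is why get_random_transformation yields a valid basis transformation. A standalone numpy/scipy sketch of that property:

import numpy as np
from scipy.linalg import expm

H = np.random.rand(2,2) + 1.0j*np.random.rand(2,2)
H = H + H.conjugate().transpose()            # make it Hermitian
T = expm(1.0j*H)                             # then T is unitary
assert np.allclose(T.conjugate().transpose().dot(T), np.eye(2))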

# construct a random block-diagonal Hloc
Hloc = np.zeros((10,10), dtype=np.complex_)
# the Hloc of the first three 2x2 blocks is equal
Hloc0 = get_random_hermitian(2)
Hloc[:2,:2] = Hloc0
Hloc[2:4,2:4] = Hloc0
Hloc[4:6,4:6] = Hloc0
# the Hloc of the last two 2x2 blocks is equal
Hloc1 = get_random_hermitian(2)
Hloc[6:8,6:8] = Hloc1
Hloc[8:,8:] = Hloc1

# construct the hybridization delta
# this is equal for all 2x2 blocks
V = get_random_hermitian(2) # the hopping elements from impurity to bath
b1 = np.random.rand() # the bath energy of the first bath level
b2 = np.random.rand() # the bath energy of the second bath level
delta = GfReFreq(window=(-5,5), indices=range(2), n_points=1001)
delta[0,0] << (V[0,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
delta[0,1] << (V[0,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[0,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
delta[1,0] << (V[1,0]*V[0,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[0,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
delta[1,1] << (V[1,0]*V[1,0].conjugate()*inverse(Omega-b1)+V[1,1]*V[1,1].conjugate()*inverse(Omega-b2+0.02j))/2.0
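The four assignments above are one instance of the usual bath hybridization Delta_ij(w) = sum_k V[i,k] V[j,k]^* / (w - eps_k + i*eta_k). A compact sketch written against the same pytriqs constructs used in this test (the helper name bath_delta and the arguments eps, eta are ours, purely illustrative):

def bath_delta(V, eps, eta, window=(-5,5), n_points=1001):
    # Delta_ij(w) = sum_k V[i,k] * V[j,k]^* / (w - eps[k] + 1j*eta[k])
    dim = V.shape[0]
    d = GfReFreq(window=window, indices=range(dim), n_points=n_points)
    for i in range(dim):
        for j in range(dim):
            expr = V[i,0]*V[j,0].conjugate()*inverse(Omega - eps[0] + 1j*eta[0])
            for k in range(1, len(eps)):
                expr = expr + V[i,k]*V[j,k].conjugate()*inverse(Omega - eps[k] + 1j*eta[k])
            d[i,j] << expr
    return d

# e.g. bath_delta(V, [b1, b2], [0.0, 0.02]) matches the delta above up to the overall factor 1/2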
# construct G
G = BlockGf(name_block_generator=[('ud',GfReFreq(window=(-5,5), indices=range(10), n_points=1001))], make_copies=False)
for i in range(0,10,2):
    G['ud'][i:i+2,i:i+2] << inverse(Omega-delta+0.02j)
G['ud'] << inverse(inverse(G['ud']) - Hloc)


SK = SumkDFT(hdf_file = 'SrIrO3_rot.h5', use_dft_blocks=False)
G_new = SK.analyse_block_structure_from_gf([G])
G_new_symm = G_new[0].copy()
SK.symm_deg_gf(G_new_symm, 0)
assert_block_gfs_are_close(G_new[0], G_new_symm)


assert SK.gf_struct_sumk == [[('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], [('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])]],\
    "wrong gf_struct_sumk"
for i in range(5):
    assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block"
    assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == range(2), "wrong block size"
for i in range(10):
    assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping"

assert len(SK.deg_shells[0]) == 2, "wrong number of equivalent groups found"
assert sorted([len(d) for d in SK.deg_shells[0]]) == [2,3], "wrong number of members in the equivalent groups found"
for d in SK.deg_shells[0]:
    if len(d)==2:
        assert 'ud_3' in d, "shell ud_3 missing"
        assert 'ud_4' in d, "shell ud_4 missing"
    if len(d)==3:
        assert 'ud_0' in d, "shell ud_0 missing"
        assert 'ud_1' in d, "shell ud_1 missing"
        assert 'ud_2' in d, "shell ud_2 missing"

def get_delta_from_mesh(mesh):
    w0 = None
    for w in mesh:
        if w0 is None:
            w0 = w
        else:
            return w-w0

Gt = BlockGf(name_block_generator = [(name,
    GfReTime(window=(-np.pi*(len(block.mesh)-1) / (len(block.mesh)*get_delta_from_mesh(block.mesh)), np.pi*(len(block.mesh)-1) / (len(block.mesh)*get_delta_from_mesh(block.mesh))),
             n_points=len(block.mesh),
             indices=block.indices)) for name, block in G], make_copies=False)

Gt['ud'].set_from_inverse_fourier(G['ud'])
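The time window above follows the usual discrete Fourier reciprocity: for an N-point frequency mesh with spacing dw, the matching N-point time mesh has spacing dt with dt*dw = 2*pi/N. A standalone numeric sketch with the values used in this test (N = 1001, frequency window (-5, 5)):

import numpy as np

N = 1001                       # n_points of the GfReFreq mesh
W = 5.0                        # half-width of the frequency window (-5, 5)
dw = 2.0*W/(N-1)               # what get_delta_from_mesh returns
t_max = np.pi*(N-1)/(N*dw)     # half-width of the GfReTime window chosen above
dt = 2.0*t_max/(N-1)           # resulting time spacing of the N-point mesh
assert abs(dt*dw - 2.0*np.pi/N) < 1e-12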

G_new = SK.analyse_block_structure_from_gf([Gt])
G_new_symm = G_new[0].copy()
SK.symm_deg_gf(G_new_symm, 0)
assert_block_gfs_are_close(G_new[0], G_new_symm)

assert SK.gf_struct_sumk == [[('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], [('ud', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])]],\
    "wrong gf_struct_sumk"
for i in range(5):
    assert 'ud_{}'.format(i) in SK.gf_struct_solver[0], "missing block"
    assert SK.gf_struct_solver[0]['ud_{}'.format(i)] == range(2), "wrong block size"
for i in range(10):
    assert SK.sumk_to_solver[0]['ud',i] == ('ud_{}'.format(i/2), i%2), "wrong mapping"

assert len(SK.deg_shells[0]) == 2, "wrong number of equivalent groups found"
assert sorted([len(d) for d in SK.deg_shells[0]]) == [2,3], "wrong number of members in the equivalent groups found"
for d in SK.deg_shells[0]:
    if len(d)==2:
        assert 'ud_3' in d, "shell ud_3 missing"
        assert 'ud_4' in d, "shell ud_4 missing"
    if len(d)==3:
        assert 'ud_0' in d, "shell ud_0 missing"
        assert 'ud_1' in d, "shell ud_1 missing"
        assert 'ud_2' in d, "shell ud_2 missing"
Binary file not shown.
@@ -1,8 +1,8 @@
-from pytriqs.applications.dft.sumk_dft import *
+from sumk_dft import *
 from pytriqs.utility.h5diff import h5diff
-from pytriqs.gf.local import *
+from pytriqs.gf import *
 from pytriqs.utility.comparison_tests import assert_block_gfs_are_close
-from pytriqs.applications.dft import BlockStructure
+from block_structure import BlockStructure
 
 SK = SumkDFT('blockstructure.in.h5',use_dft_blocks=True)
 
@@ -21,7 +21,8 @@ sk_pick1 = BlockStructure(gf_struct_sumk = SK.gf_struct_sumk,
 gf_struct_solver = SK.gf_struct_solver,
 solver_to_sumk = SK.solver_to_sumk,
 sumk_to_solver = SK.sumk_to_solver,
-solver_to_sumk_block = SK.solver_to_sumk_block)
+solver_to_sumk_block = SK.solver_to_sumk_block,
+deg_shells = SK.deg_shells)
 assert sk_pick1 == pick1, 'constructing block structure from SumkDFT properties failed'
 
 # check pick_gf_struct_sumk
Binary file not shown.
@@ -21,11 +21,12 @@
 ################################################################################
 
 
-from pytriqs.applications.dft.converters import *
 from pytriqs.archive import *
 from pytriqs.utility.h5diff import h5diff
 import pytriqs.utility.mpi as mpi
 
+from converters import *
 
 Converter = HkConverter(filename='hk_convert_hamiltonian.hk',hdf_filename='hk_convert.out.h5')
 
 Converter.convert_dft_input()
@@ -1,5 +1,5 @@
 # load triqs helper to set up tests
-set(TestSuites
+set(all_tests
 inpconf
 # plocar_io
 plotools
@@ -8,10 +8,11 @@ set(TestSuites
 vaspio
 atm)
 
-FILE(COPY ${TestSuites} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
+FILE(COPY ${all_tests} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 FILE(COPY run_suite.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
 
-foreach(test_suite ${TestSuites})
-add_test(${test_suite}
-${PythonBuildExecutable} run_suite.py ${test_suite})
-endforeach(test_suite ${TestSuites})
+foreach(t ${all_tests})
+add_test(NAME ${t} COMMAND python run_suite.py ${t})
+endforeach()
+
+set_property(TEST ${all_tests} PROPERTY ENVIRONMENT PYTHONPATH=${CMAKE_BINARY_DIR}/python:$ENV{PYTHONPATH} )
@@ -2,7 +2,7 @@
 import os
 
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.atm import dos_tetra_weights_3d
+from converters.plovasp.atm import dos_tetra_weights_3d
 import mytest
 
 ################################################################################
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import arraytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import arraytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import arraytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import arraytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import arraytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import arraytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
 
 ################################################################################
 #
@@ -1,8 +1,8 @@
 
-import pytriqs.applications.dft.converters.plovasp.vaspio
+import converters.plovasp.vaspio
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
-from pytriqs.applications.dft.converters.plovasp.plotools import check_data_consistency
+from converters.plovasp.plotools import check_data_consistency
-from pytriqs.applications.dft.converters.plovasp.elstruct import ElectronicStructure
+from converters.plovasp.elstruct import ElectronicStructure
 import mytest
 
 ################################################################################
@@ -4,9 +4,9 @@ import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
-from pytriqs.applications.dft.converters.plovasp.proj_shell import ProjectorShell
+from converters.plovasp.proj_shell import ProjectorShell
-from pytriqs.applications.dft.converters.plovasp.proj_group import ProjectorGroup
+from converters.plovasp.proj_group import ProjectorGroup
 import mytest
 
 ################################################################################
@@ -4,11 +4,11 @@ import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import VaspData
+from converters.plovasp.vaspio import VaspData
-from pytriqs.applications.dft.converters.plovasp.elstruct import ElectronicStructure
+from converters.plovasp.elstruct import ElectronicStructure
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
-from pytriqs.applications.dft.converters.plovasp.proj_shell import ProjectorShell
+from converters.plovasp.proj_shell import ProjectorShell
-from pytriqs.applications.dft.converters.plovasp.proj_group import ProjectorGroup
+from converters.plovasp.proj_group import ProjectorGroup
 from pytriqs.archive import HDFArchive
 import mytest
 
@@ -4,11 +4,11 @@ import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import VaspData
+from converters.plovasp.vaspio import VaspData
-from pytriqs.applications.dft.converters.plovasp.elstruct import ElectronicStructure
+from converters.plovasp.elstruct import ElectronicStructure
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
-from pytriqs.applications.dft.converters.plovasp.proj_shell import ProjectorShell
+from converters.plovasp.proj_shell import ProjectorShell
-from pytriqs.applications.dft.converters.plovasp.proj_group import ProjectorGroup
+from converters.plovasp.proj_group import ProjectorGroup
 import mytest
 
 ################################################################################
@@ -4,11 +4,11 @@ import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import VaspData
+from converters.plovasp.vaspio import VaspData
-from pytriqs.applications.dft.converters.plovasp.elstruct import ElectronicStructure
+from converters.plovasp.elstruct import ElectronicStructure
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
-from pytriqs.applications.dft.converters.plovasp.proj_shell import ProjectorShell
+from converters.plovasp.proj_shell import ProjectorShell
-from pytriqs.applications.dft.converters.plovasp.proj_group import ProjectorGroup
+from converters.plovasp.proj_group import ProjectorGroup
 from pytriqs.archive import HDFArchive
 import mytest
 
@@ -4,11 +4,11 @@ import rpath
 _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import VaspData
+from converters.plovasp.vaspio import VaspData
-from pytriqs.applications.dft.converters.plovasp.elstruct import ElectronicStructure
+from converters.plovasp.elstruct import ElectronicStructure
-from pytriqs.applications.dft.converters.plovasp.inpconf import ConfigParameters
+from converters.plovasp.inpconf import ConfigParameters
-from pytriqs.applications.dft.converters.plovasp.proj_shell import ProjectorShell
+from converters.plovasp.proj_shell import ProjectorShell
-from pytriqs.applications.dft.converters.plovasp.proj_group import ProjectorGroup
+from converters.plovasp.proj_group import ProjectorGroup
 import mytest
 
 ################################################################################
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import mytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import Doscar
+from converters.plovasp.vaspio import Doscar
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import mytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import Eigenval
+from converters.plovasp.vaspio import Eigenval
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import mytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import Kpoints
+from converters.plovasp.vaspio import Kpoints
 
 ################################################################################
 #
@@ -7,7 +7,7 @@ _rpath = os.path.dirname(rpath.__file__) + '/'
 
 import mytest
 import numpy as np
-from pytriqs.applications.dft.converters.plovasp.vaspio import Poscar
+from converters.plovasp.vaspio import Poscar
 
 ################################################################################
 #
@@ -20,9 +20,9 @@
 ################################################################################
 
 from pytriqs.archive import *
-from pytriqs.gf.local import *
+from pytriqs.gf import *
-from pytriqs.gf.local.tools import *
+from pytriqs.gf.tools import *
-from pytriqs.applications.dft.sumk_dft_tools import *
+from sumk_dft_tools import *
 from pytriqs.utility.comparison_tests import *
 import numpy as np
 
@@ -20,9 +20,9 @@
 ################################################################################
 
 from pytriqs.archive import *
-from pytriqs.gf.local import *
+from pytriqs.gf import *
-from pytriqs.applications.dft.sumk_dft import *
+from sumk_dft import *
-from pytriqs.applications.dft.converters.wien2k_converter import *
+from converters.wien2k_converter import *
 from pytriqs.operators.util import set_operator_structure
 from pytriqs.utility.comparison_tests import *
 from pytriqs.utility.h5diff import h5diff
@@ -40,8 +40,8 @@ orb_names = ['%s'%i for i in range(num_orbitals)]
 orb_hybridized = False
 
 gf_struct = set_operator_structure(spin_names,orb_names,orb_hybridized)
-glist = [ GfImFreq(indices=inner,beta=beta) for block,inner in gf_struct.iteritems()]
+glist = [ GfImFreq(indices=inner,beta=beta) for block,inner in gf_struct]
-Sigma_iw = BlockGf(name_list = gf_struct.keys(), block_list = glist, make_copies = False)
+Sigma_iw = BlockGf(name_list = [block for block,inner in gf_struct], block_list = glist, make_copies = False)
 
 SK.set_Sigma([Sigma_iw])
 Gloc = SK.extract_G_loc()
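The hunk above reflects a change in how gf_struct is consumed: the updated test iterates it directly as (block, indices) pairs instead of calling .iteritems() and .keys() on a dict. A minimal sketch of the two conventions (block names, indices, and beta below are placeholder values, not taken from the converter output):

from pytriqs.gf import *

# old convention: a dict, whose Python 2 iteration order is not guaranteed
gf_struct_old = {'up': [0, 1, 2], 'down': [0, 1, 2]}
glist_old = [GfImFreq(indices=inner, beta=40.0) for block, inner in gf_struct_old.iteritems()]
Sigma_old = BlockGf(name_list=gf_struct_old.keys(), block_list=glist_old, make_copies=False)

# new convention: a list of (block, indices) pairs with an explicit order
gf_struct_new = [('up', [0, 1, 2]), ('down', [0, 1, 2])]
glist_new = [GfImFreq(indices=inner, beta=40.0) for block, inner in gf_struct_new]
Sigma_new = BlockGf(name_list=[block for block, inner in gf_struct_new], block_list=glist_new, make_copies=False)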
@@ -20,9 +20,9 @@
 ################################################################################
 
 from numpy import *
-from pytriqs.applications.dft.converters.wien2k_converter import *
+from converters.wien2k_converter import *
-from pytriqs.applications.dft.sumk_dft import *
+from sumk_dft import *
-from pytriqs.applications.dft.sumk_dft_tools import *
+from sumk_dft_tools import *
 from pytriqs.utility.comparison_tests import *
 from pytriqs.utility.h5diff import h5diff
 
@@ -21,7 +21,7 @@
 ################################################################################
 
 from pytriqs.archive import *
-from pytriqs.applications.dft.sumk_dft_tools import SumkDFTTools
+from sumk_dft_tools import SumkDFTTools
 import pytriqs.utility.mpi as mpi
 from pytriqs.utility.comparison_tests import *
 from pytriqs.utility.h5diff import h5diff
@@ -21,7 +21,7 @@
 ################################################################################
 
 
-from pytriqs.applications.dft.converters import *
+from converters import *
 from pytriqs.archive import *
 from pytriqs.utility.h5diff import h5diff
 import pytriqs.utility.mpi as mpi
@@ -21,11 +21,12 @@
 ################################################################################
 
 from pytriqs.archive import *
-from pytriqs.applications.dft.converters import Wien2kConverter
 from pytriqs.utility.comparison_tests import *
 from pytriqs.utility.h5diff import h5diff
 import pytriqs.utility.mpi as mpi
 
+from converters import Wien2kConverter
 
 Converter = Wien2kConverter(filename='SrVO3')
 Converter.hdf_file = 'wien2k_convert.out.h5'
 Converter.convert_dft_input()