Mimi new version

This commit is contained in:
Pierre-Francois Loos 2022-09-30 16:14:22 +02:00
parent cf64f5d24f
commit 0b2f85d96d
6 changed files with 380 additions and 178 deletions

BIN
fig1.pdf

Binary file not shown.

BIN
fig2.pdf

Binary file not shown.

BIN
fig3.pdf Normal file

Binary file not shown.

BIN
fig4.pdf Normal file

Binary file not shown.

73
g.bib

@ -1,13 +1,57 @@
%% This BibTeX bibliography file was created using BibDesk.
%% http://bibdesk.sourceforge.net/
%% Created for Pierre-Francois Loos at 2022-09-30 16:13:18 +0200
%% Saved with string encoding Unicode (UTF-8)
@misc{note,
note = {As $\tau \rightarrow 0$ and $N \rightarrow \infty$ with $N\tau=t$, the operator $T^N$ converges to $e^{-t(H-E \Id)}$. We then have $G^E_{ij} \rightarrow \int_0^{\infty} dt \mel{i}{e^{-t(H-E \Id)}}{j}$, which is the Laplace transform of the time-dependent Green's function $\mel{i}{e^{-t(H-E \Id)}}{j}$.}}
@article{Willow_2012,
author = {Willow, Soohaeng Yoo and Kim, Kwang S. and Hirata, So},
date-modified = {2022-09-30 16:10:46 +0200},
doi = {10.1063/1.4768697},
journal = {J. Chem. Phys.},
number = {20},
pages = {204122},
title = {Stochastic evaluation of second-order many-body perturbation energies},
volume = {137},
year = {2012},
bdsk-url-1 = {https://doi.org/10.1063/1.4768697}}
@article{Petruzielo_2012,
author = {Petruzielo, F. R. and Holmes, A. A. and Changlani, Hitesh J. and Nightingale, M. P. and Umrigar, C. J.},
date-modified = {2022-09-30 16:12:05 +0200},
doi = {10.1103/PhysRevLett.109.230201},
issue = {23},
journal = {Phys. Rev. Lett.},
month = {Dec},
numpages = {5},
pages = {230201},
publisher = {American Physical Society},
title = {Semistochastic Projector Monte Carlo Method},
url = {https://link.aps.org/doi/10.1103/PhysRevLett.109.230201},
volume = {109},
year = {2012},
bdsk-url-1 = {https://link.aps.org/doi/10.1103/PhysRevLett.109.230201},
bdsk-url-2 = {https://doi.org/10.1103/PhysRevLett.109.230201}}
@article{Caffarel_2019,
author = {Caffarel, Michel},
date-modified = {2022-09-30 16:11:35 +0200},
doi = {10.1063/1.5114703},
journal = {J. Chem. Phys.},
number = {6},
pages = {064101},
title = {Evaluating two-electron-repulsion integrals over arbitrary orbitals using zero variance Monte Carlo: Application to full configuration interaction calculations with Slater-type orbitals},
volume = {151},
year = {2019},
bdsk-url-1 = {https://doi.org/10.1063/1.5114703}}
@article{Zhang_2003,
author = {Zhang, Shiwei and Krakauer, Henry},
doi = {10.1103/PhysRevLett.90.136401},
@ -265,7 +309,8 @@
pages = {104510},
title = {Many-body wavefunctions for normal liquid $^{3}\mathrm{He}$},
volume = {74},
year = {2006},
bdsk-url-1 = {https://doi.org/10.1103/PhysRevB.74.104510}}
@article{Foulkes_2001,
author = {Foulkes, W. M. C. and Mitas, L. and Needs, R. J. and Rajagopal, G.},
@ -456,11 +501,12 @@
year = {2016},
bdsk-url-1 = {http://dx.doi.org/10.1021/acs.jctc.6b00044}}
@article{Zhou_2017,
author = {Zhou, Xiaojun and Wang, Fan},
date-modified = {2022-09-30 16:11:07 +0200},
doi = {10.1002/jcc.24750},
issn = {1096-987X},
journal = {J. Comput. Chem.},
keywords = {FN-DMC ⋅ barrier heights ⋅ H-transfer reactions ⋅ CCSD(T) ⋅ density functional theory},
number = {11},
pages = {798--806},
@ -1284,16 +1330,13 @@
@article{Garniron_2017b,
author = {Garniron, Yann and Scemama, Anthony and Loos, Pierre-Fran{\c c}ois and Caffarel, Michel},
date-modified = {2022-09-30 16:13:18 +0200},
doi = {10.1063/1.4992127},
journal = {J. Chem. Phys.},
month = {Jul},
number = {3},
pages = {034101},
title = {Hybrid stochastic-deterministic calculation of the second-order perturbative contribution of multireference perturbation theory},
volume = {147},
year = {2017},
bdsk-url-1 = {http://dx.doi.org/10.1063/1.4992127}}
@ -1316,16 +1359,13 @@
@article{Garniron_2017a,
author = {Garniron, Yann and Giner, Emmanuel and Malrieu, Jean-Paul and Scemama, Anthony},
date-modified = {2022-09-30 16:13:07 +0200},
doi = {10.1063/1.4980034},
journal = {J. Chem. Phys.},
month = {Apr},
number = {15},
pages = {154107},
title = {Alternative definition of excitation amplitudes in multi-reference state-specific coupled cluster},
volume = {146},
year = {2017},
bdsk-url-1 = {http://dx.doi.org/10.1063/1.4980034}}
@ -1756,16 +1796,13 @@
@article{Sharma_2017,
author = {Sharma, Sandeep and Holmes, Adam A. and Jeanmairet, Guillaume and Alavi, Ali and Umrigar, C. J.},
date-modified = {2022-09-30 16:12:36 +0200},
doi = {10.1021/acs.jctc.6b01028},
journal = {J. Chem. Theory Comput.},
month = {Mar},
number = {4},
pages = {1595--1604},
title = {Semistochastic Heat-Bath Configuration Interaction Method: Selected Configuration Interaction with Semistochastic Perturbation Theory},
volume = {13},
year = {2017},
bdsk-url-1 = {http://dx.doi.org/10.1021/acs.jctc.6b01028}}

485
g.tex

@ -189,7 +189,17 @@ In their pioneering work, \cite{Kalos_1974} Kalos and collaborators introduced t
The domain used was the Cartesian product of small spheres around each particle, the Hamiltonian being approximated by the kinetic part only within the domain.
Some time later, Kalos proposed to extend these ideas to more general domains such as rectangular and/or cylindrical domains. \cite{Kalos_2000} In both works, the size of the domains is infinitely small in the limit of a vanishing time-step.
Here, the domains are of arbitrary size, thus greatly increasing the efficiency of the approach.
Note also that general equations for arbitrary domains in continuous space have been proposed by one of us in Ref.~\onlinecite{Assaraf_1999B}.
Finally, from a general perspective, it is interesting to mention that the method proposed here is an example
of how combining stochastic and deterministic techniques can be valuable.
%Here, a new stochastic process is built by taking advantage of a partial exact (deterministic) summation of local quantities.
In recent years, a number of works have exploited this idea and proposed hybrid stochastic/deterministic schemes.
Let us mention, for example,
the semi-stochastic approach of Petruzielo {\it et al.},\cite{Petruzielo_2012}
two different methods for evaluating stochastically the second-order perturbation energy of selected CI methods,\cite{Garniron_2017b,Sharma_2017}
the stochastic approach of Willow {\it et al.} for the second-order many-body correction to the Hartree-Fock energy,\cite{Willow_2012}
or the zero variance Monte Carlo scheme for evaluating the two-electron integrals of quantum chemistry.\cite{Caffarel_2019}
The paper is organized as follows.
Section \ref{sec:DMC} presents the basic equations and notations of DMC.
@ -202,9 +212,6 @@ In Subsec.~\ref{sec:Green}, both the time- and energy-dependent Green's function
Section \ref{sec:numerical} presents the application of the approach to the one-dimensional Hubbard model.
Finally, in Sec.~\ref{sec:conclu}, some conclusions and perspectives are given.
Atomic units are used throughout.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Diffusion Monte Carlo}
\label{sec:DMC}
@ -229,16 +236,16 @@ The equality in Eq.~\eqref{eq:limTN} holds up to a global phase factor playing n
At large but finite $N$, the vector $T^N \ket{\Psi_0}$ differs from $\ket{\Phi_0}$ only by an exponentially small correction, making it straightforward to extrapolate the finite-$N$ results to $N \to \infty$.
Likewise, ground-state properties may be obtained at large $N$.
For example, in the important case of the energy, one can project the vector $T^N \ket{\Psi_0}$ onto some approximate vector, $\ket{\PsiT}$, as follows
\be
\label{eq:E0}
E_0 = \lim_{N \to \infty } \frac{\mel{\Psi_0}{T^N}{H\Psi_T}}{\mel{\Psi_0}{T^N}{\Psi_T}}.
\ee
$\ket{\PsiT}$, known as the trial wave vector (or function), is chosen as an approximation of the true ground-state vector.
To proceed further we introduce the time-dependent Green's matrix $G^{(N)}$ defined as
\be
G^{(N)}_{ij}=\mel{i}{T^N}{j},
\ee
where $\ket{i}$ and $\ket{j}$ are basis vectors.
The denomination ``time-dependent Green's matrix'' is used here since $G$ may be viewed as a short-time approximation of the (time-imaginary) evolution operator $e^{-N\tau H}$, which is usually referred to as the imaginary-time dependent Green's function.
@ -261,12 +268,25 @@ Each path is associated with a weight $\prod_{k=0}^{N-1} T_{i_{k} i_{k+1}}$ and
This expression allows a simple and vivid interpretation of the solution.
In the limit $N \to \infty$, the $i_N$th component of the ground-state wave function (obtained as $\lim_{N \to \infty} G^{(N)}_{i_0 i_N})$ is the weighted sum over all possible paths arriving at vector $\ket{i_N}$.
This result is independent of the initial vector $\ket{i_0}$, apart from some irrelevant global phase factor. We illustrate this fundamental property pictorially in Fig.~\ref{fig1}.
When the size of the linear space is small, the explicit calculation of the full sums involving $M^N$ terms (where $M$ is the size of the Hilbert space) can be performed.
In such a case, we are in the realm of what one would call the ``deterministic'' power methods, such as the Lanczos or Davidson approaches.
If not, probabilistic techniques for generating only the paths contributing significantly to the sums are to be preferred.
This is the central theme of the present work.
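For illustration purposes only, the deterministic power method can be summarized in a few lines of code. The Python sketch below is a toy example, not the implementation used in this work: the random symmetric matrix standing for $H$ and the simple choice $T=\Id-\tau H$ are illustrative assumptions, and $E_0$ is extracted through the projection formula of Eq.~\eqref{eq:E0}.
\begin{verbatim}
import numpy as np

# Sketch of the deterministic power method for a toy symmetric Hamiltonian,
# assuming the simple propagator T = Id - tau*H (valid for tau small enough).
rng = np.random.default_rng(0)
M, tau = 36, 0.02
A = rng.normal(size=(M, M))
H = (A + A.T) / 2                     # hypothetical Hamiltonian matrix
T = np.eye(M) - tau * H
psi0 = np.ones(M)                     # initial vector with <i0|Phi_0> != 0
psiT = np.ones(M)                     # crude trial vector

v = psi0.copy()
for _ in range(5000):                 # v -> T^N |Psi_0>
    v = T @ v
    v /= np.linalg.norm(v)            # removes an irrelevant global factor

E0 = (v @ (H @ psiT)) / (v @ psiT)    # projection formula, Eq. (eq:E0)
print(E0, "vs exact:", np.linalg.eigvalsh(H)[0])
\end{verbatim}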
%%% FIG 0 %%%
\begin{figure}
\includegraphics[width=\columnwidth]{fig1}
\caption{Path integral representation of the exact coefficient $c_i=\braket{i}{\Psi_0}$ of the ground-state wavefunction obtained as an infinite sum of paths starting
from $\ket{i_0}$ and ending at $\ket{i}$, Eq.~\eqref{eq:G}. Each path carries a weight $\prod_k T_{i_{k} i_{k+1}}$ computed along the path.
The result is independent of the choice of the initial state $\ket{i_0}$, provided that $\braket{i_0}{\Psi_0}$ is non-zero.
Here, only four paths of infinite length have been represented.
}
\label{fig1}
\end{figure}
%=======================================%
\subsection{Probabilistic framework}
\label{sec:proba}
@ -356,44 +376,65 @@ Note that one can eschew this condition via a simple generalization of the trans
\ee
This new transition probability matrix with positive entries reduces to Eq.~\eqref{eq:pij} when $T^+_{ij}$ is positive as $\sum_j \PsiG_{j} T^+_{ij} = 1$.
Now, we need to make the connection between the transition probability matrix $p_{i \to j}$, just defined from the operator $T^+$ corresponding
to the approximate Hamiltonian $H^{+}$, and the operator $T$ associated with the exact Hamiltonian $H$.
This is done through the relation, Eq.~\eqref{eq:defTij}, connecting $p_{i \to j}$ and $T_{ij}$ via a weight.
Using Eqs.~\eqref{eq:defT} and \eqref{eq:pij}, the weights read
\be
w_{ij}=\frac{T_{ij}}{T^+_{ij}}.
\ee
Using these notations, the similarity-transformed Green's matrix components can be rewritten as
\be
\label{eq:GN_simple}
\bar{G}^{(N)}_{i_0 i_N} =
\sum_{i_1,\ldots,i_{N-1}} \qty( \prod_{k=0}^{N-1} p_{i_{k} \to i_{k+1}} ) \prod_{k=0}^{N-1} w_{i_{k}i_{k+1}},
\ee
which is amenable to Monte Carlo calculations by generating paths using the transition probability matrix, $p_{i \to j}$.
Let us illustrate this in the case of the energy as given by Eq.~\eqref{eq:E0}. Taking $\ket{\Psi_0}=\ket{i_0}$ as the initial condition, we have
\be
E_0 = \lim_{N \to \infty }
\frac{ \sum_{i_N} G^{(N)}_{i_0 i_N} {(H\PsiT)}_{i_N} }
{ \sum_{i_N} {G}^{(N)}_{i_0 i_N} {\PsiT}_{i_N} },
\ee
which can be rewritten probabilistically as
\be
E_0 = \lim_{N \to \infty }
\frac{ \expval{ \prod_{k=0}^{N-1} w_{i_{k}i_{k+1}} \frac{ {(H\PsiT)}_{i_N} }{ \PsiG_{i_N} }}}
{ \expval{ \prod_{k=0}^{N-1} w_{i_{k}i_{k+1}} \frac{ {\PsiT}_{i_N} } {\PsiG_{i_N}} }},
\ee
where $\expval{...}$ is the probabilistic average defined over the set of paths $\ket{i_1},\ldots,\ket{i_N}$ occurring with probability
\be
\text{Prob}_{i_0}(i_1,\ldots,i_{N}) = \prod_{k=0}^{N-1} p_{i_{k} \to i_{k+1}}.
\ee
Using Eq.~\eqref{eq:sumup} and the fact that $p_{i \to j} \ge 0$, one can easily verify that $\text{Prob}_{i_0}$ is positive and obeys
\be
\sum_{i_1,\ldots,i_{N}} \text{Prob}_{i_0}(i_1,\ldots,i_{N}) = 1,
\ee
as it should.
%For a given path $i_1,\ldots,i_{N-1}$, the probabilistic average associated with this probability is then defined as
%\be
%\label{eq:average}
% \expval{F} = \sum_{i_1,\ldots,i_{N-1}} F(i_0,\ldots,i_N) \text{Prob}_{i_0 \to i_N}(i_1,\ldots,i_{N-1}),
%\ee
%where $F$ is an arbitrary function.
%Finally, the path-integral expressed as a probabilistic average reads
%\be
%\label{eq:cn_stoch}
% \bar{G}^{(N)}_{i_0 i_N}= \expval{ \prod_{k=0}^{N-1} w_{i_{k}i_{k+1}}}.
%\ee
To calculate the probabilistic averages,
an artificial (mathematical) ``particle'', called a walker (or psi-particle), is introduced.
During the Monte Carlo simulation, the walker moves in configuration space by drawing new states with
probability $p_{i_k \to i_{k+1}}$, thus realizing the path of probability $\text{Prob}_{i_0}$.
%In this framework, the energy defined in Eq.~\eqref{eq:E0} is given by
%\be
% E_0 = \lim_{N \to \infty }
% \frac{ \expval{ \prod_{k=0}^{N-1} w_{i_{k}i_{k+1}} {(H\PsiT)}_{i_N}} }
% { \expval{ \prod_{k=0}^{N-1} w_{i_{k}i_{k+1}} {\PsiT}_{i_N} }}.
%\ee
%A schematic algorithm is presented in Fig.\ref{scheme1B}.
Note that, instead of using a single walker, it is common to introduce a population of independent walkers and to calculate the averages over this population.
In addition, thanks to the ergodic property of the stochastic matrix (see, for example, Ref.~\onlinecite{Caffarel_1988}), a unique path of infinite length from which sub-paths of length $N$ can be extracted may also be used.
We shall not dwell here on these practical details, which are discussed, for example, in Refs.~\onlinecite{Foulkes_2001,Kolorenc_2011}.
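As an illustration of the weighted-path estimator of $E_0$ just described, here is a self-contained toy sketch. The sign-problem-free random Hamiltonian (nonpositive off-diagonal elements), the uniform trial and guiding vectors, and the absorption of the guiding-function factors into the residual weight $w=T_{ij}/p_{i \to j}$ are simplifying assumptions for the purpose of the demonstration.
\begin{verbatim}
import numpy as np

# Toy weighted-path estimate of E_0: paths are drawn with p_{i->j}
# proportional to PsiG_j T_{ij}, and carry the weight w = T_{ij}/p_{i->j}.
rng = np.random.default_rng(1)
M, tau, N = 8, 0.1, 100
A = rng.random((M, M))
H = -(A + A.T) / 2                     # nonpositive off-diagonal: no sign problem
np.fill_diagonal(H, rng.random(M))     # arbitrary diagonal
T = np.eye(M) - tau * H
psiT = np.ones(M)                      # crude trial vector
psiG = np.ones(M)                      # guiding vector (a better one lowers variance)

p = T * psiG[None, :]
p /= p.sum(axis=1, keepdims=True)      # transition probability p_{i->j}

num = den = 0.0
for _ in range(5000):                  # independent walkers, all starting at |0>
    i, w = 0, 1.0
    for _ in range(N):
        j = rng.choice(M, p=p[i])
        w *= T[i, j] / p[i, j]         # accumulate the residual weight
        i = j
    num += w * (H @ psiT)[i]
    den += w * psiT[i]
print("E0 (MC) ~", num / den, " exact:", np.linalg.eigvalsh(H)[0])
\end{verbatim}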
@ -483,28 +524,55 @@ This series can be recast
where $\ket{I_0}=\ket{i_0}$ is the initial state, $n_0$ is the number of times the walker remains in the domain of $\ket{i_0}$ (with $1 \le n_0 \le N+1$), $\ket{I_1}$ is the first exit state that does not belong to $\cD_{i_0}$, $n_1$ is the number of times the walker remains in $\cD_{i_1}$ (with $1 \le n_1 \le N+1-n_0$), $\ket{I_2}$ is the second exit state, and so on.
Here, the integer $p$ (with $0 \le p \le N$) indicates the number of exit events occurring along the path.
The two extreme values, $p=0$ and $p=N$, correspond to the cases where the walker remains in the initial domain during the entire path, and where the walker exits a domain at each step, respectively.
In what follows, we shall systematically write the integers representing the exit states in capital letters, while lowercase letters will be used to
denote the elementary states $\ket{i_k}$ generated with $p_{i \to j}$. Making this distinction is important since the effective
stochastic dynamics used in practical Monte Carlo calculations will only involve exit states, the contribution of the elementary states $\ket{i_k}$ being
exactly integrated out.
Figure \ref{fig2} exemplifies how a path can be decomposed as proposed in Eq.~\eqref{eq:eff_series}.
To make things as clear as possible, let us describe in detail how the path drawn in Fig.~\ref{fig2} evolves in time.
The walker realizing the path starts at $\ket{i_0}$ within the domain $\cD_{i_0}$. It then makes two steps to arrive at $\ket{i_1}$ and $\ket{i_2}$ and, finally, leaves the domain
at $\ket{i_3}$. The state $\ket{i_3}$ is the first exit state and is denoted as $\ket{I_1}$ $(=\ket{i_3})$ following our convention of denoting exit states
with capital letters. The trapping time in $\cD_{i_0}$ is $n_0=3$ since three states of the domain have been visited
(namely, $\ket{i_0}$, $\ket{i_1}$, and $\ket{i_2}$).
During the next steps, the domains $\cD_{I_1}$, $\cD_{I_2}$, and $\cD_{I_3}$ are successively visited with $n_1=2$, $n_2=3$, and $n_3=1$, respectively.
The corresponding exit states are $\ket{I_2}=\ket{i_5}$, $\ket{I_3}=\ket{i_8}$, and $\ket{I_4}=\ket{i_9}$. This work takes advantage of
the fact that each possible path can be decomposed in this way.
%Generalizing what has been done for domains consisting of only one single state, the general idea here is to integrate out exactly the stochastic dynamics over the
%set of all paths having the same representation, Eq.(\ref{eff_series}). As a consequence, an effective Monte Carlo dynamics including only exit states
%averages for renormalized quantities will be defined.\\
%%% FIG 0B %%%
\begin{figure}
\includegraphics[width=\columnwidth,angle=0]{fig2}
\caption{Representation of a path in terms of exit states, $\ket{I_k}$, and trapping times, $n_k$. The
states $\ket{i_k}$ along the path are represented with small black solid circles and the exit states, $\ket{I_k}$, with larger black solid squares.
By convention, the initial state $\ket{i_0}$ is denoted using a capital letter, $\ket{I_0}$, since it will be the first state of the effective dynamics
involving only exit states.
See text for comments on the time evolution of the path.}
\label{fig2}
\end{figure}
Now, generalizing what has been done previously for a single-state domain, let us define the probability of remaining $n$ times in the domain of $\ket{I_0}$ and then exiting at $\ket{I} \notin \cD_{I_0}$. This probability is given by
\be
\label{eq:eq1C}
\cP_{I_0 \to I}(n)
= \sum_{\ket{i_1} \in \cD_{I_0}} \cdots \sum_{\ket{i_{n-1}} \in \cD_{I_0}}
p_{I_0 \to i_1} \ldots p_{i_{n-2} \to i_{n-1}} p_{i_{n-1} \to I}.
\ee
Since the sums are restricted to states belonging to the domain, it is convenient to introduce the projector over each domain,
\be
\label{eq:pi}
P_I = \sum_{\ket{i} \in \cD_I} \dyad{i}{i},
\ee
and the projection of the operator $T^+$ over the domain, \ie,
\be
T^+_I= P_I T^+ P_I.
\ee
The operator $T^+_I$ governs the dynamics of the walkers trapped within the domain $\cD_{I}$;
see Eq.~\eqref{eq:pij} where $T^+$ is now restricted to the domain.
Using Eqs.~\eqref{eq:pij} and \eqref{eq:eq1C}, the probability can be rewritten as
\be
\label{eq:eq3C}
@ -519,34 +587,37 @@ corresponding to the last move connecting the inside and outside regions of the
\be
(F^+_I)_{ij} =
\begin{cases}
T^+_{ij}, & \qif* \ket{i} \in \cD_{I} \land \ket{j} \notin \cD_{I},
\\
0, & \text{otherwise}.
\end{cases}
\ee
Physically, $F$ may be seen as a flux operator through the boundary of $\cD_{I}$.
Knowing the probability of remaining $n$ times in the domain and then exiting to some state, one can obtain
the probability of being trapped $n$ times in $\cD_{I}$ simply by summing over all possible exit states. Thus, we get
\be
\label{eq:PiN}
P_{I}(n) = \frac{1}{\PsiG_{I}} \mel{ I }{ \qty(T^+_{I})^{n-1} F^+_{I} }{ \PsiG }.
\ee
That this probability is normalized to one can be verified using the fact that
\be
\label{eq:relation}
\qty(T^+_{I})^{n-1} F^+_I = \qty(T^+_{I})^{n-1} T^+ - \qty(T^+_I)^n,
\ee
leading to
\be
\sum_{n=1}^{\infty} P_{I}(n)
= \frac{1}{\PsiG_{I}} \sum_{n=1}^{\infty} \qty[ \mel{ I }{ \qty(T^+_{I})^{n-1} }{ \PsiG }
- \mel{ I }{ \qty(T^+_{I})^{n} }{ \PsiG } ] = 1.
\ee
The average trapping time, defined as $t_{I}={\bar n}_{I} \tau$ with ${\bar n}_{I}=\sum_n n P_{I}(n)$, is calculated to be
\be
t_{I}=\frac{1}{\PsiG_I} \mel{I}{ { \qty[ P_I \qty( H^+ - \EL^+ \Id ) P_I ] }^{-1} }{ \PsiG }.
\ee
In practice, the various quantities restricted to the domain will be computed by diagonalizing the matrix $(H^+-\EL^+ \Id)$ in $\cD_{I}$.
Note that this is possible only if the dimension of the domain is not too large (say, less than a few thousand).
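These formulas have a transparent Markov-chain content that can be checked with a few lines of code. In the Python sketch below (an illustration, not the production algorithm), the guiding-function factors are absorbed into a toy stochastic matrix $p$, so that $P_I(n)$ becomes a standard first-exit distribution and $(\Id - p_{\cD})^{-1}$ plays the role of the inverse matrix above; the ten-state space and the four-state domain are arbitrary choices.
\begin{verbatim}
import numpy as np

# Trapping statistics for a domain D of a toy Markov chain.
rng = np.random.default_rng(2)
M = 10
p = rng.random((M, M))
p /= p.sum(axis=1, keepdims=True)      # stochastic matrix p_{i->j}

D = [0, 1, 2, 3]                       # domain containing the state I = 0
pD = p[np.ix_(D, D)]                   # in-domain dynamics (analogue of T+_I)
exit_prob = 1.0 - pD.sum(axis=1)       # probability of leaving D at each step

def P_I(n):                            # stay n-1 steps in D, then exit
    return (np.linalg.matrix_power(pD, n - 1) @ exit_prob)[0]

print(sum(P_I(n) for n in range(1, 400)))   # ~ 1: normalization check
nbar = (np.linalg.inv(np.eye(len(D)) - pD) @ np.ones(len(D)))[0]
print(nbar, sum(n * P_I(n) for n in range(1, 400)))  # mean trapping time
\end{verbatim}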
%=======================================%
@ -558,10 +629,10 @@ Note that it is possible only if the dimension of the domains is not too large (
%\subsubsection{Time-dependent Green's matrix}
%\label{sec:time}
%--------------------------------------------%
In this section we generalize the path-integral expression of the Green's matrix, Eq.~\eqref{eq:G}, to the case where domains are used.
To do so, we introduce the Green's matrix associated with each domain as follows:
\be
G^{(N),\cD}_{ij}= \mel{ i }{ T_i^N }{ j }.
\ee
%Starting from Eq.~\eqref{eq:cn}
%\be
@ -569,7 +640,7 @@ To do so, we introduce the Green's matrix associated with each domain as follows
%\ee
Starting from Eq.~\eqref{eq:cn} and using the representation of the paths in terms of exit states and trapping times [see Eq.~\eqref{eq:eff_series}], we get
\be
G^{(N)}_{i_0 i_N} = \sum_{p=0}^N
\sum_{\cC_p} \sum_{(i_1,...,i_{N-1}) \in \cC_p}
\prod_{k=0}^{N-1} \mel{ i_k }{ T }{ i_{k+1} } ,
\ee
@ -577,7 +648,7 @@ where $\cC_p$ is the set of paths with $p$ exit states, $\ket{I_k}$, and trappin
It follows that
\begin{multline}
\label{eq:Gt}
G^{(N)}_{i_0 i_N}= G^{(N),\cD}_{i_0 i_N} +
\sum_{p=1}^{N}
\sum_{\ket{I_1} \notin \cD_{I_0}, \ldots , \ket{I_p} \notin \cD_{I_{p-1}} }
\sum_{n_0 \ge 1} \cdots \sum_{n_p \ge 1}
@ -585,60 +656,94 @@ It follows that
\\
\times
\qty[ \prod_{k=0}^{p-1} \mel{ I_k }{ \qty(T_{I_k})^{n_k-1} F_{I_k} }{ I_{k+1} } ]
G^{(n_p-1),\cD}_{I_p i_N},
\end{multline}
where $\delta_{i,j}$ is a Kronecker delta. Note that the first contribution, $G^{(N),\cD}_{i_0 i_N}$, corresponds to the case
$p=0$ and collects all contributions to the Green's matrix coming from paths remaining forever within the domain of $\ket{i_0}$ (no exit event).
This contribution is isolated from the $p$-sum since, as a domain Green's matrix, it will be calculated exactly and will not be subject to a stochastic
treatment.
Note also
that the last state $\ket{i_N}$ is never an exit state by the very definition of our representation of the paths (otherwise, it would be
associated with the next contribution corresponding to $p+1$ exit events).
This expression is the path-integral representation of the Green's matrix using only the variables $(\ket{I_k},n_k)$ of the effective dynamics defined over the set of domains.
The standard formula for $G^{(N)}_{i_0 i_N}$ derived above, Eq.~\eqref{eq:G}, may be considered as the particular case where the
walker exits the current state $\ket{i_k}$ at each step (no domains are introduced), leading to a number of
exit events $p$ equal to $N$. In this case, we have $\ket{I_k}=\ket{i_k}$ and $n_k=1$ (for $0 \le k \le N$), and we are left only with the $p$th component of the sum, that is, $G^{(N)}_{i_0 i_N}=\prod_{k=0}^{N-1} \mel{ I_k }{ F_{I_k} }{ I_{k+1} }$, with $F=T$, thus recovering Eq.~\eqref{eq:G}.
In order to compute $G^{(N)}_{i_0 i_N}$ by resorting to Monte Carlo techniques, let us reformulate Eq.~\eqref{eq:Gt}
using the transition probability $\cP_{I \to J}(n)$ introduced above,
Eq.~\eqref{eq:eq3C}. We first rewrite Eq.~\eqref{eq:Gt} in the form
\begin{multline}
{G}^{(N)}_{i_0 i_N}={G}^{(N),\cD}_{i_0 i_N} + {\PsiG_{i_0}}
\sum_{p=1}^{N}
\sum_{\ket{I_1} \notin \cD_{I_0}, \ldots , \ket{I_p} \notin \cD_{I_{p-1}}}
\sum_{n_0 \ge 1} \cdots \sum_{n_p \ge 1}
\\
\times
\delta_{\sum_{k=0}^p n_k,N+1} \qty{ \prod_{k=0}^{p-1} \qty[ \frac{\PsiG_{I_{k+1}}}{\PsiG_{I_k}} \mel{ I_k }{ \qty(T_{I_k})^{n_k-1} F_{I_k} }{ I_{k+1} } ] }
\frac{{G}^{(n_p-1),\cD}_{I_p i_N}}{\PsiG_{I_p}}.
\end{multline}
Introducing the weights
\be
W_{I_k I_{k+1}} = \frac{\mel{ I_k }{ \qty(T_{I_k})^{n_k-1} F_{I_k} }{ I_{k+1} }}{\mel{ I_k }{ \qty(T^{+}_{I_k})^{n_k-1} F^+_{I_k} }{ I_{k+1} }},
\ee
and using the effective transition probability, Eq.~\eqref{eq:eq3C}, we get
\begin{multline}
\label{eq:Gbart}
{G}^{(N)}_{i_0 i_N} = {G}^{(N),\cD}_{i_0 i_N}
\\
+ {\PsiG_{i_0}}\sum_{p=1}^{N} \sum_{{(I,n)}_{p,N}}
\qty( \prod_{k=0}^{p-1} W_{I_k I_{k+1}} ) \qty( \prod_{k=0}^{p-1}\cP_{I_k \to I_{k+1}}(n_k) )
\frac{1}{\PsiG_{I_p}} {G}^{(n_p-1), \cD}_{I_p i_N},
\end{multline}
where, for clarity, $\sum_{{(I,n)}_{p,N}}$ is used as a shorthand notation for the constrained sum
$\sum_{\ket{I_1} \notin \cD_{I_0}, \ldots , \ket{I_p} \notin \cD_{I_{p-1}}}
\sum_{n_0 \ge 1} \cdots \sum_{n_p \ge 1}$ with $\sum_{k=0}^p n_k=N+1$.
%==============================================%
In this form, ${G}^{(N)}_{i_0 i_N}$ is now amenable to Monte Carlo calculations
by generating paths using the transition probability matrix $\cP_{I \to J}(n)$. For example, in the case of the energy, we start from
\be
E_0 = \lim_{N \to \infty }
\frac{ \sum_{i_N} {G}^{(N)}_{i_0 i_N} {(H\PsiT)}_{i_N} }
{ \sum_{i_N} {G}^{(N)}_{i_0 i_N} {\PsiT}_{i_N} }
\ee
which can be rewritten probabilistically as
\be
E_0 = \lim_{N \to \infty }
\frac{ {G}^{(N),\cD}_{i_0 i_N} + {\PsiG_{i_0}} \sum_{p=1}^{N} \expval{ \qty( \prod_{k=0}^{p-1} W_{I_k I_{k+1}} ) {\cal H}_{n_p,I_p} }_p}
{ {G}^{(N),\cD}_{i_0 i_N} + {\PsiG_{i_0}} \sum_{p=1}^{N} \expval{ \qty( \prod_{k=0}^{p-1} W_{I_k I_{k+1}} ) {\cal S}_{n_p,I_p} }_p}
\ee
where $\expval{...}_p$ is the probabilistic average defined over the set of paths with $p$ exit events, occurring with probability
$\prod_{k=0}^{p-1} \cP_{I_k \to I_{k+1}}(n_k)$,
and $({\cal H}_{n_p,I_p},{\cal S}_{n_p,I_p})$ are two quantities taking into account the contribution of the trial wave function at the end of the path, defined as
\be
{\cal H}_{n_p,I_p}= \frac{1}{\PsiG_{I_p}} \sum_{i_N} {G}^{(n_p-1),\cD}_{I_p i_N} (H \Psi_T)_{i_N},
\ee
and
\be
{\cal S}_{n_p,I_p}= \frac{1}{\PsiG_{I_p}} \sum_{i_N} {G}^{(n_p-1), \cD}_{I_p i_N} (\Psi_T)_{i_N}.
\ee
In practice, the Monte Carlo algorithm is a simple generalization of that used in standard diffusion Monte Carlo calculations.
Stochastic paths starting at $\ket{I_0}$ are generated using
the probability $\cP_{I_k \to I_{k+1}}(n_k)$ and are stopped when $\sum_k n_k$ is greater than some target value $N$. Averages of the
products of weights, $\prod_{k=0}^{p-1} W_{I_k I_{k+1}}$, times the end-point contributions, $({\cal H}/{\cal S})_{n_p,I_p}$, are computed for each $p$.
The generation of the paths can be performed using a two-step process. First, the integer $n_k$ is drawn using the probability $P_{I_k}(n)$ [see Eq.~\eqref{eq:PiN}]
and, then,
the exit state, $\ket{I_{k+1}}$, is drawn using the conditional probability $\frac{\cP_{I_k \to I_{k+1}}(n_k)}{P_{I_k}(n_k)}$.
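This two-step draw can be sketched as follows, again at the level of a toy stochastic matrix $p$ with a hypothetical four-state domain (the truncation of the trapping-time distribution at a finite maximum is a simplification for the demonstration).
\begin{verbatim}
import numpy as np

# Two-step draw: trapping time n_k from P_I(n), then exit state from the
# conditional probability cP_{I->J}(n)/P_I(n), for a toy stochastic matrix.
rng = np.random.default_rng(3)
M = 10
p = rng.random((M, M))
p /= p.sum(axis=1, keepdims=True)
D = [0, 1, 2, 3]
out = [i for i in range(M) if i not in D]
pD = p[np.ix_(D, D)]                   # in-domain block
F = p[np.ix_(D, out)]                  # flux block (inside -> outside moves)

def draw_exit(I, nmax=400):
    e = np.zeros(len(D)); e[D.index(I)] = 1.0
    rows, v = [], e
    for _ in range(nmax):              # rows[n-1] ~ cP_{I->J}(n) for all J
        rows.append(v @ F)
        v = v @ pD
    P = np.array([r.sum() for r in rows])        # P_I(n), n = 1..nmax
    n = rng.choice(nmax, p=P / P.sum())          # draw the trapping time
    J = rng.choice(len(out), p=rows[n] / rows[n].sum())  # conditional exit
    return n + 1, out[J]

print(draw_exit(0))                    # e.g., (1, 7): one step in D, exit at 7
\end{verbatim}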
%See fig.\ref{scheme1C}.
%\titou{i) Choose some initial vector $\ket{I_0}$\\
%ii) Generate a stochastic path by running over $k$ (starting at $k=0$) as follows.\\
%$\;\;\;\bullet$ Draw $n_k$ using the probability $P_{I_k}(n)$ [see Eq.~\eqref{eq:PiN}]\\
%$\;\;\;\bullet$ Draw the exit state, $\ket{I_{k+1}}$, using the conditional probability $$\frac{\cP_{I_k \to I_{k+1}}(n_k)}{P_{I_k}(n_k)}$$\\
%iii) Terminate the path when $\sum_k n_k=N$ is greater than some target value $N_\text{max}$ and compute $F(I_0,n_0;\ldots;I_N,n_N)$\\
%iv) Go to step ii) until some maximum number of paths is reached.\\
%\\
%%At the end of the simulation, an estimate of the average for a few values of $N$ greater but close to $N_\text{max}$ is obtained.
%At large $N_\text{max}$ where the convergence of the average as a function of $p$ is reached, such values can be averaged.}
%
%%==============================================%
\subsection{Domain Green's Function Monte Carlo}
\label{sec:energy}
%==============================================%
@ -655,29 +760,29 @@ The second, more direct and elegant, is based on the Dyson equation.
%--------------------------------------------%
Let us define the energy-dependent Green's matrix
\be
G^E_{ij}= \tau \sum_{N=0}^{\infty} \mel{ i }{ T^N }{ j} = \mel{i}{ \qty( H-E \Id )^{-1} }{j}.
\ee
The denomination ``energy-dependent'' is chosen here since
this quantity is the discrete version of the Laplace transform of the time-dependent Green's function in a continuous space,
usually known under this name.\cite{note}
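As a quick numerical consistency check (not needed in actual simulations), one can verify that $\tau \sum_N T^N$ indeed reproduces $\qty(H-E\Id)^{-1}$ for a small arbitrary matrix, assuming $T=\Id-\tau\qty(H-E\Id)$ and $E$ below the spectrum of $H$ so that the series converges:
\begin{verbatim}
import numpy as np

# Check that G^E = tau * sum_N T^N equals (H - E Id)^{-1} for a toy H.
rng = np.random.default_rng(5)
M, tau = 5, 0.05
A = rng.normal(size=(M, M))
H = (A + A.T) / 2
E = np.linalg.eigvalsh(H)[0] - 0.5    # E strictly below the spectrum
T = np.eye(M) - tau * (H - E * np.eye(M))

GE, P = np.zeros((M, M)), np.eye(M)
for _ in range(5000):                 # truncated geometric series
    GE += tau * P
    P = P @ T
print(np.max(np.abs(GE - np.linalg.inv(H - E * np.eye(M)))))  # ~ round-off
\end{verbatim}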
The remarkable property is that, thanks to the summation over $N$ up to infinity, the constrained multiple sums appearing in Eq.~\eqref{eq:Gt} can be factorized in terms of a product of unconstrained single sums, as follows
\begin{multline}
\sum_{N=1}^\infty \sum_{p=1}^N \sum_{n_0 \ge 1} \cdots \sum_{n_p \ge 1} \delta_{\sum_{k=0}^p n_k,N+1} F(n_0,\ldots,n_p)
\\
= \sum_{p=1}^{\infty} \sum_{n_0=1}^{\infty} \cdots \sum_{n_p=1}^{\infty} F(n_0,\ldots,n_p).
\end{multline}
where $F$ is some arbitrary function of the trapping times.
Using the fact that $G^E_{ij}= \tau \sum_{N=0}^{\infty} G^{(N)}_{ij}$, where $G^{(N)}_{ij}$ is given by Eq.~\eqref{eq:Gt}, and summing over the $n_k$ variables, we get
\begin{multline}
\label{eq:eqfond}
{G}^{E}_{i_0 i_N}
= {G}^{E,\cD}_{i_0 i_N}
\\
+ \sum_{p=1}^{\infty} \sum_{I_1 \notin \cD_0, \hdots , I_p \notin \cD_{p-1}}
\qty[ \prod_{k=0}^{p-1} \mel{ I_k }{ {\qty[ P_k \qty( H-E \Id ) P_k ] }^{-1} (-H)(\Id-P_k) }{ I_{k+1} } ]
{G}^{E,\cD}_{I_p i_N},
\end{multline}
where ${G}^{E,\cD}$ is the energy-dependent domain Green's matrix defined as ${G}^{E,\cD}_{ij} = \tau \sum_{N=0}^{\infty} \mel{ i }{ T^N_i }{ j}$.
As a didactic example, Appendix \ref{app:A} reports the exact derivation of this formula in the case of a two-state system.
%----------------------------%
@ -697,68 +802,122 @@ with $G^E_{0,ij} = \mel{i}{ \qty( H_0-E \Id )^{-1} }{j}$.
Let us choose $H_0$ such that $\mel{ i }{ H_0 }{ j } = \mel{ i }{ P_i H P_i }{ j }$ for all $i$ and $j$.
Then, the Dyson equation \eqref{eq:GE} becomes
\begin{multline}
{G}^{E}_{ij}
= {G}^{E,\cD}_{ij}
\\
+ \sum_k \mel{ i }{ {\qty[P_i \qty(H-E \Id)P_i ]}^{-1} (H_0-H) }{ k } {G}^{E}_{kj}.
\end{multline}
Using the following identity
\be
\begin{split}
{\qty[ P_i \qty(H-E \Id) P_i ]}^{-1} (H_0-H)
& = { \qty[ P_i \qty(H-E \Id)P_i ]}^{-1} (P_i H P_i - H)
\\
& = {\qty[ P_i \qty(H-E \Id) P_i]}^{-1} (-H) ( \Id -P_i),
\end{split}
\ee
the Dyson equation may be written in the form
\begin{multline}
G^E_{ij} = {G}^{E,\cD}_{ij}
\\
+ \sum_{k \notin \cD_i} \mel{ i }{ {\qty[P_i \qty(H-E \Id) P_i]}^{-1} (-H)( \Id -P_i) }{ k } {G}^{E}_{kj},
\end{multline}
which is identical to Eq.~\eqref{eq:eqfond} when $G^E_{ij}$ is expanded iteratively.
Let us use, as the effective transition probability density,
\be
P_{I \to J} = \frac{1} {\PsiG(I)} \mel{ I }{ {\qty[ P_I \qty(H^+ - \EL^+ \Id) P_I]}^{-1} (-H^+) (\Id -P_I) }{ J } \PsiG(J)
\ee
and the weight
\be
W^E_{IJ} =
\frac{ \mel{ I }{ {\qty[ P_I \qty(H-E \Id) P_I]}^{-1} (-H)( \Id -P_I) }{ J} }
{\mel{ I }{ {\qty[ P_I \qty(H^+ - \EL^+ \Id) P_I]}^{-1} (-H^+)( \Id -P_I) }{ J} }.
\ee
Using Eqs.~\eqref{eq:eq1C}, \eqref{eq:eq3C} and \eqref{eq:relation}, one can easily verify that $P_{I \to J} \ge 0$ and $\sum_J P_{I \to J}=1$.
Finally, the probabilistic expression reads
\begin{multline}
\label{eq:final_E}
G^E_{i_0 i_N}= {G}^{E,\cD}_{i_0 i_N}
\\
+ \sum_{p=1}^{\infty} \PsiG_{i_0} \expval{ \qty( \prod_{k=0}^{p-1} W^E_{I_k I_{k+1}} )
\frac{{G}^{E,\cD}_{I_p i_N}} { \PsiG_{I_p}} }.
\end{multline}
%----------------------------%
\subsubsection{Energy estimator}
%----------------------------%
To calculate the energy, we introduce the following estimator
\be
\cE(E) = \frac{ \mel{ I_0 }{ \qty(H-E \Id)^{-1} }{ H\PsiT } } {\mel{ I_0 }{ \qty(H-E \Id)^{-1} }{ \PsiT } },
\ee
which is the counterpart of the quantity ${\cal E}_N =\frac{ \mel{ I_0 }{ T^N }{ H\PsiT } } {\mel{ I_0 }{T^N}{ \PsiT } }$ used
in the time-dependent formalism. In that case, the energy was easily obtained by taking the large-$N$ limit of ${\cal E}_N$, see Eq.~\eqref{eq:E0}.
Here, the situation is not as simple and we need a way to extract the energy from $\cE(E)$.
Using the spectral decomposition of $H$, we have
\be
\label{eq:calE}
\cE(E) = \frac{ \sum_i \frac{E_i c_i}{E_i-E}}{\sum_i \frac{c_i}{E_i-E}},
\ee
where $c_i = \braket{ I_0 }{ \Phi_i } \braket{ \Phi_i}{ \PsiT }$ and $\Phi_i$ are the eigenstates of $H$, \ie, $H \ket{\Phi_i} = E_i \ket{\Phi_i}$.
The important observation is that, for all eigenstates, we have $\cE(E_i)= E_i$. Thus, to get the ground-state energy, we propose to search for the solution
of the non-linear equation $\cE(E)= E$ in the vicinity of $E_0$.
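This procedure can be sketched on a toy problem where $\qty(H-E\Id)^{-1}$ is formed exactly: $\cE(E)$ is evaluated at a few reference energies below $E_0$ and the fixed point $\cE(E)=E$ is obtained from a linear fit, anticipating the extrapolation strategy used in the numerical section (the small random $H$ and the uniform trial vector are illustrative assumptions).
\begin{verbatim}
import numpy as np

# Toy illustration: evaluate cE(E) exactly for a small H at a few reference
# energies below E_0, fit linearly, and solve the fixed point cE(E) = E.
rng = np.random.default_rng(4)
M = 6
A = rng.normal(size=(M, M))
H = (A + A.T) / 2
psiT = np.ones(M)                      # crude trial vector
e0 = np.zeros(M); e0[0] = 1.0          # |I_0> = |0>

def cE(E):
    G = np.linalg.inv(H - E * np.eye(M))       # exact (H - E Id)^{-1}
    return (e0 @ G @ (H @ psiT)) / (e0 @ G @ psiT)

E_exact = np.linalg.eigvalsh(H)[0]
Es = E_exact - np.array([0.05, 0.04, 0.03, 0.02, 0.01])
b, a = np.polyfit(Es, [cE(E) for E in Es], 1)  # cE(E) ~ a + b*E
print("fixed point:", a / (1.0 - b), " exact:", E_exact)
\end{verbatim}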
In practical Monte Carlo calculations, the DMC energy will be obtained by computing a finite number of components $H_p$ and $S_p$ defined as follows
\be
\cE^\text{DMC}(E,p_{max})= \frac{ H_0+ \sum_{p=1}^{p_{max}} H^\text{DMC}_p }
{S_0 +\sum_{p=1}^{p_{max}} S^\text{DMC}_p }.
\ee
For $p \ge 1$, Eq.~\eqref{eq:final_E} gives
\begin{align}
H^\text{DMC}_p & = \PsiG_{i_0}\expval{ \qty(\prod_{k=0}^{p-1} W^E_{I_k I_{k+1}}) {\cal H}_{I_p} },
\\
S^\text{DMC}_p & = \PsiG_{i_0} \expval{ \qty(\prod_{k=0}^{p-1} W^E_{I_k I_{k+1}}) {\cal S}_{I_p} },
\end{align}
where
\be
{\cal H}_{I_p}= \frac{1}{\PsiG_{I_p}} \sum_{i_N} {G}^{E,\cD}_{I_p i_N} (H \Psi_T)_{i_N},
\ee
and
\be
{\cal S}_{I_p}= \frac{1}{\PsiG_{I_p}} \sum_{i_N} {G}^{E, \cD}_{I_p i_N} (\Psi_T)_{i_N}.
\ee
For $p=0$, the two components are exactly evaluated as
\begin{align}
H_0 & = \mel{ I_0 }{ {\qty[ P_{I_0} \qty(H-E \Id) P_{I_0} ]}^{-1} }{ H\PsiT },
\\
S_0 & = \mel{ I_0 }{ {\qty[ P_{I_0} \qty(H-E \Id) P_{I_0} ]}^{-1} }{ \PsiT }.
\end{align}
Note that the evaluation of $(H_0,S_0)$ is possible as long as the size of the domain is small enough to allow the calculation of the inverse matrix.
Avoiding the Monte Carlo calculation of a quantity, such as $H_0$ or $S_0$, that can be evaluated analytically is computationally
very appealing since the corresponding statistical error is entirely removed. We thus propose to extend
the exact calculation of $H_p$ and $S_p$ to higher values of $p$,
up to the point where the exponential increase
of the number of intermediate states involved in the explicit sums makes the calculation unfeasible.
%\begin{multline}
%H_p= \sum_{I_1 \notin \cD_0, \hdots , I_p \notin \cD_{p-1}}
% \qty[ \prod_{k=0}^{p-1} \mel{ I_k }{ {\qty[ P_k \qty( H-E \Id ) P_k ] }^{-1} (-H)(\Id-P_k) }{ I_{k+1} } ]
%\\
%\times \mel{ I_p }{ {\qty[ P_p ( H-E \Id) P_p ] }^{-1} }{ I_N }
%\end{multline}
%with a similar formula for $S_p$. In practice, $\cE^\text{DMC}(E,p_{ex},p_{max})$
Finally, $\cE^\text{DMC}(E,p_{ex},p_{max})$
is evaluated in practice as follows
\be
\cE^\text{DMC}(E,p_{ex},p_{max})= \frac{ \sum_{p=0}^{p_{ex}-1} H_p +\sum_{p=p_{ex}}^{p_{max}} H^\text{DMC}_p }
{ \sum_{p=0}^{p_{ex}-1} S_p +\sum_{p=p_{ex}}^{p_{max}} S^\text{DMC}_p },
\ee
where $p_{ex}$ is the number of components exactly computed.
Let us emphasize that, since the magnitude of $H_p$ and $S_p$ decreases as a function of $p$, removing the statistical error
on the first and most important contributions can lead
to significant gains, as illustrated in the numerical application below.
It is easy to check that, in the vicinity of the exact energy $E_0$, $\cE(E)$ is a linear function of $E - E_0$.
Therefore, in practice, we compute the value of $\cE(E)$ for several values of $E$, and fit these data using a linear, quadratic or a more complicated function of $E$ in order to obtain, via extrapolation, an estimate of $E_0$.
In order to have a precise extrapolation of the energy, it is best to compute $\cE(E)$ for values of $E$ as close as possible to the exact energy.
However, as $E \to E_0$, both the numerators and denominators of Eq.~\eqref{eq:calE} diverge.
This is reflected by the fact that one needs to compute more and more $p$-components with an important increase of statistical fluctuations.
Thus, from a practical point of view, a trade-off has to be found between the quality of the extrapolation and the amount of statistical fluctuations.
@ -852,7 +1011,7 @@ For the other states, we choose a single-state domain as
\ee
To define $\cD$, let us introduce the following set of states
\be
\cS_{ij} = \qty{ \ket{n} : n_D(n)=i \land n_A(n)=j },
\ee
which means that $\cD$ contains the set of states having up to $n_D^\text{max}$ double occupations and, for each state with a number of double occupations equal to $m$, a number of nearest-neighbor antiparallel pairs between $n_A^\text{min}(m)$ and $n_A^\text{max}(m)$.
Here, $n_A^\text{max}(m)$ will not be varied and is taken to be the maximum possible for a given $m$, $n_A^\text{max}(m)= \max_{N}(N-1-2m,0)$.
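As an illustration, the classes $\cS_{ij}$ of the 4-site chain can be enumerated with the short script below. The bitstring encoding and the definition of $n_A$ as the number of nearest-neighbor pairs of singly occupied sites carrying antiparallel spins are our bookkeeping assumptions for this sketch, not part of the formalism.
\begin{verbatim}
from itertools import combinations

Nsite = 4

def occupations(nelec):
    # all bitstrings with nelec electrons on Nsite sites
    for occ in combinations(range(Nsite), nelec):
        yield sum(1 << i for i in occ)

def n_D(up, dn):                        # number of doubly occupied sites
    return bin(up & dn).count("1")

def n_A(up, dn):                        # nearest-neighbor antiparallel pairs
    count = 0
    for i in range(Nsite - 1):
        a, b = (up >> i) & 1, (dn >> i) & 1
        c, d = (up >> (i + 1)) & 1, (dn >> (i + 1)) & 1
        if a + b == 1 and c + d == 1 and a != c:
            count += 1
    return count

S = {}
for up in occupations(2):
    for dn in occupations(2):
        S.setdefault((n_D(up, dn), n_A(up, dn)), []).append((up, dn))
for key in sorted(S):                   # sizes of the classes S_ij
    print(key, len(S[key]))             # the 36 states of the 4-site chain
\end{verbatim}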
@ -903,26 +1062,13 @@ For the three last cases, the dots indicate the remaining states obtained by per
Let us now present our DMC calculations for the Hubbard model.
In what follows, we shall restrict ourselves to the case of the Green's function Monte Carlo approach where trapping times are integrated out exactly.
Let us begin with a small chain of 4 sites with $U=12$.
From now on, we shall take $t=1$.
The size of the linear space is ${\binom{4}{2}}^2 = 36$ and the ground-state energy obtained by exact diagonalization is $E_0=-0.768068\ldots$.
The two variational parameters of the trial vector have been optimized and fixed at the values $\alpha=1.292$ and $\beta=0.552$, with a variational energy of $E_\text{T}=-0.495361\ldots$.
In what follows, $\ket{I_0}$ will be systematically chosen as one of the two N\'eel states, \eg, $\ket{I_0} = \ket{\uparrow,\downarrow, \uparrow,\ldots}$.
Figure \ref{fig3} shows the convergence of $H_p$ as a function of $p$ for different values of the reference energy $E$.
We consider the simplest case where a single-state domain is associated with each state.
Five different values of $E$ have been chosen, namely $E=-1.6$, $-1.2$, $-1.0$, $-0.9$, and $-0.8$.
Only $H_0$ is computed analytically ($p_{ex}=0$).
@ -931,7 +1077,7 @@ When $E$ is far from the exact value of $E_0=-0.768068\ldots$, the convergence i
In contrast, when $E$ approaches the exact energy, a slower convergence is observed, as expected from the divergence of the matrix elements of the Green's matrix at $E=E_0$ where the expansion does not converge at all.
Note the oscillations of the curves as a function of $p$ due to a parity effect specific to this system.
In practice, it is not too much of a problem since a smoothly convergent behavior is nevertheless observed for the even- and odd-parity curves.
The ratio, $\cE^\text{DMC}(E,p_{ex}=1,p_{max})$ as a function of $E$ is presented in Fig.~\ref{fig4}.
Here, $p_{max}$ is taken sufficiently large so that the convergence at large $p$ is reached.
The values of $E$ are $-0.800$, $-0.795$, $-0.790$, $-0.785$, and $-0.780$.
For small $E$, the curve is extrapolated using the so-called two-component expression:
@ -949,24 +1095,24 @@ The estimate of the energy obtained from $\cE(E)=E$ is $-0.76807(5)$ in full agr
%%% FIG 1 %%%
\begin{figure}
\includegraphics[width=\columnwidth]{fig3}
\caption{One-dimensional Hubbard model with $N=4$ and $U=12$.
$H_p$ as a function of $p$ for $E=-1.6$, $-1.2$, $-1.0$, $-0.9$, and $-0.8$.
$H_0$ is computed analytically and $H_p$ ($p > 0$) is computed stochastically.
Error bars are smaller than the symbol size.}
\label{fig3}
\end{figure}
%%% FIG 2 %%%
\begin{figure}
\includegraphics[width=\columnwidth]{fig4}
\caption{One-dimensional Hubbard model with $N=4$ and $U=12$.
$\cE(E)$ as a function of $E$.
The horizontal and vertical lines are at $\cE(E_0)=E_0$ and $E=E_0$, respectively.
$E_0 = -0.768068\ldots$ is the exact energy.
The dotted line is the two-component extrapolation defined in Eq.~\eqref{eq:2comp}.
Error bars are smaller than the symbol size.}
\label{fig4}
\end{figure}
Table \ref{tab1} illustrates the dependence of the Monte Carlo results upon the choice of the domain.
@ -1023,8 +1169,8 @@ $\cD(0,1)\cup\cD(1,0)\cup\cD$(2,0) & 36 & $\infty$&1&$-0.75272390$\\
\end{ruledtabular}
\end{table}
As a general rule, it is always good to avoid the Monte Carlo calculation of a quantity which is computable analytically.
As explained above, it is very advantageous to calculate exactly as many $(H_p,S_p)$ as possible in order to avoid the statistical error on the dominant
components.
Table \ref{tab2} reports the statistical fluctuations of the energy for the simulation of Table \ref{tab1}, both for the case of a single-state main domain and for the domain having the largest average trapping time, namely $\cD(0,1) \cup \cD(1,1)$.
Results show that it is indeed interesting to compute exactly as many components as possible.
@ -1032,7 +1178,7 @@ For the single-state domain, a factor 2 of reduction of the statistical error is
For the best domain, the impact is much more important with a huge reduction of about three orders of magnitude in the statistical error.
Table \ref{tab3} reports the energies converged as a function of $p$ with their statistical error on the last digit for $E=
-0.8$, $-0.795$, $-0.79$, $-0.785$, and $-0.78$.
The values are displayed in Fig.~\ref{fig4}.
As seen in the figure, the behavior of $\cE$ as a function of $E$ is very close to linear.
The extrapolated values obtained from the five values of the energy with the three fitting functions are reported.
Using the linear fitting function leads to an energy of $-0.7680282(5)$ to compare with the exact value of $-0.768068\ldots$.
@ -1088,7 +1234,7 @@ $8$ & $2.2 \times10^{-5}$ &$ 0.05 \times 10^{-8}$\\
\begin{table}
\caption{One-dimensional Hubbard model with $N=4$, $U=12$, $\alpha=1.292$, $\beta=0.552$, and $p_{ex}=4$.
The main domain is $\cD(0,1) \cup \cD(1,0)$.
The simulation is performed with 20 independent blocks and $10^6$ stochastic paths starting from the N\'eel state.
The fits are performed with the five values of $E$.}
\label{tab3}
\begin{ruledtabular}
@ -1164,15 +1310,15 @@ The efficiency of the method is directly related to the importance of the averag
Being able to define domains with large average trapping times is the key aspect of the method since it may lead to some important reduction of the statistical error, as illustrated in our numerical applications.
Therefore, a trade-off has to be found between maximizing the average trapping time and minimizing the cost of computing the domain Green's functions.
In practice, there is no general rule to construct such domains.
For each system at hand, one needs to determine, on physical grounds, which regions of the configuration space are preferentially sampled by the stochastic trajectories and to build domains of minimal size enclosing such regions.
In the first application presented here on the one-dimensional Hubbard model, we exploit the physics of the large-$U$ regime that is known to approach the Heisenberg limit where double occupations have small weights.
This simple example has been chosen to illustrate the various aspects of the approach.
Our goal is, of course, to tackle much larger systems, like those treated by state-of-the-art methods such as selected CI,\cite{Huron_1973,Harrison_1991,Giner_2013,Holmes_2016,Schriber_2016,Tubman_2020} FCIQMC,\cite{Booth_2009,Cleland_2010} AFQMC,\cite{Zhang_2003} or DMRG.\cite{White_1999,Chan_2011}
Here, we have mainly focused on the theoretical aspects of the approach.
In order to consider larger systems, an elaborate implementation of the present method is necessary in order to keep under control the cost of the simulation.
This is outside the scope of the present study and will be presented in a forthcoming work.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\acknowledgments{
@ -1195,38 +1341,37 @@ For the simplest case of a system containing only two states, $\ket{1}$ and $\ke
\be
\cI
= \mel{ I_0 }{ \qty(H-E \Id)^{-1} }{ \Psi }
= \mel{ I_0 }{ { \qty[ P_0 \qty(H-E \Id) P_0 ]}^{-1} }{ \Psi }
+ \sum_{p=1}^{\infty} \cI_p,
\ee
with
\begin{multline}
\cI_p= \sum_{I_1 \notin \cD_0, \ldots , I_p \notin \cD_{p-1}}
\qty[ \prod_{k=0}^{p-1} \mel{ I_k }{ { \qty[P_k \qty(H-E \Id) P_k ]}^{-1} (-H)(\Id-P_k) }{ I_{k+1} } ]
\\
\times \mel{ I_p }{ { \qty[ P_p \qty(H-E \Id) P_p ]}^{-1} }{ \Psi }.
\end{multline}
To treat simultaneously the two possibilities for the final state, \ie, $\ket{I_N} = \ket{1}$ or $\ket{2}$, Eq.~\eqref{eq:eqfond} has been slightly generalized to the case of a general vector for the final state written as
\be
\ket{\Psi} = \Psi_1 \ket{1} + \Psi_2 \ket{2}.
\ee
Let us choose a single-state domain for both states, namely $\cD_1 = \qty{ \ket{1} }$ and $\cD_2 = \qty{ \ket{2} }$.
%\titou{Note that the single exit state for each state is the other state.}
Note that, due to the simplicity of the present two-state model, there are only two possible deterministic ``alternating'' paths, namely, $\ket{1} \to \ket{2} \to \ket{1},\ldots$ and $\ket{2} \to \ket{1} \to \ket{2},\ldots$.
For the sake of convenience, we introduce the following quantities
\begin{align}
\label{eq:defA1}
A_1 & = \mel{ 1 }{ {\qty[ P_1 \qty(H-E \Id) P_1 ]}^{-1} (-H)(\Id-P_1) }{ 2 },
\\
\label{eq:defA2}
A_2 & = \mel{ 2 }{ {\qty[ P_2 \qty(H-E \Id) P_2]}^{-1} (-H) (\Id-P_2) }{ 1 },
\end{align}
and
\begin{align}
C_1 & = \mel{ 1 }{ {\qty[ P_1 \qty(H-E \Id) P_1]}^{-1} }{ \Psi },
\\
C_2 & = \mel{ 2 }{ {\qty[ P_2 \qty(H-E \Id) P_2]}^{-1} }{ \Psi }.
\end{align}
Without loss of generality, let us choose, for example, $\ket{I_0} = \ket{1}$.
Then, it is easy to show that
@ -1280,9 +1425,29 @@ which finally yields
% { (H_{11}-E)(H_{22}-E) - H^2_{12}}
% \\
= \frac{ H_{22}-E}{\Delta} \Psi_1 - \frac{H_{12}}{\Delta} \Psi_2,
\label{final}
\ee
where $\Delta=(H_{11}-E)(H_{22}-E)-H_{12}^2$ is the determinant of $H-E\Id$.
On the other hand, the quantity $\mel{ 1 }{ \qty(H-E \Id)^{-1} }{ \Psi}$ on the LHS of Eq.~\eqref{final} can be directly calculated using the inverse of the $2\times2$ matrix
\be
\qty(H-E \Id)^{-1} \ket{\Psi}=
\frac{1}{H-E \Id} \begin{pmatrix}
\Psi_1 \\
\Psi_2 \\
\end{pmatrix}
= \frac{1}{\Delta}
\begin{pmatrix}
H_{22}-E & - H_{12} \\
- H_{21} & H_{11}-E \\
\end{pmatrix}
\begin{pmatrix}
\Psi_1 \\
\Psi_2 \\
\end{pmatrix}.
\ee
As seen, the first component of the vector $\qty(H-E \Id)^{-1} \ket{\Psi}$ is identical to the one given in
Eq.~\eqref{final}, thus independently confirming the validity of this equation.
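This identity is also easily checked numerically: the sketch below sums the geometric series over the two alternating paths and compares it with the direct solution of the $2\times2$ linear system (the numerical values of $H$, $E$, and $\Psi$ are arbitrary).
\begin{verbatim}
import numpy as np

# Check of the two-state result: the geometric series over alternating paths
# must equal the first component of (H - E Id)^{-1} |Psi>.
H = np.array([[1.0, 0.3],
              [0.3, 2.0]])             # arbitrary symmetric 2x2 Hamiltonian
E = 0.5                                # reference energy (not an eigenvalue)
Psi = np.array([0.7, 0.4])

A1 = -H[0, 1] / (H[0, 0] - E)          # Eq. (eq:defA1), single-state domains
A2 = -H[1, 0] / (H[1, 1] - E)          # Eq. (eq:defA2)
C1 = Psi[0] / (H[0, 0] - E)
C2 = Psi[1] / (H[1, 1] - E)

series = (C1 + A1 * C2) / (1.0 - A1 * A2)   # sum over p; here |A1*A2| < 1
direct = np.linalg.solve(H - E * np.eye(2), Psi)[0]
print(series, direct)                       # identical up to round-off
\end{verbatim}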
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliography{g}