new version from Michel

This commit is contained in:
Pierre-Francois Loos 2022-10-05 14:39:58 +02:00
parent 0c032fd60f
commit e1233afcdf
2 changed files with 104 additions and 22 deletions

75
g.bib
View File

@ -4,10 +4,74 @@
%% Created for Pierre-Francois Loos at 2022-09-30 16:13:18 +0200
%% Saved with string encoding Unicode (UTF-8)
%% Saved with string encoding Unicode (UTF-8) *
@article{Ceperley_1983,
title = {The simulation of quantum systems with random walks: A new algorithm for charged systems},
author = {Ceperley, D.},
journal = {Journal of Computational Physics},
volume = {51},
number = {3},
pages = {404--422},
year = {1983},
issn = {0021-9991},
doi = {10.1016/0021-9991(83)90161-4},
url = {https://www.sciencedirect.com/science/article/pii/0021999183901614},
abstract = {Random walks with branching have been used to calculate exact properties of the ground state of quantum many-body systems. In this paper, a more general Green's function identity is derived which relates the potential energy, a trial wavefunction, and a trial density matrix to the rules of a branched random walk. It is shown that an efficient algorithm requires a good trial wavefunction, a good trial density matrix, and a good sampling of this density matrix. An accurate density matrix is constructed for Coulomb systems using the path integral formula. The random walks from this new algorithm diffuse through phase space an order of magnitude faster than the previous Green's Function Monte Carlo method. In contrast to the simple diffusion Monte Carlo algorithm, it is an exact method. Representative results are presented for several molecules.}
}
@article{Kalos_1962,
title = {{Monte Carlo} Calculations of the Ground State of Three- and Four-Body Nuclei},
author = {Kalos, M. H.},
journal = {Phys. Rev.},
volume = {128},
issue = {4},
pages = {1791--1795},
year = {1962},
month = nov,
publisher = {American Physical Society},
doi = {10.1103/PhysRev.128.1791},
url = {https://link.aps.org/doi/10.1103/PhysRev.128.1791}
}
@article{Kalos_1970,
title = {Energy of a Boson Fluid with {Lennard-Jones} Potentials},
author = {Kalos, M. H.},
journal = {Phys. Rev. A},
volume = {2},
issue = {1},
pages = {250--255},
year = {1970},
month = jul,
publisher = {American Physical Society},
doi = {10.1103/PhysRevA.2.250},
url = {https://link.aps.org/doi/10.1103/PhysRevA.2.250}
}
@article{Moskowitz_1986,
author = {Moskowitz, Jules W. and Schmidt, K. E.},
title = {The domain {Green's} function method},
journal = {The Journal of Chemical Physics},
volume = {85},
number = {5},
pages = {2868--2874},
year = {1986},
doi = {10.1063/1.451046},
url = {https://doi.org/10.1063/1.451046}
}
@misc{note,
note = {As $\tau \rightarrow 0$ and $N \rightarrow \infty$ with $N\tau=t$, the operator $T^N$ converges to $e^{-t(H-E \Id)}$. We then have $G^E_{ij} \rightarrow \int_0^{\infty} dt \mel{i}{e^{-t(H-E \Id)}}{j}$, which is the Laplace transform of the time-dependent Green's function $\mel{i}{e^{-t(H-E \Id)}}{j}$.}}
@ -279,6 +343,15 @@
title = {Matrix Computations},
year = {2012}}
@book{Ceperley_1979,
author = {Ceperley, D. M. and Kalos, M. H.},
editor = {Binder, K.},
chapter = {4},
publisher = {Springer},
address = {Berlin},
title = {{Monte Carlo} Methods in Statistical Physics},
year = {1979}}
@article{Carlson_2007,
author = {J. Carlson},
date-modified = {2022-09-15 10:14:32 +0200},

51
g.tex
View File

@ -173,7 +173,7 @@ There exist several variants of DMC known under various names: pure DMC, \cite{C
Here, we shall place ourselves within the framework of pure DMC whose mathematical simplicity is particularly appealing when developing new ideas.
However, all the ideas presented in this work can be adapted without too much difficulty to the other variants, so the denomination DMC must ultimately be understood here as a generic name for this broad class of methods.
Without entering into the mathematical details (which are presented below), the main ingredient of DMC in order to perform the matrix-vector multiplications probabilistically is the stochastic matrix (or transition probability matrix) that generates stepwise a series of states over which statistical averages are evaluated.
Without entering into the mathematical details (which are presented below), the main ingredient of DMC in order to perform the matrix-vector multiplications probabilistically is to introduce a stochastic matrix (or transition probability matrix) that generates stepwise a series of states over which statistical averages are evaluated.
The critical aspect of any Monte Carlo scheme is the amount of computational effort required to reach a given statistical error.
Two important avenues to decrease the error are to use variance reduction techniques (for example, by introducing improved estimators \cite{Assaraf_1999A}) and to improve the quality of the sampling (minimization of the correlation time between states).
Another possibility, at the heart of the present work, is to integrate out exactly some parts of the dynamics, thus reducing the number of degrees of freedom and, hence, the amount of statistical fluctuations.
@ -185,12 +185,20 @@ It is shown how to define an effective stochastic dynamics describing walkers mo
The equations of the effective dynamics are derived and a numerical application for a model (one-dimensional) problem is presented.
In particular, it shows that the statistical convergence of the energy can be greatly enhanced when domains associated with large average trapping times are considered.
It should be noted that the use of domains in quantum Monte Carlo is not new.
In their pioneering work, \cite{Kalos_1974} Kalos and collaborators introduced the so-called domain Green's function Monte Carlo approach in continuous space that they applied to a system of bosons with hard-sphere interaction.
The domain used was the Cartesian product of small spheres around each particle, the Hamiltonian being approximated by the kinetic part only within the domain.
Some time later, Kalos proposed to extend these ideas to more general domains such as rectangular and/or cylindrical domains. \cite{Kalos_2000} In both works, the size of the domains is infinitely small in the limit of a vanishing time step.
Here, the domains are of arbitrary size, thus greatly increasing the efficiency of the approach.
Note also that some general equations for arbitrary domains in continuous space have also been proposed by some of us in Ref.~\onlinecite{Assaraf_1999B}.
It should be noted that the use of domains in quantum Monte Carlo is not new. Domains have been introduced within the context of the
Green's function Monte Carlo (GFMC) method pioneered by Kalos\cite{Kalos_1962,Kalos_1970}
and later developed and applied by Kalos and others.\cite{Kalos_1974,Ceperley_1979,Ceperley_1983,Moskowitz_1986}
In GFMC, an approximate Green's function that can be sampled is needed to propagate stochastically the wavefunction.
In the so-called Domain's GFMC variant introduced in Refs.~\onlinecite{Kalos_1970} and \onlinecite{Kalos_1974}
the sampling is realized by using the restriction of the Green's function
to a small domain consisting of the Cartesian product of small spheres around each particle, the potential being considered as constant within the domain.
Fundamentally, the method presented in this work is closely related to the Domain's GFMC, although the way we present the formalism in terms of walkers
trapped within domains
and derive the equations may appear different.
However, a key difference here is that we show how to use
domains of arbitrary size, a point which can greatly enhance the efficiency of the simulations when domains are suitably chosen, as illustrated in our numerical
application.
Finally, from a general perspective, it is interesting to mention that the method proposed here is an illustration of how valuable and efficient the combination of stochastic and deterministic techniques can be.
In recent years, a number of works have exploited this idea and proposed hybrid stochastic/deterministic schemes.
@ -246,7 +254,7 @@ To proceed further we introduce the time-dependent Green's matrix $G^{(N)}$ defi
where $\ket{i}$ and $\ket{j}$ are basis vectors.
The denomination ``time-dependent Green's matrix'' is used here since $G$ may be viewed as a short-time approximation of the (time-imaginary) evolution operator $e^{-N\tau H}$, which is usually referred to as the imaginary-time dependent Green's function.
Introducing the set of $N-1$ intermediate states, $\{ \ket{i_k} \}_{1 \le k \le N-1}$, in $T^N$, $G^{(N)}$ can be written in the following expanded form
Introducing the set of $N-1$ intermediate states, $\{ \ket{i_k} \}_{1 \le k \le N-1}$, between each $T$ in $T^N$, $G^{(N)}$ can be written in the following expanded form
\be
\label{eq:cn}
G^{(N)}_{i_0 i_N} = \sum_{i_1} \sum_{i_2} \cdots \sum_{i_{N-1}} \prod_{k=0}^{N-1} T_{i_{k} i_{k+1}},
@ -268,7 +276,7 @@ This result is independent of the initial vector $\ket{i_0}$, apart from some ir
We illustrate this fundamental property pictorially in Fig.~\ref{fig:paths}.
When the size of the linear space is small, the explicit calculation of the full sums involving $M^N$ terms (where $M$ is the size of the Hilbert space) can be performed.
In such a case, we are in the realm of what one would call ``deterministic'' power methods, such as the Lanczos or Davidson approaches.
If not, probabilistic techniques for generating only the paths contributing significantly to the sums are to be preferred.
If not, probabilistic techniques for generating only the paths contributing significantly to the sums are to be used.
This is the central theme of the present work.
%%% FIG 1 %%%
@ -449,7 +457,7 @@ During the simulation, walkers move from state to state with the possibility of
exiting to a different state. This fact can be exploited in order to integrate out some part of the dynamics, thus leading to a reduction of the statistical
fluctuations. This idea was proposed some time ago and applied to the SU($N$) one-dimensional Hubbard model.\cite{Assaraf_1999A,Assaraf_1999B,Caffarel_2000}
Considering a given state $\ket{i}$, the probability that a walker remains exactly $n$ times in $\ket{i}$ (with $1 \le n < \infty$) and then exits to a different state $j$ (with $j \neq i$) is
Considering a given state $\ket{i}$, the probability that a walker remains exactly $n$ times in $\ket{i}$ (with $ n \ge 1 $) and then exits to a different state $j$ (with $j \neq i$) is
\be
\cP_{i \to j}(n) = \qty(p_{i \to i})^{n-1} p_{i \to j}.
\ee
@ -470,7 +478,7 @@ and this defines a Poisson law with an average number of trapping events
\be
\bar{n}_i= \sum_{n=1}^{\infty} n P_i(n) = \frac{1}{1 -p_{i \to i}}.
\ee
Introducing the continuous time $t_i = n \tau$, the average trapping time is thus given by
Introducing the continuous time $t_i = n_i \tau$, the average trapping time is thus given by
\be
\bar{t_i}= \qty[ H^+_{ii}-(\EL^+)_{i} ]^{-1},
\ee
@ -582,9 +590,9 @@ leading to
= \frac{1}{\PsiG_{I}} \sum_{n=1}^{\infty} \qty[ \mel{ I }{ \qty(T^+_{I})^{n-1} }{ \PsiG }
- \mel{ I }{ \qty(T^+_{I})^{n} }{ \PsiG } ] = 1.
\ee
The average trapping time defined as $t_{I}={\bar n}_{I} \tau$ where $ {\bar n}_{I}=\sum_n n P_{I}(n)$ is calculated to be
The average trapping time defined as ${\bar t}_{I}={\bar n}_{I} \tau$ where $ {\bar n}_{I}=\sum_n n P_{I}(n)$ is calculated to be
\be
t_{I}=\frac{1}{\PsiG_I} \mel{I}{ { \qty[ P_I \qty( H^+ - \EL^+ \Id ) P_I ] }^{-1} }{ \PsiG }.
{\bar t}_{I}=\frac{1}{\PsiG_I} \mel{I}{ { \qty[ P_I \qty( H^+ - \EL^+ \Id ) P_I ] }^{-1} }{ \PsiG }.
\ee
In practice, the various quantities restricted to the domain will be computed by diagonalizing the matrix $(H^+-\EL^+ \Id)$ in $\cD_{I}$.
Note that this is possible only if the dimension of the domains is not too large (say, less than a few thousand).
@ -892,7 +900,7 @@ In the site representation, a general vector of the Hilbert space can be written
\ee
where $n_{i \sigma}=0$ or $1$ is the number of electrons of spin $\sigma$ on site $i$.
For the one-dimensional Hubbard model with open boundary conditions, the components of the ground-state vector have the same sign (say, $c_i \ge 0$).
For the one-dimensional Hubbard model with open boundary conditions, the components of the ground-state vector have the same sign (say, $c_i > 0$).
It is then possible to equate the guiding and trial vectors, \ie, $\ket{c^+} = \ket{c_\text{T}}$.
As trial wave function, we shall employ a generalization of the Gutzwiller wave function \cite{Gutzwiller_1963}
\be
@ -917,7 +925,7 @@ As a general guiding principle, it is advantageous to build domains associated w
Here, as a first illustration of the method, we shall consider the large-$U$ regime of the Hubbard model where the construction of such domains is rather natural.
Indeed, at large $U$ and half-filling, the Hubbard model approaches the Heisenberg limit where only the $2^N$ states with no double occupancy, $n_D(n)=0$, have a significant weight in the wave function.
The contribution of the other states vanishes as $U$ increases with a rate increasing sharply with $n_D(n)$.
In addition, for a given number of double occupations, configurations with large values of $n_A(n)$ are favored due to their high kinetic energy.
In addition, for a given number of double occupations, configurations with large values of $n_A(n)$ are favored due to their high electronic mobility.
Therefore, we build domains associated with small $n_D$ and large $n_A$ in a hierarchical way as described below.
For simplicity and reducing the number of diagonalizations to perform, we shall consider only one non-trivial domain called here the main domain and denoted as $\cD$.
@ -931,10 +939,10 @@ For the other states, we choose a single-state domain as
\ee
To define $\cD$, let us introduce the following set of states
\be
{\cS_{ij} = \qty{ \ket{n} : n_D(n)=i \land n_A(n)=j }},
{\cS_{ij} = \qty{ \ket{n} : n_D(n)=i \land n_A(n)=j }}.
\ee
which means that $\cD$ contains the set of states having up to $n_D^\text{max}$ double occupations and, for each state with a number of double occupations equal to $m$, a number of nearest-neighbor antiparallel pairs between $n_A^\text{min}(m)$ and $n_A^\text{max}(m)$.
Here, $n_A^\text{max}(m)$ is fixed and set at its maximum value for a given $m$, \ie, $n_A^\text{max}(m)= \max_{N}(N-1-2m,0)$.
$\cD$ is defined as containing the set of states having up to $n_D^\text{max}$ double occupations and, for each state with a number of double occupations equal to $m$, a number of nearest-neighbor antiparallel pairs between $n_A^\text{min}(m)$ and $n_A^\text{max}(m)$.
Here, $n_A^\text{max}(m)$ is fixed and set at its maximum value for a given $m$, \ie, $n_A^\text{max}(m)= \max(N-1-2m,0)$.
Using these definitions, the main domain is taken as the union of some elementary domains
\be
\cD = \bigcup_{n_D=0}^{n_D^\text{max}}\cD(n_D,n_A^\text{min}(n_D)),
@ -1101,7 +1109,7 @@ For the best domain, the impact is much more important with a huge reduction of
Table \ref{tab3} reports the convergence of the energies as a function of $p$ alongside their statistical error on the last digit for $E=
-0.8$, $-0.795$, $-0.79$, $-0.785$, and $-0.78$.
The values are displayed in Fig.~\ref{fig4}.
As seen, the behavior of $\cE$ as a function of $E$ is very linear.
As seen, the behavior of $\cE$ as a function of $E$ is almost perfectly linear.
The extrapolated values obtained from the five values of the energy with three different fitting functions are reported.
Using the linear fitting function leads to an energy of $-0.7680282(5)$ (to be compared with the exact value of $-0.768068\ldots$).
A small bias of about $4 \times 10^{-5}$ is observed.
@ -1224,7 +1232,8 @@ In such a way, a new effective stochastic dynamics connecting only domains (and
A key property of this effective dynamics is that it does not depend on the ``shape'' of the domains used for each state.
Therefore, rather general domains (with or without overlap) can be considered.
To obtain the effective transition probability (which provides the probability of going from one domain to another) and the corresponding renormalized estimators, the domain Green's functions must be computed analytically, that is, in practice, matrices of the size of the number of states for the sampled domains have to be inverted.
To obtain the effective transition probability (which provides the probability of going from one domain to another)
and the corresponding renormalized estimators, the Green's functions restricted to each domain must be computed analytically, that is, in practice, matrices of the size of the number of states for the sampled domains have to be inverted.
This is the main computationally intensive step of the present approach.
The efficiency of the method is directly related to the importance of the average time spent by the stochastic trajectories in each domain.
@ -1272,7 +1281,7 @@ with
\\
\times \mel{ I_p }{ { \qty[ P_p \qty(H-E \Id) P_p ]}^{-1} }{ \Psi }.
\end{multline}
To treat simultaneously the two possibilities for the final state, \ie, $\ket{I_N} = \ket{1}$ or $\ket{2}$, Eq.~\eqref{eq:eqfond} has been slightly generalized to the case of a general vector for the final state written as
To treat simultaneously the two possibilities for the final state, \ie, $\ket{i_N} = \ket{1}$ or $\ket{2}$, Eq.~\eqref{eq:eqfond} has been slightly generalized to the case of a general vector for the final state written as
\be
\ket{\Psi} = \Psi_1 \ket{1} + \Psi_2 \ket{2}.
\ee