diff --git a/index.html b/index.html index f74f20d..236ee75 100644 --- a/index.html +++ b/index.html @@ -3,7 +3,7 @@ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
- +[0/1]
Last things to do[0/1]
Last things to doWe propose different exercises to understand quantum Monte Carlo (QMC) @@ -436,8 +436,8 @@ coordinates, etc).
In this section we consider the Hydrogen atom with the following @@ -510,8 +510,8 @@ E & = & \frac{\langle \Psi| \hat{H} | \Psi\rangle}{\langle \Psi |\Psi \rangle} \end{eqnarray*}
Write all the functions of this section in a single file : @@ -534,8 +534,8 @@ to catch the error.
@@ -546,8 +546,8 @@ Find the theoretical value of \(a\) for which \(\Psi\) is an eigenfunction of \(
@@ -567,7 +567,7 @@ and returns the potential.
import numpy as np @@ -579,7 +579,7 @@ and returns the potential.
import numpy as np @@ -593,7 +593,7 @@ and returns the potential.
double precision function potential(r) @@ -606,7 +606,7 @@ and returns the potential.
double precision function potential(r) @@ -627,8 +627,8 @@ and returns the potential.
@@ -641,7 +641,7 @@ input arguments, and returns a scalar.
def psi(a, r): @@ -651,7 +651,7 @@ input arguments, and returns a scalar.
def psi(a, r): @@ -661,7 +661,7 @@ input arguments, and returns a scalar.
double precision function psi(a, r) @@ -674,7 +674,7 @@ input arguments, and returns a scalar.
double precision function psi(a, r) @@ -689,8 +689,8 @@ input arguments, and returns a scalar.
@@ -749,7 +749,7 @@ So the local kinetic energy is
def kinetic(a,r): @@ -759,7 +759,7 @@ So the local kinetic energy is
def kinetic(a,r): @@ -771,7 +771,7 @@ So the local kinetic energy is
double precision function kinetic(a,r) @@ -784,7 +784,7 @@ So the local kinetic energy is
double precision function kinetic(a,r) @@ -805,8 +805,8 @@ So the local kinetic energy is
@@ -827,7 +827,7 @@ local kinetic energy.
def e_loc(a,r): @@ -837,7 +837,7 @@ local kinetic energy.
def e_loc(a,r): @@ -847,7 +847,7 @@ local kinetic energy.
double precision function e_loc(a,r) @@ -860,7 +860,7 @@ local kinetic energy.
double precision function e_loc(a,r) @@ -877,8 +877,8 @@ local kinetic energy.
@@ -889,8 +889,8 @@ choose a grid which does not contain the origin.
@@ -905,7 +905,7 @@ Gnuplot to plot the files.
import numpy as np @@ -926,7 +926,7 @@ plt.savefig("plot_py.png")
import numpy as np @@ -955,7 +955,7 @@ plt.savefig("plot_py.png")
program plot @@ -1005,7 +1005,7 @@ plot './data' index 0 using 1:2 with lines title 'a=0.1', \
program plot @@ -1077,8 +1077,8 @@ plot './data' index 0 using 1:2 with lines title 'a=0.1', \
If the space is discretized in small volume elements \(\mathbf{r}_i\) @@ -1108,8 +1108,8 @@ The energy is biased because:
@@ -1122,7 +1122,7 @@ Compute a numerical estimate of the energy in a grid of
import numpy as np @@ -1142,7 +1142,7 @@ Compute a numerical estimate of the energy in a grid of
import numpy as np @@ -1174,7 +1174,7 @@ Compute a numerical estimate of the energy in a grid of
program energy_hydrogen @@ -1211,7 +1211,7 @@ To compile the Fortran and run it:
program energy_hydrogen @@ -1280,8 +1280,8 @@ a = 2.0000000000000000 E = -8.0869806678448772E-002
The variance of the local energy is a functional of \(\Psi\) @@ -1308,8 +1308,8 @@ energy can be used as a measure of the quality of a wave function.
@@ -1321,8 +1321,8 @@ Prove that :
@@ -1337,7 +1337,7 @@ in a grid of \(50\times50\times50\) points in the range
import numpy as np @@ -1356,7 +1356,7 @@ in a grid of \(50\times50\times50\) points in the range
import numpy as np @@ -1392,7 +1392,7 @@ in a grid of \(50\times50\times50\) points in the range
program variance_hydrogen @@ -1434,7 +1434,7 @@ To compile and run:
program variance_hydrogen @@ -1511,8 +1511,8 @@ a = 2.0000000000000000 E = -8.0869806678448772E-002 s2 = 1.806881
Numerical integration with deterministic methods is very efficient @@ -1528,8 +1528,8 @@ interval.
To compute the statistical error, you need to perform \(M\) @@ -1569,8 +1569,8 @@ And the confidence interval is given by
@@ -1582,7 +1582,7 @@ input array.
from math import sqrt @@ -1594,7 +1594,7 @@ input array.
from math import sqrt @@ -1613,7 +1613,7 @@ input array.
subroutine ave_error(x,n,ave,err) @@ -1628,7 +1628,7 @@ input array.
subroutine ave_error(x,n,ave,err) @@ -1656,8 +1656,8 @@ input array.
We will now do our first Monte Carlo calculation to compute the @@ -1691,8 +1691,8 @@ compute the statistical error.
@@ -1706,7 +1706,7 @@ compute the average energy and the associated error bar.
@@ -1732,7 +1732,7 @@ the Python solution
+
from hydrogen import * @@ -1759,7 +1759,7 @@ the Fortran
+
@@ -1819,7 +1819,7 @@ well as the index of the current step.
@@ -1892,8 +1892,8 @@ E = -0.49588321986667677 +/- 7.1758863546737969E-004
We will now use the square of the wave function to sample random @@ -1981,8 +1981,8 @@ step such that the acceptance rate is close to 0.5 is a good compromise.
@@ -2003,7 +2003,7 @@ Can you observe a reduction in the statistical error?
from hydrogen import * @@ -2033,7 +2033,7 @@ Can you observe a reduction in the statistical error?
from hydrogen import * @@ -2076,7 +2076,7 @@ Can you observe a reduction in the statistical error?
subroutine metropolis_montecarlo(a,nmax,tau,energy,accep) @@ -2129,7 +2129,7 @@ Can you observe a reduction in the statistical error?
subroutine metropolis_montecarlo(a,nmax,tau,energy,accep) @@ -2209,8 +2209,8 @@ A = 0.51713866666666664 +/- 3.7072551835783688E-004
To obtain Gaussian-distributed random numbers, you can apply the @@ -2261,16 +2261,19 @@ following sections. end subroutine random_gauss
+In Python, you can use the random.normal
function of Numpy.
+
-One can use more efficient numerical schemes to move the electrons. -But in that case, the Metropolis accepation step has to be adapted -accordingly: the acceptance +One can use more efficient numerical schemes to move the electrons, +but the Metropolis acceptance step has to be adapted accordingly: +the acceptance probability \(A\) is chosen so that it is consistent with the probability of leaving \(\mathbf{r}_n\) and the probability of entering \(\mathbf{r}_{n+1}\): @@ -2294,25 +2297,25 @@ numbers. Hence, the transition probability was
\[ - T(\mathbf{r}_{n} \rightarrow \mathbf{r}_{n+1}) & = & + T(\mathbf{r}_{n} \rightarrow \mathbf{r}_{n+1}) = \text{constant} \]
-So the expression of \(A\) was simplified to the ratios of the squared +so the expression of \(A\) was simplified to the ratios of the squared wave functions.
-Now, if instead of drawing uniform random numbers -choose to draw Gaussian random numbers with mean 0 and variance +Now, if instead of drawing uniform random numbers we +choose to draw Gaussian random numbers with zero mean and variance \(\tau\), the transition probability becomes:
\[ - T(\mathbf{r}_{n} \rightarrow \mathbf{r}_{n+1}) & = & + T(\mathbf{r}_{n} \rightarrow \mathbf{r}_{n+1}) = \frac{1}{(2\pi\,\tau)^{3/2}} \exp \left[ - \frac{\left( \mathbf{r}_{n+1} - \mathbf{r}_{n} \right)^2}{2\tau} \right] \] @@ -2332,8 +2335,8 @@ To do this, we can add the drift vector
\[ - \frac{\nabla [ \Psi^2 ]}{\Psi^2} = 2 \frac{\nabla \Psi}{\Psi} - \]. + \frac{\nabla [ \Psi^2 ]}{\Psi^2} = 2 \frac{\nabla \Psi}{\Psi}. + \]
@@ -2355,7 +2358,7 @@ The transition probability becomes:
\[ - T(\mathbf{r}_{n} \rightarrow \mathbf{r}_{n+1}) & = & + T(\mathbf{r}_{n} \rightarrow \mathbf{r}_{n+1}) = \frac{1}{(2\pi\,\tau)^{3/2}} \exp \left[ - \frac{\left( \mathbf{r}_{n+1} - \mathbf{r}_{n} - \frac{\nabla \Psi(\mathbf{r}_n)}{\Psi(\mathbf{r}_n)} \right)^2}{2\,\tau} \right] @@ -2364,8 +2367,8 @@ The transition probability becomes:
@@ -2373,20 +2376,46 @@ Write a function to compute the drift vector \(\frac{\nabla \Psi(\mathbf{r})}{\P
-Python -
-def drift(a,r): - ar_inv = -a/np.sqrt(np.dot(r,r)) - return r * ar_inv -
-Fortran -
+def drift(a,r): + # TODO ++
def drift(a,r): + ar_inv = -a/np.sqrt(np.dot(r,r)) + return r * ar_inv ++
subroutine drift(a,r,b) + implicit none + double precision, intent(in) :: a, r(3) + double precision, intent(out) :: b(3) + ! TODO +end subroutine drift ++
subroutine drift(a,r,b) implicit none @@ -2399,10 +2428,12 @@ Write a function to compute the drift vector \(\frac{\nabla \Psi(\mathbf{r})}{\P
@@ -2410,18 +2441,47 @@ Modify the previous program to introduce the drifted diffusion scheme. (This is a necessary step for the next section).
+-Python -
+from hydrogen import * from qmc_stats import * -def MonteCarlo(a,tau,nmax): - E = 0. - N = 0. +def MonteCarlo(a,nmax,tau): + # TODO + +# Run simulation +a = 0.9 +nmax = 100000 +tau = 1.3 +X0 = [ MonteCarlo(a,nmax,tau) for i in range(30)] + +# Energy +X = [ x for (x, _) in X0 ] +E, deltaE = ave_error(X) +print(f"E = {E} +/- {deltaE}") + +# Acceptance rate +X = [ x for (_, x) in X0 ] +A, deltaA = ave_error(X) +print(f"A = {A} +/- {deltaA}") ++
from hydrogen import * +from qmc_stats import * + +def MonteCarlo(a,nmax,tau): + energy = 0. accep_rate = 0. sq_tau = np.sqrt(tau) r_old = np.random.normal(loc=0., scale=1.0, size=(3)) @@ -2445,24 +2505,32 @@ Modify the previous program to introduce the drifted diffusion scheme. d_old = d_new d2_old = d2_new psi_old = psi_new - N += 1. - E += e_loc(a,r_old) - return E/N, accep_rate/N + energy += e_loc(a,r_old) + return energy/nmax, accep_rate/nmax +# Run simulation a = 0.9 nmax = 100000 -tau = 1.0 -X = [MonteCarlo(a,tau,nmax) for i in range(30)] -E, deltaE = ave_error([x[0] for x in X]) -A, deltaA = ave_error([x[1] for x in X]) -print(f"E = {E} +/- {deltaE}\nA = {A} +/- {deltaA}") +tau = 1.3 +X0 = [ MonteCarlo(a,nmax,tau) for i in range(30)] + +# Energy +X = [ x for (x, _) in X0 ] +E, deltaE = ave_error(X) +print(f"E = {E} +/- {deltaE}") + +# Acceptance rate +X = [ x for (_, x) in X0 ] +A, deltaA = ave_error(X) +print(f"A = {A} +/- {deltaA}")
-Fortran -
+subroutine variational_montecarlo(a,tau,nmax,energy,accep_rate) implicit none @@ -2471,7 +2539,57 @@ Modify the previous program to introduce the drifted diffusion scheme. double precision, intent(out) :: energy, accep_rate integer*8 :: istep - double precision :: norm, sq_tau, chi(3), d2_old, prod, u + double precision :: sq_tau, chi(3) + double precision :: psi_old, psi_new + double precision :: r_old(3), r_new(3) + double precision :: d_old(3), d_new(3) + double precision, external :: e_loc, psi + + ! TODO + +end subroutine variational_montecarlo + +program qmc + implicit none + double precision, parameter :: a = 0.9 + double precision, parameter :: tau = 1.0 + integer*8 , parameter :: nmax = 100000 + integer , parameter :: nruns = 30 + + integer :: irun + double precision :: X(nruns), accep(nruns) + double precision :: ave, err + + do irun=1,nruns + call variational_montecarlo(a,tau,nmax,X(irun),accep(irun)) + enddo + call ave_error(X,nruns,ave,err) + print *, 'E = ', ave, '+/-', err + call ave_error(accep,nruns,ave,err) + print *, 'A = ', ave, '+/-', err +end program qmc ++
gfortran hydrogen.f90 qmc_stats.f90 vmc_metropolis.f90 -o vmc_metropolis +./vmc_metropolis ++
subroutine variational_montecarlo(a,tau,nmax,energy,accep_rate) + implicit none + double precision, intent(in) :: a, tau + integer*8 , intent(in) :: nmax + double precision, intent(out) :: energy, accep_rate + + integer*8 :: istep + double precision :: sq_tau, chi(3), d2_old, prod, u double precision :: psi_old, psi_new, d2_new, argexpo, q double precision :: r_old(3), r_new(3) double precision :: d_old(3), d_new(3) @@ -2481,7 +2599,6 @@ Modify the previous program to introduce the drifted diffusion scheme. ! Initialization energy = 0.d0 - norm = 0.d0 accep_rate = 0.d0 call random_gauss(r_old,3) call drift(a,r_old,d_old) @@ -2496,8 +2613,8 @@ Modify the previous program to introduce the drifted diffusion scheme. psi_new = psi(a,r_new) ! Metropolis prod = (d_new(1) + d_old(1))*(r_new(1) - r_old(1)) + & - (d_new(2) + d_old(2))*(r_new(2) - r_old(2)) + & - (d_new(3) + d_old(3))*(r_new(3) - r_old(3)) + (d_new(2) + d_old(2))*(r_new(2) - r_old(2)) + & + (d_new(3) + d_old(3))*(r_new(3) - r_old(3)) argexpo = 0.5d0 * (d2_new - d2_old)*tau + prod q = psi_new / psi_old q = dexp(-argexpo) * q*q @@ -2509,11 +2626,10 @@ Modify the previous program to introduce the drifted diffusion scheme. d2_old = d2_new psi_old = psi_new end if - norm = norm + 1.d0 energy = energy + e_loc(a,r_old) end do - energy = energy / norm - accep_rate = accep_rate / norm + energy = energy / dble(nmax) + accep_rate = dble(accep_rate) / dble(nmax) end subroutine variational_montecarlo program qmc @@ -2545,26 +2661,28 @@ Modify the previous program to introduce the drifted diffusion scheme.
-E = -0.49499990423528023 +/- 1.5958250761863871E-004 -A = 0.78861366666666655 +/- 3.5096729498002445E-004 +E = -0.49495906384751226 +/- 1.5257646086088266E-004 +A = 0.78861366666666666 +/- 3.7855335138754813E-004
@@ -2582,10 +2700,10 @@ energy of H for any value of \(a\).
from hydrogen import * from qmc_stats import * -def MonteCarlo(a,tau,nmax,Eref): - E = 0. - N = 0. - accep_rate = 0. +def MonteCarlo(a,nmax,tau,Eref): + energy = 0. + normalization = 0. + accep_rate = 0 sq_tau = np.sqrt(tau) r_old = np.random.normal(loc=0., scale=1.0, size=(3)) d_old = drift(a,r_old) @@ -2596,8 +2714,8 @@ energy of H for any value of \(a\). chi = np.random.normal(loc=0., scale=1.0, size=(3)) el = e_loc(a,r_old) w *= np.exp(-tau*(el - Eref)) - N += w - E += w * el + normalization += w + energy += w * el r_new = r_old + tau * d_old + sq_tau * chi d_new = drift(a,r_new) @@ -2610,27 +2728,29 @@ energy of H for any value of \(a\). q = np.exp(-argexpo) * q*q # PDMC weight if np.random.uniform() < q: - accep_rate += w + accep_rate += 1 r_old = r_new d_old = d_new d2_old = d2_new psi_old = psi_new - return E/N, accep_rate/N + return energy/normalization, accep_rate/nmax a = 0.9 nmax = 10000 tau = .1 -X = [MonteCarlo(a,tau,nmax,-0.5) for i in range(30)] +E_ref = -0.5 +X = [MonteCarlo(a,nmax,tau,E_ref) for i in range(30)] E, deltaE = ave_error([x[0] for x in X]) A, deltaA = ave_error([x[1] for x in X]) print(f"E = {E} +/- {deltaE}\nA = {A} +/- {deltaA}")
-Fortran -
+We will now consider the H2 molecule in a minimal basis composed of the @@ -2744,8 +2866,8 @@ the nuclei.
[0/1]
Last things to do[0/1]
Last things to do[ ]
Prepare 4 questions for the exam: multiple-choice questions
@@ -2760,7 +2882,7 @@ the H\(_2\) molecule at $R$=1.4010 bohr. Answer: 0.17406 a.u.