diff --git a/index.html b/index.html
index 5a789a7..a6bc418 100644
--- a/index.html
+++ b/index.html
@@ -3,7 +3,7 @@
 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
- +[0/3]
Last things to do[0/3]
Last things to do
This web site is the QMC tutorial of the LTTC winter school
@@ -510,8 +509,8 @@ coordinates, etc).
In this section we consider the Hydrogen atom with the following @@ -584,8 +583,8 @@ E & = & \frac{\langle \Psi| \hat{H} | \Psi\rangle}{\langle \Psi |\Psi \rangle} \end{eqnarray*}
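As a quick reminder (stated here for convenience; the full derivation belongs to the tutorial text that is not part of this hunk), the energy above can be recast as a weighted average of the local energy, which is the form all the following exercises rely on:

\begin{eqnarray*}
E & = & \frac{\int |\Psi(\mathbf{r})|^2 \, E_L(\mathbf{r}) \, d\mathbf{r}}{\int |\Psi(\mathbf{r})|^2 \, d\mathbf{r}},
\qquad
E_L(\mathbf{r}) \; = \; \frac{\hat{H} \Psi(\mathbf{r})}{\Psi(\mathbf{r})}
\end{eqnarray*}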
Write all the functions of this section in a single file : @@ -608,8 +607,8 @@ to catch the error.
@@ -645,14 +644,16 @@ and returns the potential.
double precision function potential(r) implicit none double precision, intent(in) :: r(3) + ! TODO + end function potential
Python @@ -674,13 +675,17 @@ and returns the potential.
double precision function potential(r) implicit none double precision, intent(in) :: r(3) - double precision :: distance + + double precision :: distance + distance = dsqrt( r(1)*r(1) + r(2)*r(2) + r(3)*r(3) ) + if (distance > 0.d0) then - potential = -1.d0 / dsqrt( r(1)*r(1) + r(2)*r(2) + r(3)*r(3) ) + potential = -1.d0 / distance else stop 'potential at r=0.d0 diverges' end if + end function potential
@@ -716,14 +721,16 @@ input arguments, and returns a scalar.
double precision function psi(a, r) implicit none double precision, intent(in) :: a, r(3) + ! TODO + end function psi
Python @@ -741,6 +748,7 @@ input arguments, and returns a scalar.
double precision function psi(a, r)
  implicit none
  double precision, intent(in) :: a, r(3)
+  psi = dexp(-a * dsqrt( r(1)*r(1) + r(2)*r(2) + r(3)*r(3) ))
end function psi
@@ -749,8 +757,8 @@ input arguments, and returns a scalar.
@@ -823,14 +831,16 @@ So the local kinetic energy is
double precision function kinetic(a,r) implicit none double precision, intent(in) :: a, r(3) + ! TODO + end function kinetic
Python @@ -839,7 +849,8 @@ So the local kinetic energy is
def kinetic(a,r): distance = np.sqrt(np.dot(r,r)) assert (distance > 0.) - return -0.5 * (a**2 - (2.*a)/distance) + + return a * (1./distance - 0.5 * a)
double precision function kinetic(a,r) implicit none double precision, intent(in) :: a, r(3) - double precision :: distance + + double precision :: distance + distance = dsqrt( r(1)*r(1) + r(2)*r(2) + r(3)*r(3) ) + if (distance > 0.d0) then - kinetic = -0.5d0 * (a*a - (2.d0*a) / distance) + + kinetic = a * (1.d0 / distance - 0.5d0 * a) + else stop 'kinetic energy diverges at r=0' end if + end function kinetic
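For reference, a short sketch of the algebra behind the expression used above, for the trial wave function \(\Psi(\mathbf{r}) = e^{-a |\mathbf{r}|}\) (the old and the new return statements are algebraically identical to this):

\begin{eqnarray*}
\nabla^2 e^{-a|\mathbf{r}|} & = & \left( a^2 - \frac{2a}{|\mathbf{r}|} \right) e^{-a|\mathbf{r}|} \\
-\frac{1}{2} \, \frac{\nabla^2 \Psi(\mathbf{r})}{\Psi(\mathbf{r})} & = & -\frac{1}{2} \left( a^2 - \frac{2a}{|\mathbf{r}|} \right) \; = \; a \left( \frac{1}{|\mathbf{r}|} - \frac{a}{2} \right)
\end{eqnarray*}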
@@ -900,14 +917,16 @@ local kinetic energy.
double precision function e_loc(a,r) implicit none double precision, intent(in) :: a, r(3) + ! TODO + end function e_loc
Python @@ -925,8 +944,11 @@ local kinetic energy.
double precision function e_loc(a,r) implicit none double precision, intent(in) :: a, r(3) + double precision, external :: kinetic, potential + e_loc = kinetic(a,r) + potential(r) + end function e_loc
@@ -945,8 +967,8 @@ Find the theoretical value of \(a\) for which \(\Psi\) is an eigenfunction of \(
@@ -978,8 +1000,8 @@ choose a grid which does not contain the origin.
@@ -1062,8 +1084,8 @@ plot './data' index 0 using 1:2 with lines title 'a=0.1', \
Python @@ -1138,8 +1160,8 @@ plt.savefig("plot_py.png")
If the space is discretized in small volume elements \(\mathbf{r}_i\) @@ -1169,8 +1191,8 @@ The energy is biased because:
@@ -1218,7 +1240,9 @@ Compute a numerical estimate of the energy in a grid of end do do j=1,size(a) + ! TODO + print *, 'a = ', a(j), ' E = ', energy end do @@ -1231,15 +1255,14 @@ To compile the Fortran and run it:
-gfortran hydrogen.f90 energy_hydrogen.f90 -o energy_hydrogen +gfortran hydrogen.f90 energy_hydrogen.f90 -o energy_hydrogen ./energy_hydrogen
Python @@ -1254,18 +1277,22 @@ gfortran hydrogen.f90 energy_hydrogen.f90 -o energy_hydrogen r = np.array([0.,0.,0.]) for a in [0.1, 0.2, 0.5, 0.9, 1., 1.5, 2.]: - E = 0. + E = 0. norm = 0. + for x in interval: r[0] = x for y in interval: r[1] = y for z in interval: r[2] = z + w = psi(a,r) w = w * w * delta + E += w * e_loc(a,r) norm += w + E = E / norm print(f"a = {a} \t E = {E}") @@ -1291,7 +1318,7 @@ a = 2.0 E = -0.08086980667844901 implicit none double precision, external :: e_loc, psi double precision :: x(50), w, delta, energy, dx, r(3), a(6), norm - integer :: i, k, l, j + integer :: i, k, l, j a = (/ 0.1d0, 0.2d0, 0.5d0, 1.d0, 1.5d0, 2.d0 /) @@ -1306,20 +1333,28 @@ a = 2.0 E = -0.08086980667844901 do j=1,size(a) energy = 0.d0 - norm = 0.d0 + norm = 0.d0 + do i=1,size(x) r(1) = x(i) + do k=1,size(x) r(2) = x(k) + do l=1,size(x) r(3) = x(l) + w = psi(a(j),r) w = w * w * delta + energy = energy + w * e_loc(a(j), r) norm = norm + w end do + end do + end do + energy = energy / norm print *, 'a = ', a(j), ' E = ', energy end do @@ -1342,8 +1377,8 @@ a = 2.0000000000000000 E = -8.0869806678448772E-002
The variance of the local energy is a functional of \(\Psi\) @@ -1370,8 +1405,8 @@ energy can be used as a measure of the quality of a wave function.
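The functional referred to here is presumably the usual variance of the local energy over the density \(|\Psi|^2\); it is written out here because the next exercise asks to compute it:

\[
\sigma^2(E_L) = \frac{\int |\Psi(\mathbf{r})|^2 \left[ E_L(\mathbf{r}) - E \right]^2 \, d\mathbf{r}}{\int |\Psi(\mathbf{r})|^2 \, d\mathbf{r}}
= \langle E_L^2 \rangle - \langle E_L \rangle^2
\]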
@@ -1382,8 +1417,8 @@ Prove that :
\(\bar{E} = \langle E \rangle\) is a constant, so \(\langle \bar{E} @@ -1402,8 +1437,8 @@ Prove that :
@@ -1421,150 +1456,192 @@ a grid of \(50\times50\times50\) points in the range \((-5,-5,-5) \le
import numpy as np
from hydrogen import e_loc, psi

-interval = np.linspace(-5,5,num=50)
-delta = (interval[1]-interval[0])**3
+interval = np.linspace(-5,5,num=50)
+
+delta = (interval[1]-interval[0])**3

r = np.array([0.,0.,0.])

for a in [0.1, 0.2, 0.5, 0.9, 1., 1.5, 2.]:
+    # TODO
+
    print(f"a = {a} \t E = {E:10.8f} \t \sigma^2 = {s2:10.8f}")
-Fortran
-#+begin_src f90 :tangle none
-program variance_hydrogen
-  implicit none
-  double precision, external :: e_loc, psi
-  double precision :: x(50), w, delta, energy, dx, r(3), a(6), norm, s2
-  double precision :: e, energy2
-  integer :: i, k, l, j
-
-  a = (/ 0.1d0, 0.2d0, 0.5d0, 1.d0, 1.5d0, 2.d0 /)
-
-  dx = 10.d0/(size(x)-1)
-  do i=1,size(x)
-    x(i) = -5.d0 + (i-1)*dx
-  end do
-
-  delta = dx**3
-
-  r(:) = 0.d0
-
-  do j=1,size(a)
-    ! TODO
-    print *, 'a = ', a(j), ' E = ', energy, ' s2 = ', s2
-  end do
-
-end program variance_hydrogen
-#+end_src
+Fortran
+
+program variance_hydrogen
+  implicit none
+
+  double precision :: x(50), w, delta, energy, energy2
+  double precision :: dx, r(3), a(6), norm, e_tmp, s2
+  integer :: i, k, l, j
+
+  double precision, external :: e_loc, psi
+
+  a = (/ 0.1d0, 0.2d0, 0.5d0, 1.d0, 1.5d0, 2.d0 /)
+
+  dx = 10.d0/(size(x)-1)
+  do i=1,size(x)
+    x(i) = -5.d0 + (i-1)*dx
+  end do
+
+  do j=1,size(a)
+
+    ! TODO
+
+    print *, 'a = ', a(j), ' E = ', energy
+  end do
+
+end program variance_hydrogen
+
To compile and run:
-
-#+begin_src sh :results output :exports both
-gfortran hydrogen.f90 variance_hydrogen.f90 -o variance_hydrogen
-./variance_hydrogen
-#+end_src
-
+gfortran hydrogen.f90 variance_hydrogen.f90 -o variance_hydrogen
+./variance_hydrogen
+
+
-Python
-#+begin_src python :results none :exports both
-import numpy as np
-from hydrogen import e_loc, psi
-
-interval = np.linspace(-5,5,num=50)
-delta = (interval[1]-interval[0])**3
-
-r = np.array([0.,0.,0.])
-
-for a in [0.1, 0.2, 0.5, 0.9, 1., 1.5, 2.]:
-    E = 0.
-    E2 = 0.
-    norm = 0.
-    for x in interval:
-        r[0] = x
-        for y in interval:
-            r[1] = y
-            for z in interval:
-                r[2] = z
-                w = psi(a, r)
-                w = w * w * delta
-                El = e_loc(a, r)
-                E += w * El
-                E2 += w * El*El
-                norm += w
-    E = E / norm
-    E2 = E2 / norm
-    s2 = E2 - E*E
-    print(f"a = {a} \t E = {E:10.8f} \t σ2 = {s2:10.8f}")
-#+end_src
+Python
+
+import numpy as np
+from hydrogen import e_loc, psi
+
+interval = np.linspace(-5,5,num=50)
+
+delta = (interval[1]-interval[0])**3
+
+r = np.array([0.,0.,0.])
+
+for a in [0.1, 0.2, 0.5, 0.9, 1., 1.5, 2.]:
+    E = 0.
+    E2 = 0.
+    norm = 0.
+
+    for x in interval:
+        r[0] = x
+
+        for y in interval:
+            r[1] = y
+
+            for z in interval:
+                r[2] = z
+
+                w = psi(a,r)
+                w = w * w * delta
+
+                e_tmp = e_loc(a,r)
+                E += w * e_tmp
+                E2 += w * e_tmp * e_tmp
+                norm += w
+
+    E = E / norm
+    E2 = E2 / norm
+
+    s2 = E2 - E**2
+    print(f"a = {a} \t E = {E:10.8f} \t \sigma^2 = {s2:10.8f}")
+
-a = 0.1 E = -0.24518439 \sigma^2 = 0.02696522 -a = 0.2 E = -0.26966058 \sigma^2 = 0.03719707 -a = 0.5 E = -0.38563576 \sigma^2 = 0.05318597 -a = 0.9 E = -0.49435710 \sigma^2 = 0.00577812 -a = 1.0 E = -0.50000000 \sigma^2 = 0.00000000 -a = 1.5 E = -0.39242967 \sigma^2 = 0.31449671 -a = 2.0 E = -0.08086981 \sigma^2 = 1.80688143 +a = 0.1 E = -0.24518439 \sigma^2 = 0.02696522 +a = 0.2 E = -0.26966058 \sigma^2 = 0.03719707 +a = 0.5 E = -0.38563576 \sigma^2 = 0.05318597 +a = 0.9 E = -0.49435710 \sigma^2 = 0.00577812 +a = 1.0 E = -0.50000000 \sigma^2 = 0.00000000 +a = 1.5 E = -0.39242967 \sigma^2 = 0.31449671 +a = 2.0 E = -0.08086981 \sigma^2 = 1.80688143
-Fortran
-#+begin_src f90
-program variance_hydrogen
-  implicit none
-  double precision, external :: e_loc, psi
-  double precision :: x(50), w, delta, energy, dx, r(3), a(6), norm, s2
-  double precision :: e, energy2
-  integer :: i, k, l, j
-
-  a = (/ 0.1d0, 0.2d0, 0.5d0, 1.d0, 1.5d0, 2.d0 /)
-
-  dx = 10.d0/(size(x)-1)
-  do i=1,size(x)
-    x(i) = -5.d0 + (i-1)*dx
-  end do
-
-  delta = dx**3
-
-  r(:) = 0.d0
-
-  do j=1,size(a)
-    energy = 0.d0
-    energy2 = 0.d0
-    norm = 0.d0
-    do i=1,size(x)
-      r(1) = x(i)
-      do k=1,size(x)
-        r(2) = x(k)
-        do l=1,size(x)
-          r(3) = x(l)
-          w = psi(a(j),r)
-          w = w * w * delta
-          e = e_loc(a(j), r)
-          energy = energy + w * e
-          energy2 = energy2 + w * e * e
-          norm = norm + w
-        end do
-      end do
-    end do
-    energy = energy / norm
-    energy2 = energy2 / norm
-    s2 = energy2 - energy*energy
-    print *, 'a = ', a(j), ' E = ', energy, ' s2 = ', s2
-  end do
-
-end program variance_hydrogen
-#+end_src
-
-#+begin_src sh :results output :exports results
-gfortran hydrogen.f90 variance_hydrogen.f90 -o variance_hydrogen
-./variance_hydrogen
-#+end_src
+Fortran
+
+program variance_hydrogen
+  implicit none
+
+  double precision :: x(50), w, delta, energy, energy2
+  double precision :: dx, r(3), a(6), norm, e_tmp, s2
+  integer :: i, k, l, j
+
+  double precision, external :: e_loc, psi
+
+  a = (/ 0.1d0, 0.2d0, 0.5d0, 1.d0, 1.5d0, 2.d0 /)
+
+  dx = 10.d0/(size(x)-1)
+  do i=1,size(x)
+    x(i) = -5.d0 + (i-1)*dx
+  end do
+
+  delta = dx**3
+
+  r(:) = 0.d0
+
+  do j=1,size(a)
+    energy = 0.d0
+    energy2 = 0.d0
+    norm = 0.d0
+
+    do i=1,size(x)
+      r(1) = x(i)
+
+      do k=1,size(x)
+        r(2) = x(k)
+
+        do l=1,size(x)
+          r(3) = x(l)
+
+          w = psi(a(j),r)
+          w = w * w * delta
+
+          e_tmp = e_loc(a(j), r)
+
+          energy = energy + w * e_tmp
+          energy2 = energy2 + w * e_tmp * e_tmp
+          norm = norm + w
+        end do
+
+      end do
+
+    end do
+
+    energy = energy / norm
+    energy2 = energy2 / norm
+
+    s2 = energy2 - energy*energy
+
+    print *, 'a = ', a(j), ' E = ', energy, ' s2 = ', s2
+  end do
+
+end program variance_hydrogen
+
-a = 0.10000000000000001 E = -0.24518438948809140 s2 = 2.6965218719722767E-002 -a = 0.20000000000000001 E = -0.26966057967803236 s2 = 3.7197072370201284E-002 -a = 0.50000000000000000 E = -0.38563576125173815 s2 = 5.3185967578480653E-002 -a = 1.0000000000000000 E = -0.50000000000000000 s2 = 0.0000000000000000 -a = 1.5000000000000000 E = -0.39242967082602065 s2 = 0.31449670909172917 -a = 2.0000000000000000 E = -8.0869806678448772E-002 s2 = 1.8068814270846534 +a = 0.10000000000000001 E = -0.24518438948809140 s2 = 2.6965218719722767E-002 +a = 0.20000000000000001 E = -0.26966057967803236 s2 = 3.7197072370201284E-002 +a = 0.50000000000000000 E = -0.38563576125173815 s2 = 5.3185967578480653E-002 +a = 1.0000000000000000 E = -0.50000000000000000 s2 = 0.0000000000000000 +a = 1.5000000000000000 E = -0.39242967082602065 s2 = 0.31449670909172917 +a = 2.0000000000000000 E = -8.0869806678448772E-002 s2 = 1.8068814270846534
Numerical integration with deterministic methods is very efficient @@ -1590,8 +1667,8 @@ interval.
To compute the statistical error, you need to perform \(M\) @@ -1631,8 +1708,8 @@ And the confidence interval is given by
@@ -1662,14 +1739,16 @@ input array. integer, intent(in) :: n double precision, intent(in) :: x(n) double precision, intent(out) :: ave, err + ! TODO + end subroutine ave_error
Python @@ -1679,13 +1758,17 @@ input array. def ave_error(arr): M = len(arr) assert(M>0) + if M == 1: - return (arr[0], 0.) + average = arr[0] + error = 0. + else: average = sum(arr)/M variance = 1./(M-1) * sum( [ (x - average)**2 for x in arr ] ) error = sqrt(variance/M) - return (average, error) + + return (average, error)
subroutine ave_error(x,n,ave,err)
  implicit none
+
  integer, intent(in)           :: n
  double precision, intent(in)  :: x(n)
  double precision, intent(out) :: ave, err
-  double precision :: variance
+
+  double precision :: variance
+
  if (n < 1) then
    stop 'n<1 in ave_error'
+
  else if (n == 1) then
    ave = x(1)
    err = 0.d0
+
  else
-    ave = sum(x(:)) / dble(n)
-    variance = sum( (x(:) - ave)**2 ) / dble(n-1)
-    err = dsqrt(variance/dble(n))
+    ave = sum(x(:)) / dble(n)
+
+    variance = sum((x(:) - ave)**2) / dble(n-1)
+    err = dsqrt(variance/dble(n))
+
  endif
end subroutine ave_error
@@ -1717,8 +1807,8 @@ input array.
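In equations, the quantities computed by ave_error above are the sample mean, the unbiased sample variance and the standard error of the mean:

\[
\bar{x} = \frac{1}{M} \sum_{k=1}^{M} x_k, \qquad
\sigma^2 = \frac{1}{M-1} \sum_{k=1}^{M} \left( x_k - \bar{x} \right)^2, \qquad
\delta \bar{x} = \sqrt{\frac{\sigma^2}{M}}
\]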
We will now do our first Monte Carlo calculation to compute the @@ -1752,8 +1842,8 @@ compute the statistical error.
@@ -1783,9 +1873,11 @@ the def MonteCarlo(a, nmax): # TODO -a = 0.9 +a = 0.9 nmax = 100000 + #TODO + print(f"E = {E} +/- {deltaE}")
Python @@ -1863,18 +1956,24 @@ well as the index of the current step. def MonteCarlo(a, nmax): energy = 0. normalization = 0. + for istep in range(nmax): r = np.random.uniform(-5., 5., (3)) + w = psi(a,r) w = w*w - normalization += w - energy += w * e_loc(a,r) - return energy/normalization -a = 0.9 + energy += w * e_loc(a,r) + normalization += w + + return energy / normalization + +a = 0.9 nmax = 100000 + X = [MonteCarlo(a,nmax) for i in range(30)] E, deltaE = ave_error(X) + print(f"E = {E} +/- {deltaE}")
-E = -0.49588321986667677 +/- 7.1758863546737969E-004 +E = -0.49518773675598715 +/- 5.2391494923686175E-004
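The estimator implemented by the MonteCarlo function above is the weighted average below, with points drawn uniformly in the cube \((-5,5)^3\); the constant volume factor cancels between numerator and denominator, which is why no volume element appears in the code:

\[
E \approx \frac{\sum_{k=1}^{N_{\rm MC}} w_k \, E_L(\mathbf{r}_k)}{\sum_{k=1}^{N_{\rm MC}} w_k},
\qquad w_k = \left[ \Psi(\mathbf{r}_k) \right]^2
\]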
We will now use the square of the wave function to sample random @@ -2040,8 +2147,8 @@ step such that the acceptance rate is close to 0.5 is a good compromise.
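For the symmetric uniform move used in this section, the Metropolis acceptance step reduces to the standard rule below, which is exactly the ratio computed in the code that follows:

\[
A(\mathbf{r}_{\rm old} \rightarrow \mathbf{r}_{\rm new}) =
\min \left( 1, \; \frac{\left[ \Psi(\mathbf{r}_{\rm new}) \right]^2}{\left[ \Psi(\mathbf{r}_{\rm old}) \right]^2} \right)
\]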
@@ -2068,13 +2175,17 @@ Can you observe a reduction in the statistical error? from qmc_stats import * def MonteCarlo(a,nmax,dt): + # TODO + return energy/nmax, N_accep/nmax + # Run simulation -a = 0.9 +a = 0.9 nmax = 100000 -dt = 1.3 +dt = #TODO + X0 = [ MonteCarlo(a,nmax,dt) for i in range(30)] # Energy @@ -2101,24 +2212,25 @@ Can you observe a reduction in the statistical error? double precision, intent(out) :: energy double precision, intent(out) :: accep - integer*8 :: istep - + integer*8 :: istep + integer*8 :: n_accep double precision :: r_old(3), r_new(3), psi_old, psi_new double precision :: v, ratio - integer*8 :: n_accep + double precision, external :: e_loc, psi, gaussian ! TODO + end subroutine metropolis_montecarlo program qmc implicit none - double precision, parameter :: a = 0.9d0 - double precision, parameter :: dt = 1.3d0 - integer*8 , parameter :: nmax = 100000 + double precision, parameter :: a = 0.9d0 + double precision, parameter :: dt = ! TODO + integer*8 , parameter :: nmax = 100000 integer , parameter :: nruns = 30 - integer :: irun + integer :: irun double precision :: X(nruns), Y(nruns) double precision :: ave, err @@ -2131,6 +2243,7 @@ Can you observe a reduction in the statistical error? call ave_error(Y,nruns,ave,err) print *, 'A = ', ave, '+/-', err + end program qmc
Python @@ -2153,26 +2266,33 @@ Can you observe a reduction in the statistical error? from qmc_stats import * def MonteCarlo(a,nmax,dt): - energy = 0. + energy = 0. N_accep = 0 + r_old = np.random.uniform(-dt, dt, (3)) psi_old = psi(a,r_old) + for istep in range(nmax): + energy += e_loc(a,r_old) + r_new = r_old + np.random.uniform(-dt,dt,(3)) psi_new = psi(a,r_new) + ratio = (psi_new / psi_old)**2 - v = np.random.uniform() - if v <= ratio: + + if np.random.uniform() <= ratio: N_accep += 1 - r_old = r_new + + r_old = r_new psi_old = psi_new - energy += e_loc(a,r_old) + return energy/nmax, N_accep/nmax # Run simulation -a = 0.9 +a = 0.9 nmax = 100000 -dt = 1.3 +dt = 1.3 + X0 = [ MonteCarlo(a,nmax,dt) for i in range(30)] # Energy @@ -2205,33 +2325,45 @@ A = 0.5172960000000001 +/- 0.0003443446549306529 double precision, intent(out) :: energy double precision, intent(out) :: accep - integer*8 :: istep - double precision :: r_old(3), r_new(3), psi_old, psi_new double precision :: v, ratio integer*8 :: n_accep + integer*8 :: istep + double precision, external :: e_loc, psi, gaussian - energy = 0.d0 + energy = 0.d0 n_accep = 0_8 + call random_number(r_old) r_old(:) = dt * (2.d0*r_old(:) - 1.d0) psi_old = psi(a,r_old) + do istep = 1,nmax + energy = energy + e_loc(a,r_old) + call random_number(r_new) - r_new(:) = r_old(:) + dt * (2.d0*r_new(:) - 1.d0) + r_new(:) = r_old(:) + dt*(2.d0*r_new(:) - 1.d0) + psi_new = psi(a,r_new) + ratio = (psi_new / psi_old)**2 call random_number(v) + if (v <= ratio) then + + n_accep = n_accep + 1_8 + r_old(:) = r_new(:) psi_old = psi_new - n_accep = n_accep + 1_8 + endif - energy = energy + e_loc(a,r_old) + end do + energy = energy / dble(nmax) accep = dble(n_accep) / dble(nmax) + end subroutine metropolis_montecarlo program qmc @@ -2241,7 +2373,7 @@ A = 0.5172960000000001 +/- 0.0003443446549306529 integer*8 , parameter :: nmax = 100000 integer , parameter :: nruns = 30 - integer :: irun + integer :: irun double precision :: X(nruns), Y(nruns) double precision :: ave, err @@ -2254,13 +2386,14 @@ A = 0.5172960000000001 +/- 0.0003443446549306529 call ave_error(Y,nruns,ave,err) print *, 'A = ', ave, '+/-', err + end program qmc
-E = -0.49515370205041676 +/- 1.7660819245720729E-004 -A = 0.51713866666666664 +/- 3.7072551835783688E-004 +E = -0.49503130891988767 +/- 1.7160104275040037E-004 +A = 0.51695266666666673 +/- 4.0445505648997396E-004
To obtain Gaussian-distributed random numbers, you can apply the @@ -2300,6 +2433,7 @@ following sections. integer :: i call random_number(u) + if (iand(n,1) == 0) then ! n is even do i=1,n,2 @@ -2307,6 +2441,7 @@ following sections. z(i+1) = z(i) * dsin( two_pi*u(i+1) ) z(i) = z(i) * dcos( two_pi*u(i+1) ) end do + else ! n is odd do i=1,n-1,2 @@ -2314,9 +2449,12 @@ following sections. z(i+1) = z(i) * dsin( two_pi*u(i+1) ) z(i) = z(i) * dcos( two_pi*u(i+1) ) end do + z(n) = dsqrt(-2.d0*dlog(u(n))) z(n) = z(n) * dcos( two_pi*u(n+1) ) + end if + end subroutine random_gauss
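A minimal Python sketch of the same Box-Muller idea, for readers following the Python track; the function name random_gauss_py is introduced here only for illustration, since the tutorial's own Python code simply calls np.random.normal:

import numpy as np

def random_gauss_py(n):
    # Box-Muller: map pairs of uniform numbers to Gaussian numbers.
    # Use 1 - uniform so that the argument of the log is in (0, 1].
    u1 = 1.0 - np.random.uniform(size=n)
    u2 = np.random.uniform(size=n)
    # Only the cosine branch is returned; the sine branch would give
    # a second, independent Gaussian sample.
    return np.sqrt(-2.0 * np.log(u1)) * np.cos(2.0 * np.pi * u2)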
One can use more efficient numerical schemes to move the electrons, @@ -2426,8 +2564,8 @@ The transition probability becomes:
@@ -2453,14 +2591,16 @@ Write a function to compute the drift vector \(\frac{\nabla \Psi(\mathbf{r})}{\P implicit none double precision, intent(in) :: a, r(3) double precision, intent(out) :: b(3) + ! TODO + end subroutine drift
Python @@ -2480,9 +2620,12 @@ Write a function to compute the drift vector \(\frac{\nabla \Psi(\mathbf{r})}{\P implicit none double precision, intent(in) :: a, r(3) double precision, intent(out) :: b(3) + double precision :: ar_inv + ar_inv = -a / dsqrt(r(1)*r(1) + r(2)*r(2) + r(3)*r(3)) - b(:) = r(:) * ar_inv + b(:) = r(:) * ar_inv + end subroutine drift
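For reference, the formula implemented by the drift routine above follows directly from \(\Psi(\mathbf{r}) = e^{-a|\mathbf{r}|}\):

\[
\frac{\nabla \Psi(\mathbf{r})}{\Psi(\mathbf{r})} = -a \, \frac{\mathbf{r}}{|\mathbf{r}|}
\]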
@@ -2512,9 +2655,10 @@ Modify the previous program to introduce the drifted diffusion scheme. # TODO # Run simulation -a = 0.9 +a = 0.9 nmax = 100000 -dt = 1.3 +dt = # TODO + X0 = [ MonteCarlo(a,nmax,dt) for i in range(30)] # Energy @@ -2533,17 +2677,19 @@ Modify the previous program to introduce the drifted diffusion scheme. Fortran
subroutine variational_montecarlo(a,dt,nmax,energy,accep_rate) +subroutine variational_montecarlo(a,dt,nmax,energy,accep) implicit none double precision, intent(in) :: a, dt integer*8 , intent(in) :: nmax - double precision, intent(out) :: energy, accep_rate + double precision, intent(out) :: energy, accep - integer*8 :: istep + integer*8 :: istep + integer*8 :: n_accep double precision :: sq_dt, chi(3) double precision :: psi_old, psi_new double precision :: r_old(3), r_new(3) double precision :: d_old(3), d_new(3) + double precision, external :: e_loc, psi ! TODO @@ -2552,22 +2698,25 @@ Modify the previous program to introduce the drifted diffusion scheme. program qmc implicit none - double precision, parameter :: a = 0.9 - double precision, parameter :: dt = 1.0 - integer*8 , parameter :: nmax = 100000 + double precision, parameter :: a = 0.9 + double precision, parameter :: dt = ! TODO + integer*8 , parameter :: nmax = 100000 integer , parameter :: nruns = 30 - integer :: irun + integer :: irun double precision :: X(nruns), accep(nruns) double precision :: ave, err do irun=1,nruns call variational_montecarlo(a,dt,nmax,X(irun),accep(irun)) enddo + call ave_error(X,nruns,ave,err) print *, 'E = ', ave, '+/-', err + call ave_error(accep,nruns,ave,err) print *, 'A = ', ave, '+/-', err + end program qmc
Python @@ -2590,38 +2739,49 @@ Modify the previous program to introduce the drifted diffusion scheme. from qmc_stats import * def MonteCarlo(a,nmax,dt): - energy = 0. - accep_rate = 0. sq_dt = np.sqrt(dt) - r_old = np.random.normal(loc=0., scale=1.0, size=(3)) - d_old = drift(a,r_old) - d2_old = np.dot(d_old,d_old) + + energy = 0. + N_accep = 0 + + r_old = np.random.normal(loc=0., scale=1.0, size=(3)) + d_old = drift(a,r_old) + d2_old = np.dot(d_old,d_old) psi_old = psi(a,r_old) + for istep in range(nmax): chi = np.random.normal(loc=0., scale=1.0, size=(3)) - r_new = r_old + dt * d_old + sq_dt * chi - d_new = drift(a,r_new) - d2_new = np.dot(d_new,d_new) + + energy += e_loc(a,r_old) + + r_new = r_old + dt * d_old + sq_dt * chi + d_new = drift(a,r_new) + d2_new = np.dot(d_new,d_new) psi_new = psi(a,r_new) + # Metropolis - prod = np.dot((d_new + d_old), (r_new - r_old)) + prod = np.dot((d_new + d_old), (r_new - r_old)) argexpo = 0.5 * (d2_new - d2_old)*dt + prod + q = psi_new / psi_old q = np.exp(-argexpo) * q*q - if np.random.uniform() < q: - accep_rate += 1. - r_old = r_new - d_old = d_new - d2_old = d2_new + + if np.random.uniform() <= q: + N_accep += 1 + + r_old = r_new + d_old = d_new + d2_old = d2_new psi_old = psi_new - energy += e_loc(a,r_old) + return energy/nmax, accep_rate/nmax # Run simulation -a = 0.9 +a = 0.9 nmax = 100000 -dt = 1.3 +dt = 1.3 + X0 = [ MonteCarlo(a,nmax,dt) for i in range(30)] # Energy @@ -2646,81 +2806,107 @@ A = 0.7200673333333333 +/- 0.00045942791345632793 Fortran
subroutine variational_montecarlo(a,dt,nmax,energy,accep_rate) +subroutine variational_montecarlo(a,dt,nmax,energy,accep) implicit none double precision, intent(in) :: a, dt integer*8 , intent(in) :: nmax - double precision, intent(out) :: energy, accep_rate + double precision, intent(out) :: energy, accep - integer*8 :: istep + integer*8 :: istep + integer*8 :: n_accep double precision :: sq_dt, chi(3), d2_old, prod, u double precision :: psi_old, psi_new, d2_new, argexpo, q double precision :: r_old(3), r_new(3) double precision :: d_old(3), d_new(3) + double precision, external :: e_loc, psi sq_dt = dsqrt(dt) ! Initialization - energy = 0.d0 - accep_rate = 0.d0 + energy = 0.d0 + n_accep = 0_8 + call random_gauss(r_old,3) + call drift(a,r_old,d_old) - d2_old = d_old(1)*d_old(1) + d_old(2)*d_old(2) + d_old(3)*d_old(3) + d2_old = d_old(1)*d_old(1) + & + d_old(2)*d_old(2) + & + d_old(3)*d_old(3) + psi_old = psi(a,r_old) do istep = 1,nmax + energy = energy + e_loc(a,r_old) + call random_gauss(chi,3) - r_new(:) = r_old(:) + dt * d_old(:) + chi(:)*sq_dt + r_new(:) = r_old(:) + dt*d_old(:) + chi(:)*sq_dt + call drift(a,r_new,d_new) - d2_new = d_new(1)*d_new(1) + d_new(2)*d_new(2) + d_new(3)*d_new(3) + d2_new = d_new(1)*d_new(1) + & + d_new(2)*d_new(2) + & + d_new(3)*d_new(3) + psi_new = psi(a,r_new) + ! Metropolis prod = (d_new(1) + d_old(1))*(r_new(1) - r_old(1)) + & (d_new(2) + d_old(2))*(r_new(2) - r_old(2)) + & (d_new(3) + d_old(3))*(r_new(3) - r_old(3)) + argexpo = 0.5d0 * (d2_new - d2_old)*dt + prod + q = psi_new / psi_old q = dexp(-argexpo) * q*q + call random_number(u) - if (u<q) then - accep_rate = accep_rate + 1.d0 + + if (u <= q) then + + n_accep = n_accep + 1_8 + r_old(:) = r_new(:) d_old(:) = d_new(:) - d2_old = d2_new - psi_old = psi_new + d2_old = d2_new + psi_old = psi_new + end if - energy = energy + e_loc(a,r_old) + end do + energy = energy / dble(nmax) - accep_rate = dble(accep_rate) / dble(nmax) + accep = dble(n_accep) / dble(nmax) + end subroutine variational_montecarlo program qmc implicit none - double precision, parameter :: a = 0.9 - double precision, parameter :: dt = 1.0 - integer*8 , parameter :: nmax = 100000 + double precision, parameter :: a = 0.9 + double precision, parameter :: dt = 1.0 + integer*8 , parameter :: nmax = 100000 integer , parameter :: nruns = 30 - integer :: irun + integer :: irun double precision :: X(nruns), accep(nruns) double precision :: ave, err do irun=1,nruns call variational_montecarlo(a,dt,nmax,X(irun),accep(irun)) enddo + call ave_error(X,nruns,ave,err) print *, 'E = ', ave, '+/-', err + call ave_error(accep,nruns,ave,err) print *, 'A = ', ave, '+/-', err + end program qmc
-E = -0.49495906384751226 +/- 1.5257646086088266E-004 -A = 0.78861366666666666 +/- 3.7855335138754813E-004 +E = -0.49497258331144794 +/- 1.0973395750688713E-004 +A = 0.78839866666666658 +/- 3.2503783452043152E-004
Consider the time-dependent Schrödinger equation: @@ -2796,8 +2979,8 @@ system.
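A one-line reminder of why the imaginary-time evolution introduced here projects onto the ground state (expanding the initial state on the eigenstates \(\Phi_k\) of \(\hat{H}\) with energies \(E_k\)):

\[
\psi(\mathbf{r},\tau) = e^{-\tau \hat{H}} \, \psi(\mathbf{r},0)
= \sum_k c_k \, e^{-\tau E_k} \, \Phi_k(\mathbf{r})
\; \longrightarrow \; c_0 \, e^{-\tau E_0} \, \Phi_0(\mathbf{r})
\quad \text{as} \quad \tau \to \infty
\]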
The diffusion equation of particles is given by @@ -2851,8 +3034,8 @@ the combination of a diffusion process and a branching process.
In a molecular system, the potential is far from being constant, @@ -2862,8 +3045,7 @@ in the numbers of particles, making the calculations impossible in practice. Fortunately, if we multiply the Schrödinger equation by a chosen trial wave function \(\Psi_T(\mathbf{r})\) (Hartree-Fock, Kohn-Sham -determinant, CI wave function, etc), one obtains (see appendix -for details) +determinant, CI wave function, etc), one obtains
@@ -2874,8 +3056,7 @@ for details)
-Defining \(\Pi(\mathbf{r},\tau) = \psi(\mathbf{r},\tau) - \Psi_T(\mathbf{r})\), +Defining \(\Pi(\mathbf{r},\tau) = \psi(\mathbf{r},\tau) \Psi_T(\mathbf{r})\), (see appendix for details)
@@ -2903,17 +3084,16 @@ constant. This equation generates the N-electron density \(\Pi\), which is the product of the ground state with the trial wave function. It introduces the constraint that \(\Pi(\mathbf{r},\tau)=0\) where -\(\Psi_T(\mathbf{r})=0\). If the wave function has the same sign -everywhere, as in the hydrogen atom or the H2 molecule, this +\(\Psi_T(\mathbf{r})=0\). In the few cases where the wave function has no nodes, +such as in the hydrogen atom or the H2 molecule, this constraint is harmless and we can obtain the exact energy. But for -systems where the wave function has nodes (systems with 3 or more -electrons), this scheme introduces an error known as the fixed node -error. +systems where the wave function has nodes, this scheme introduces an +error known as the fixed node error.
\[ @@ -2975,8 +3155,8 @@ Defining \(\Pi(\mathbf{r},t) = \psi(\mathbf{r},\tau)
Now that we have a process to sample \(\Pi(\mathbf{r},\tau) = @@ -2992,9 +3172,6 @@ energy of the system, within the fixed-node constraint, as: \]
-
-Indeed, this is equivalent to
-
\[ @@ -3025,32 +3202,35 @@ As \(\hat{H}\) is Hermitian,
So computing the energy within DMC consists in generating the -density \(\Pi\), and simply averaging the local energies computed -with the trial wave function. +density \(\Pi\) with random walks, and simply averaging the local +energies computed with the trial wave function.
Instead of having a variable number of particles to simulate the branching process, one can choose to sample \([\Psi_T(\mathbf{r})]^2\) instead of \(\psi(\mathbf{r},\tau) \Psi_T(\mathbf{r})\), and consider the term \(\exp \left( -\delta t\,( E_L(\mathbf{r}) - E_{\text{ref}} \right)\) as a -cumulative product of branching weights: +cumulative product of weights:
\[ W(\mathbf{r}_n, \tau) = \exp \left( \int_0^\tau - (E_L(\mathbf{r}_t) - E_{\text{ref}}) dt \right) - \approx \prod_{i=1}^{n} \exp \left( -\delta t\, (E_L(\mathbf{r}_i) - E_{\text{ref}}) \right). + \approx \prod_{i=1}^{n} \exp \left( -\delta t\, + (E_L(\mathbf{r}_i) - E_{\text{ref}}) \right) = + \prod_{i=1}^{n} w(\mathbf{r}_i) \]
+where \(\mathbf{r}_i\) are the coordinates along the trajectory. The wave function becomes
@@ -3075,17 +3255,19 @@ E(\tau) & = & \frac{\int \psi(\mathbf{r},\tau) \Psi_T(\mathbf{r}) E_L(\mathbf{r} This algorithm is less stable than the branching algorithm: it requires to have a value of \(E_\text{ref}\) which is close to the fixed-node energy, and a good trial wave function. Its big -advantage is that it is very easy to program starting from a VMC code. +advantage is that it is very easy to program starting from a VMC +code, so this is what we will do in the next section.@@ -3094,152 +3276,305 @@ In the limit \(\delta t \rightarrow 0\), you should recover the exact energy of H for any value of \(a\).
-
+Python
+
from hydrogen import * from qmc_stats import * -def MonteCarlo(a,nmax,tau,Eref): - energy = 0. - normalization = 0. - accep_rate = 0 - sq_tau = np.sqrt(tau) - r_old = np.random.normal(loc=0., scale=1.0, size=(3)) - d_old = drift(a,r_old) - d2_old = np.dot(d_old,d_old) - psi_old = psi(a,r_old) - w = 1.0 - for istep in range(nmax): - chi = np.random.normal(loc=0., scale=1.0, size=(3)) - el = e_loc(a,r_old) - w *= np.exp(-tau*(el - Eref)) - normalization += w - energy += w * el +def MonteCarlo(a, nmax, dt, Eref): + # TODO - r_new = r_old + tau * d_old + sq_tau * chi - d_new = drift(a,r_new) - d2_new = np.dot(d_new,d_new) - psi_new = psi(a,r_new) - # Metropolis - prod = np.dot((d_new + d_old), (r_new - r_old)) - argexpo = 0.5 * (d2_new - d2_old)*tau + prod - q = psi_new / psi_old - q = np.exp(-argexpo) * q*q - # PDMC weight - if np.random.uniform() < q: - accep_rate += 1 - r_old = r_new - d_old = d_new - d2_old = d2_new - psi_old = psi_new - return energy/normalization, accep_rate/nmax - - -a = 0.9 -nmax = 10000 -tau = .1 +# Run simulation +a = 0.9 +nmax = 100000 +dt = 0.01 E_ref = -0.5 -X = [MonteCarlo(a,nmax,tau,E_ref) for i in range(30)] -E, deltaE = ave_error([x[0] for x in X]) -A, deltaA = ave_error([x[1] for x in X]) -print(f"E = {E} +/- {deltaE}\nA = {A} +/- {deltaA}") + +X0 = [ MonteCarlo(a, nmax, dt, E_ref) for i in range(30)] + +# Energy +X = [ x for (x, _) in X0 ] +E, deltaE = ave_error(X) +print(f"E = {E} +/- {deltaE}") + +# Acceptance rate +X = [ x for (_, x) in X0 ] +A, deltaA = ave_error(X) +print(f"A = {A} +/- {deltaA}")
+Fortran +
subroutine variational_montecarlo(a,tau,nmax,energy,accep_rate) +subroutine pdmc(a, dt, nmax, energy, accep, tau, E_ref) implicit none - double precision, intent(in) :: a, tau + double precision, intent(in) :: a, dt, tau integer*8 , intent(in) :: nmax - double precision, intent(out) :: energy, accep_rate + double precision, intent(out) :: energy, accep + double precision, intent(in) :: E_ref - integer*8 :: istep - double precision :: norm, sq_tau, chi(3), d2_old, prod, u - double precision :: psi_old, psi_new, d2_new, argexpo, q + integer*8 :: istep + integer*8 :: n_accep + double precision :: sq_dt, chi(3) + double precision :: psi_old, psi_new double precision :: r_old(3), r_new(3) double precision :: d_old(3), d_new(3) + double precision, external :: e_loc, psi - sq_tau = dsqrt(tau) + ! TODO - ! Initialization - energy = 0.d0 - norm = 0.d0 - accep_rate = 0.d0 - call random_gauss(r_old,3) - call drift(a,r_old,d_old) - d2_old = d_old(1)*d_old(1) + d_old(2)*d_old(2) + d_old(3)*d_old(3) - psi_old = psi(a,r_old) - - do istep = 1,nmax - call random_gauss(chi,3) - r_new(:) = r_old(:) + tau * d_old(:) + chi(:)*sq_tau - call drift(a,r_new,d_new) - d2_new = d_new(1)*d_new(1) + d_new(2)*d_new(2) + d_new(3)*d_new(3) - psi_new = psi(a,r_new) - ! Metropolis - prod = (d_new(1) + d_old(1))*(r_new(1) - r_old(1)) + & - (d_new(2) + d_old(2))*(r_new(2) - r_old(2)) + & - (d_new(3) + d_old(3))*(r_new(3) - r_old(3)) - argexpo = 0.5d0 * (d2_new - d2_old)*tau + prod - q = psi_new / psi_old - q = dexp(-argexpo) * q*q - call random_number(u) - if (u<q) then - accep_rate = accep_rate + 1.d0 - r_old(:) = r_new(:) - d_old(:) = d_new(:) - d2_old = d2_new - psi_old = psi_new - end if - norm = norm + 1.d0 - energy = energy + e_loc(a,r_old) - end do - energy = energy / norm - accep_rate = accep_rate / norm -end subroutine variational_montecarlo +end subroutine pdmc program qmc implicit none - double precision, parameter :: a = 0.9 - double precision, parameter :: tau = 1.0 - integer*8 , parameter :: nmax = 100000 + double precision, parameter :: a = 0.9 + double precision, parameter :: dt = 0.1d0 + double precision, parameter :: E_ref = -0.5d0 + double precision, parameter :: tau = 10.d0 + integer*8 , parameter :: nmax = 100000 integer , parameter :: nruns = 30 - integer :: irun + integer :: irun double precision :: X(nruns), accep(nruns) double precision :: ave, err do irun=1,nruns - call variational_montecarlo(a,tau,nmax,X(irun),accep(irun)) + call pdmc(a, dt, nmax, X(irun), accep(irun), tau, E_ref) enddo + call ave_error(X,nruns,ave,err) print *, 'E = ', ave, '+/-', err + call ave_error(accep,nruns,ave,err) print *, 'A = ', ave, '+/-', err + end program qmc
-gfortran hydrogen.f90 qmc_stats.f90 vmc_metropolis.f90 -o vmc_metropolis
-./vmc_metropolis
+gfortran hydrogen.f90 qmc_stats.f90 pdmc.f90 -o pdmc
+./pdmc
+
+
+Python +
+from hydrogen import * +from qmc_stats import * + +def MonteCarlo(a, nmax, dt, tau, Eref): + sq_dt = np.sqrt(dt) + + energy = 0. + N_accep = 0 + normalization = 0. + + w = 1. + tau_current = 0. + + r_old = np.random.normal(loc=0., scale=1.0, size=(3)) + d_old = drift(a,r_old) + d2_old = np.dot(d_old,d_old) + psi_old = psi(a,r_old) + + for istep in range(nmax): + el = e_loc(a,r_old) + w *= np.exp(-dt*(el - Eref)) + + normalization += w + energy += w * el + + tau_current += dt + + # Reset when tau is reached + if tau_current >= tau: + w = 1. + tau_current = 0. + + chi = np.random.normal(loc=0., scale=1.0, size=(3)) + + r_new = r_old + dt * d_old + sq_dt * chi + d_new = drift(a,r_new) + d2_new = np.dot(d_new,d_new) + psi_new = psi(a,r_new) + + # Metropolis + prod = np.dot((d_new + d_old), (r_new - r_old)) + argexpo = 0.5 * (d2_new - d2_old)*dt + prod + + q = psi_new / psi_old + q = np.exp(-argexpo) * q*q + + if np.random.uniform() <= q: + N_accep += 1 + r_old = r_new + d_old = d_new + d2_old = d2_new + psi_old = psi_new + + return energy/normalization, N_accep/nmax + + +# Run simulation +a = 0.9 +nmax = 100000 +dt = 0.1 +tau = 10. +E_ref = -0.5 + +X0 = [ MonteCarlo(a, nmax, dt, tau, E_ref) for i in range(30)] + +# Energy +X = [ x for (x, _) in X0 ] +E, deltaE = ave_error(X) +print(f"E = {E} +/- {deltaE}") + +# Acceptance rate +X = [ x for (_, x) in X0 ] +A, deltaA = ave_error(X) +print(f"A = {A} +/- {deltaA}") ++
+Fortran +
+subroutine pdmc(a, dt, nmax, energy, accep, tau, E_ref) + implicit none + double precision, intent(in) :: a, dt, tau + integer*8 , intent(in) :: nmax + double precision, intent(out) :: energy, accep + double precision, intent(in) :: E_ref + + integer*8 :: istep + integer*8 :: n_accep + double precision :: sq_dt, chi(3), d2_old, prod, u + double precision :: psi_old, psi_new, d2_new, argexpo, q + double precision :: r_old(3), r_new(3) + double precision :: d_old(3), d_new(3) + double precision :: e, w, norm, tau_current + + double precision, external :: e_loc, psi + + sq_dt = dsqrt(dt) + + ! Initialization + energy = 0.d0 + n_accep = 0_8 + norm = 0.d0 + + w = 1.d0 + tau_current = 0.d0 + + call random_gauss(r_old,3) + + call drift(a,r_old,d_old) + d2_old = d_old(1)*d_old(1) + & + d_old(2)*d_old(2) + & + d_old(3)*d_old(3) + + psi_old = psi(a,r_old) + + do istep = 1,nmax + e = e_loc(a,r_old) + w = w * dexp(-dt*(e - E_ref)) + + energy = energy + w*e + norm = norm + w + + tau_current = tau_current + dt + + ! Reset when tau is reached + if (tau_current >= tau) then + w = 1.d0 + tau_current = 0.d0 + endif + + call random_gauss(chi,3) + r_new(:) = r_old(:) + dt*d_old(:) + chi(:)*sq_dt + + call drift(a,r_new,d_new) + d2_new = d_new(1)*d_new(1) + & + d_new(2)*d_new(2) + & + d_new(3)*d_new(3) + + psi_new = psi(a,r_new) + + ! Metropolis + prod = (d_new(1) + d_old(1))*(r_new(1) - r_old(1)) + & + (d_new(2) + d_old(2))*(r_new(2) - r_old(2)) + & + (d_new(3) + d_old(3))*(r_new(3) - r_old(3)) + + argexpo = 0.5d0 * (d2_new - d2_old)*dt + prod + + q = psi_new / psi_old + q = dexp(-argexpo) * q*q + + call random_number(u) + + if (u <= q) then + + n_accep = n_accep + 1_8 + + r_old(:) = r_new(:) + d_old(:) = d_new(:) + d2_old = d2_new + psi_old = psi_new + + end if + + end do + + energy = energy / norm + accep = dble(n_accep) / dble(nmax) + +end subroutine pdmc + +program qmc + implicit none + double precision, parameter :: a = 0.9 + double precision, parameter :: dt = 0.1d0 + double precision, parameter :: E_ref = -0.5d0 + double precision, parameter :: tau = 10.d0 + integer*8 , parameter :: nmax = 100000 + integer , parameter :: nruns = 30 + + integer :: irun + double precision :: X(nruns), accep(nruns) + double precision :: ave, err + + do irun=1,nruns + call pdmc(a, dt, nmax, X(irun), accep(irun), tau, E_ref) + enddo + + call ave_error(X,nruns,ave,err) + print *, 'E = ', ave, '+/-', err + + call ave_error(accep,nruns,ave,err) + print *, 'A = ', ave, '+/-', err + +end program qmc
-E = -0.49499990423528023 +/- 1.5958250761863871E-004 -A = 0.78861366666666655 +/- 3.5096729498002445E-004 +E = -0.50067519934141380 +/- 7.9390940184720371E-004 +A = 0.98788066666666663 +/- 7.2889356133441110E-005
We will now consider the H2 molecule in a minimal basis composed of the @@ -3270,8 +3605,8 @@ the nuclei.
[0/3]
Last things to do [0/3]
Last things to do
[ ] Give some hints of how much time is required for each section