diff --git a/src/JASTROW/jastrow_core.irp.f b/src/JASTROW/jastrow_core.irp.f new file mode 100644 index 0000000..f498a98 --- /dev/null +++ b/src/JASTROW/jastrow_core.irp.f @@ -0,0 +1,170 @@ +! Core Jastrow +! -------------- + + BEGIN_PROVIDER [ double precision, jast_elec_Core_expo, (nucl_num) ] +&BEGIN_PROVIDER [ double precision, jast_elec_Core_range, (nucl_num) ] + implicit none + BEGIN_DOC +! Exponent of the core jastrow factor per nucleus + END_DOC + integer :: i + do i=1,nucl_num + if (nucl_charge(i) < 2.5d0) then + jast_elec_Core_expo(i) = 0.d0 + jast_elec_Core_range(i) = 0.d0 + else + double precision :: rc + double precision, parameter :: thresh = 0.5 ! function = thresh at rc + rc = min(0.8d0,max(4.0d0/nucl_charge(i), 0.25d0)) + jast_elec_Core_expo(i) = -1.d0/rc**2 * log(thresh) + jast_elec_Core_range(i) = dsqrt(15.d0/jast_elec_Core_expo(i)) + endif + call dinfo(irp_here, 'expo', jast_elec_Core_expo(i)) + enddo + +END_PROVIDER + + + + +BEGIN_PROVIDER [ double precision , jast_elec_Core_value, (elec_num_8) ] +implicit none + BEGIN_DOC +! J(i) = \sum_j a.rij/(1+b^2.rij) - \sum_A (a.riA/(1+a.riA))^2 + END_DOC + integer :: i,j,k + double precision :: a, b, rij, tmp, a2 + double precision :: f1 + + jast_elec_Core_value = 0.d0 + + do k=1,nucl_num + if (jast_elec_Core_range(k) == 0.d0) then + cycle + endif + + a = 0.5d0 + a2 = jast_core_a1(k) + b = jast_core_b1(k) + + do j=1,elec_alpha_num + if (nucl_elec_dist(k,j) > jast_elec_Core_range(k)) then + cycle + endif + do i=elec_alpha_num+1,elec_num + if (nucl_elec_dist(k,i) > jast_elec_Core_range(k)) then + cycle + endif + rij = elec_dist(i,j) + f1 = exp(-jast_elec_Core_expo(k)*(nucl_elec_dist(k,i)*nucl_elec_dist(k,i)+nucl_elec_dist(k,j)*nucl_elec_dist(k,j))) + tmp = f1*(a*rij/(1.d0+b*rij) - a2) + jast_elec_Core_value(i) = jast_elec_Core_value(i) + 0.5d0*tmp + jast_elec_Core_value(j) = jast_elec_Core_value(j) + 0.5d0*tmp + enddo + enddo + + enddo +END_PROVIDER + + BEGIN_PROVIDER [ double precision , jast_elec_Core_grad_x, (elec_num_8) ] +&BEGIN_PROVIDER [ double precision , jast_elec_Core_grad_y, (elec_num_8) ] +&BEGIN_PROVIDER [ double precision , jast_elec_Core_grad_z, (elec_num_8) ] + implicit none + BEGIN_DOC +! 
Gradient of the Jastrow factor + END_DOC + + integer :: i,j,k + double precision :: a, b, rij, tmp, x, y, z, f1, a2 + + jast_elec_Core_grad_x = 0.d0 + jast_elec_Core_grad_y = 0.d0 + jast_elec_Core_grad_z = 0.d0 + + + do k=1,nucl_num + if (jast_elec_Core_range(k) == 0.d0) then + cycle + endif + + a = 0.5d0 + a2 = jast_core_a1(k) + b = jast_core_b1(k) + + do j=1,elec_alpha_num + if (nucl_elec_dist(k,j) > jast_elec_Core_range(k)) then + cycle + endif + do i=elec_alpha_num+1,elec_num + if (nucl_elec_dist(k,i) > jast_elec_Core_range(k)) then + cycle + endif + rij = elec_dist(i,j) + f1 = exp(-jast_elec_Core_expo(k)*(nucl_elec_dist(k,i)*nucl_elec_dist(k,i)+nucl_elec_dist(k,j)*nucl_elec_dist(k,j))) + tmp = f1*(a/(rij*(1.d0+b*rij*(2.d0+b*rij)))) + f1 = -2.d0*jast_elec_Core_expo(k)* f1* (a*rij/(1.d0+b*rij) -a2) + jast_elec_Core_grad_x(i) += elec_dist_vec_x(i,j)*tmp + nucl_elec_dist_vec(1,k,i)*f1 + jast_elec_Core_grad_y(i) += elec_dist_vec_y(i,j)*tmp + nucl_elec_dist_vec(2,k,i)*f1 + jast_elec_Core_grad_z(i) += elec_dist_vec_z(i,j)*tmp + nucl_elec_dist_vec(3,k,i)*f1 + jast_elec_Core_grad_x(j) += -elec_dist_vec_x(i,j)*tmp + nucl_elec_dist_vec(1,k,j)*f1 + jast_elec_Core_grad_y(j) += -elec_dist_vec_y(i,j)*tmp + nucl_elec_dist_vec(2,k,j)*f1 + jast_elec_Core_grad_z(j) += -elec_dist_vec_z(i,j)*tmp + nucl_elec_dist_vec(3,k,j)*f1 + enddo + enddo + + enddo +END_PROVIDER + +BEGIN_PROVIDER [ double precision , jast_elec_Core_lapl, (elec_num_8) ] + implicit none + BEGIN_DOC +! Laplacian of the Jastrow factor + END_DOC + + integer :: i,j,k + double precision :: a, b, rij, tmp, x, y, z,f1, alpha, a2 + + jast_elec_Core_lapl = 0.d0 + + do k=1,nucl_num + if (jast_elec_Core_range(k) == 0.d0) then + cycle + endif + + a = 0.5d0 + a2 = jast_core_a1(k) + b = jast_core_b1(k) + + do j=1,elec_alpha_num + if (nucl_elec_dist(k,j) > jast_elec_Core_range(k)) then + cycle + endif + do i=elec_alpha_num+1,elec_num + if (nucl_elec_dist(k,i) > jast_elec_Core_range(k)) then + cycle + endif + f1 = exp(-jast_elec_Core_expo(k)*(nucl_elec_dist(k,i)*nucl_elec_dist(k,i)+nucl_elec_dist(k,j)*nucl_elec_dist(k,j))) + rij = b*elec_dist(i,j) + tmp = (a+a)/(elec_dist(i,j)*(1.d0+rij*(3.d0+rij*(3.d0+rij)))) + jast_elec_Core_lapl(i) += tmp*f1 + jast_elec_Core_lapl(j) += tmp*f1 + + rij = elec_dist(i,j) + tmp = f1* ( a*rij/(1.d0+b*rij) -a2 ) + jast_elec_Core_lapl(i) += tmp*( 4.d0*(nucl_elec_dist(k,i)*jast_elec_Core_expo(k))**2-6.d0*jast_elec_Core_expo(k)) + jast_elec_Core_lapl(j) += tmp*( 4.d0*(nucl_elec_dist(k,j)*jast_elec_Core_expo(k))**2-6.d0*jast_elec_Core_expo(k)) + + + tmp = 4.d0*jast_elec_Core_expo(k)*f1*(a/(rij*(1.d0+b*rij*(2.d0+b*rij))) ) + jast_elec_Core_lapl(i) -= tmp*(nucl_elec_dist_vec(1,k,i)*elec_dist_vec_x(i,j) & + + nucl_elec_dist_vec(2,k,i)*elec_dist_vec_y(i,j) & + + nucl_elec_dist_vec(3,k,i)*elec_dist_vec_z(i,j)) + jast_elec_Core_lapl(j) += tmp*(nucl_elec_dist_vec(1,k,j)*elec_dist_vec_x(i,j) & + + nucl_elec_dist_vec(2,k,j)*elec_dist_vec_y(i,j) & + + nucl_elec_dist_vec(3,k,j)*elec_dist_vec_z(i,j)) + enddo + enddo + + enddo + +END_PROVIDER diff --git a/src/JASTROW/jastrow_full.irp.f b/src/JASTROW/jastrow_full.irp.f new file mode 100644 index 0000000..c34aa8a --- /dev/null +++ b/src/JASTROW/jastrow_full.irp.f @@ -0,0 +1,142 @@ + BEGIN_PROVIDER [ double precision, jast_value ] +&BEGIN_PROVIDER [ double precision, jast_value_inv ] + implicit none + include '../types.F' + BEGIN_DOC +! 
Value of the Jastrow factor + END_DOC + + integer, save :: ifirst = 0 + integer :: i + double precision :: dshift = 0.d0 + + jast_value = 1.d0 + if (do_jast) then + double precision :: argexpo +BEGIN_TEMPLATE + if (jast_type == t_$X) then + argexpo = 0.d0 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + argexpo += jast_elec_$X_value(i) + enddo + ! argexpo = argexpo/dble(elec_num) + endif +SUBST [X] +Simple ;; +Core ;; +END_TEMPLATE + if (ifirst == 0) then + dshift = argexpo + ifirst = 1 + endif + argexpo -= dshift + jast_value = exp(argexpo) + endif + ASSERT (jast_value > 0.d0) + jast_value_inv = 1.d0/jast_value + +END_PROVIDER + + BEGIN_PROVIDER [ double precision, jast_grad_jast_inv_x, (elec_num) ] +&BEGIN_PROVIDER [ double precision, jast_grad_jast_inv_y, (elec_num) ] +&BEGIN_PROVIDER [ double precision, jast_grad_jast_inv_z, (elec_num) ] +&BEGIN_PROVIDER [ double precision, jast_grad_x, (elec_num) ] +&BEGIN_PROVIDER [ double precision, jast_grad_y, (elec_num) ] +&BEGIN_PROVIDER [ double precision, jast_grad_z, (elec_num) ] + implicit none + include '../types.F' + BEGIN_DOC +! Grad(J)/J + END_DOC + + integer :: i,l + integer, save :: ifirst = 0 + + if (ifirst == 0) then + ifirst = 1 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + jast_grad_jast_inv_x(i) = 0.d0 + jast_grad_jast_inv_y(i) = 0.d0 + jast_grad_jast_inv_z(i) = 0.d0 + jast_grad_x(i) = 0.d0 + jast_grad_y(i) = 0.d0 + jast_grad_z(i) = 0.d0 + enddo + endif + + if (do_jast) then + +BEGIN_TEMPLATE + if ( jast_type == t_$X ) then + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + jast_grad_jast_inv_x(i) = jast_elec_$X_grad_x(i) + jast_grad_jast_inv_y(i) = jast_elec_$X_grad_y(i) + jast_grad_jast_inv_z(i) = jast_elec_$X_grad_z(i) + enddo + endif +SUBST [ X ] +Simple ;; +Core ;; +END_TEMPLATE + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + jast_grad_x(i) = jast_grad_jast_inv_x(i)*jast_value + jast_grad_y(i) = jast_grad_jast_inv_y(i)*jast_value + jast_grad_z(i) = jast_grad_jast_inv_z(i)*jast_value + enddo + + endif +END_PROVIDER + + + BEGIN_PROVIDER [ double precision, jast_lapl, (elec_num) ] +&BEGIN_PROVIDER [ double precision, jast_lapl_jast_inv, (elec_num) ] + implicit none + include '../types.F' + BEGIN_DOC +! Lapl(J)/J + END_DOC + + integer :: i + integer,save :: ifirst = 0 + + if (ifirst == 0) then + ifirst = 1 + !DIR$ VECTOR ALIGNED + jast_lapl_jast_inv = 0.d0 + endif + + if (do_jast) then + +BEGIN_TEMPLATE + if (jast_type == t_$X) then + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + jast_lapl_jast_inv(i) = (jast_elec_$X_lapl(i) + & + jast_grad_jast_inv_x(i)*jast_grad_jast_inv_x(i) + & + jast_grad_jast_inv_y(i)*jast_grad_jast_inv_y(i) + & + jast_grad_jast_inv_z(i)*jast_grad_jast_inv_z(i) ) + enddo + endif +SUBST [X] +Simple ;; +Core ;; +END_TEMPLATE + + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (256) + do i=1,elec_num + jast_lapl(i) = jast_lapl_jast_inv(i) * jast_value + enddo + + endif +END_PROVIDER + diff --git a/src/JASTROW/jastrow_param.irp.f b/src/JASTROW/jastrow_param.irp.f new file mode 100644 index 0000000..7c37c83 --- /dev/null +++ b/src/JASTROW/jastrow_param.irp.f @@ -0,0 +1,156 @@ +! Input data +! ---------- + +BEGIN_PROVIDER [ logical, do_jast ] + implicit none + BEGIN_DOC +! If true, compute the Jastrow factor + END_DOC + include '../types.F' + do_jast = jast_type /= t_None + call linfo(irp_here,'do_jast',do_jast) +END_PROVIDER + +BEGIN_PROVIDER [ integer, jast_type ] + implicit none + include '../types.F' + BEGIN_DOC +! 
Type of Jastrow factor : Simple or Core + END_DOC + character*(32) :: buffer + buffer = types(t_Simple) + jast_type = t_Core + call get_jastrow_jast_type(buffer) + if (buffer == types(t_Simple)) then + jast_type = t_Simple + else if (buffer == types(t_None)) then + jast_type = t_None + else if (buffer == types(t_Core)) then + jast_type = t_Core + else + call abrt(irp_here,'Jastrow type should be (None|Simple|Core)') + endif + call cinfo(irp_here,'jast_type',buffer) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_a_up_up ] + implicit none + BEGIN_DOC +! a_{up up} parameters of the Jastrow + END_DOC + include '../types.F' + jast_a_up_up = 0.25 + call get_jastrow_jast_a_up_up(jast_a_up_up) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_a_up_dn ] + implicit none + BEGIN_DOC +! a_{up dn} parameters of the Jastrow + END_DOC + include '../types.F' + jast_a_up_dn = 0.5 + call get_jastrow_jast_a_up_dn(jast_a_up_dn) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_b_up_up ] + implicit none + BEGIN_DOC +! b_{up up} parameters of the Jastrow + END_DOC + include '../types.F' + jast_b_up_up = 5. + call get_jastrow_jast_b_up_up(jast_b_up_up) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_b_up_dn ] + implicit none + BEGIN_DOC +! b_{up dn} parameters of the Jastrow + END_DOC + include '../types.F' + jast_b_up_dn = 5. + call get_jastrow_jast_b_up_dn(jast_b_up_dn) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_pen, (nucl_num) ] + implicit none + BEGIN_DOC +! penetration parameters of the Jastrow + END_DOC + include '../types.F' + jast_pen(:) = 0. + call get_jastrow_jast_pen(jast_pen) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_eeN_e_a, (nucl_num) ] + implicit none + BEGIN_DOC +! a parameters of the electron-electron-Nucleus component of the Jastrow + END_DOC + include '../types.F' + jast_eeN_e_a(:) = 0.5 + call get_jastrow_jast_eeN_e_a(jast_eeN_e_a) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_eeN_e_b, (nucl_num) ] + implicit none + BEGIN_DOC +! b parameters of the electron-electron-Nucleus component of the Jastrow + END_DOC + include '../types.F' + jast_eeN_e_b(:) = 3. + call get_jastrow_jast_eeN_e_b(jast_eeN_e_b) + +END_PROVIDER + +BEGIN_PROVIDER [ real, jast_eeN_N, (nucl_num) ] + implicit none + BEGIN_DOC +! penetration parameters of the electron-electron-nucleus component of the Jastrow + END_DOC + include '../types.F' + integer :: i + jast_eeN_N(:) = 100. + call get_jastrow_jast_eeN_N(jast_eeN_N) + +END_PROVIDER + + +BEGIN_PROVIDER [ real, jast_core_a1, (nucl_num) ] + implicit none + BEGIN_DOC +! parameters of the core Jastrow + END_DOC + include '../types.F' + integer :: i + do i=1,nucl_num + if (nucl_charge(i) > 0.) then + jast_core_a1(i) = 0.6/nucl_charge(i) + else + jast_core_a1(i) = 0. + endif + enddo + call get_jastrow_jast_core_a1(jast_core_a1) + +END_PROVIDER + + + BEGIN_PROVIDER [ real, jast_core_b1, (nucl_num) ] + implicit none + BEGIN_DOC +! parameters of the core Jastrow + END_DOC + include '../types.F' + jast_core_b1(:) = max(1.,1. - 0.3 * nucl_charge(:)) + call get_jastrow_jast_core_b1(jast_core_b1) + +END_PROVIDER + diff --git a/src/JASTROW/jastrow_simple.irp.f b/src/JASTROW/jastrow_simple.irp.f new file mode 100644 index 0000000..bbd9c99 --- /dev/null +++ b/src/JASTROW/jastrow_simple.irp.f @@ -0,0 +1,194 @@ +! Simple Jastrow +! -------------- + +BEGIN_PROVIDER [ double precision , jast_elec_Simple_value, (elec_num_8) ] +implicit none + BEGIN_DOC +! 
J(i) = \sum_j a.rij/(1+b^2.rij) - \sum_A (a.riA/(1+a.riA))^2 + END_DOC + integer :: i,j + double precision :: a, b, rij, tmp + + do i=1,elec_num + jast_elec_Simple_value(i) = 0.d0 + !DIR$ LOOP COUNT (100) + do j=1,nucl_num + a = jast_pen(j) + rij = nucl_elec_dist(j,i) + tmp = a*rij/(1.d0+a*rij) + jast_elec_Simple_value(i) -= tmp*tmp + enddo + enddo + + a = 0.5*jast_a_up_up + b = jast_b_up_up + + do j=1,elec_alpha_num + !DIR$ LOOP COUNT (50) + do i=j+1,elec_alpha_num + rij = elec_dist(i,j) + tmp = a*rij/(1.d0+b*rij) + jast_elec_Simple_value(i) += tmp + jast_elec_Simple_value(j) += tmp + enddo + enddo + + do j=elec_alpha_num+1,elec_num + !DIR$ LOOP COUNT (50) + do i=j+1,elec_num + rij = elec_dist(i,j) + tmp = a*rij/(1.d0+b*rij) + jast_elec_Simple_value(i) += tmp + jast_elec_Simple_value(j) += tmp + enddo + enddo + + a = 0.5*jast_a_up_dn + b = jast_b_up_dn + + do j=1,elec_alpha_num + !DIR$ LOOP COUNT (50) + do i=elec_alpha_num+1,elec_num + rij = elec_dist(i,j) + tmp = a*rij/(1.d0+b*rij) + jast_elec_Simple_value(i) += tmp + jast_elec_Simple_value(j) += tmp + enddo + enddo + +END_PROVIDER + + BEGIN_PROVIDER [ double precision , jast_elec_Simple_grad_x, (elec_num_8) ] +&BEGIN_PROVIDER [ double precision , jast_elec_Simple_grad_y, (elec_num_8) ] +&BEGIN_PROVIDER [ double precision , jast_elec_Simple_grad_z, (elec_num_8) ] + implicit none + BEGIN_DOC +! Gradient of the Jastrow factor + END_DOC + + integer :: i,j + double precision :: a, b, rij, tmp, x, y, z + + do i=1,elec_num + jast_elec_Simple_grad_x(i) = 0.d0 + jast_elec_Simple_grad_y(i) = 0.d0 + jast_elec_Simple_grad_z(i) = 0.d0 + !DIR$ LOOP COUNT (100) + do j=1,nucl_num + a = jast_pen(j) + rij = a*nucl_elec_dist(j,i) + tmp = (a+a)*a/(1.d0+rij*(3.d0+rij*(3.d0+rij))) + jast_elec_Simple_grad_x(i) -= nucl_elec_dist_vec(1,j,i)*tmp + jast_elec_Simple_grad_y(i) -= nucl_elec_dist_vec(2,j,i)*tmp + jast_elec_Simple_grad_z(i) -= nucl_elec_dist_vec(3,j,i)*tmp + enddo + enddo + + a = jast_a_up_up + b = jast_b_up_up + + do j=1,elec_alpha_num + !DIR$ LOOP COUNT (50) + do i=1,j-1 + rij = elec_dist(i,j) + tmp = a/(rij*(1.d0+b*rij*(2.d0+b*rij))) + jast_elec_Simple_grad_x(i) += elec_dist_vec_x(i,j)*tmp + jast_elec_Simple_grad_y(i) += elec_dist_vec_y(i,j)*tmp + jast_elec_Simple_grad_z(i) += elec_dist_vec_z(i,j)*tmp + jast_elec_Simple_grad_x(j) -= elec_dist_vec_x(i,j)*tmp + jast_elec_Simple_grad_y(j) -= elec_dist_vec_y(i,j)*tmp + jast_elec_Simple_grad_z(j) -= elec_dist_vec_z(i,j)*tmp + enddo + enddo + + do j=elec_alpha_num+1,elec_num + !DIR$ LOOP COUNT (50) + do i=elec_alpha_num+1,j-1 + rij = elec_dist(i,j) + tmp = a/(rij*(1.d0+b*rij*(2.d0+b*rij))) + jast_elec_Simple_grad_x(i) += elec_dist_vec_x(i,j)*tmp + jast_elec_Simple_grad_y(i) += elec_dist_vec_y(i,j)*tmp + jast_elec_Simple_grad_z(i) += elec_dist_vec_z(i,j)*tmp + jast_elec_Simple_grad_x(j) -= elec_dist_vec_x(i,j)*tmp + jast_elec_Simple_grad_y(j) -= elec_dist_vec_y(i,j)*tmp + jast_elec_Simple_grad_z(j) -= elec_dist_vec_z(i,j)*tmp + enddo + enddo + + a = jast_a_up_dn + b = jast_b_up_dn + + do j=1,elec_alpha_num + !DIR$ LOOP COUNT (50) + do i=elec_alpha_num+1,elec_num + rij = elec_dist(i,j) + tmp = a/(rij*(1.d0+b*rij*(2.d0+b*rij))) + jast_elec_Simple_grad_x(i) += elec_dist_vec_x(i,j)*tmp + jast_elec_Simple_grad_y(i) += elec_dist_vec_y(i,j)*tmp + jast_elec_Simple_grad_z(i) += elec_dist_vec_z(i,j)*tmp + jast_elec_Simple_grad_x(j) -= elec_dist_vec_x(i,j)*tmp + jast_elec_Simple_grad_y(j) -= elec_dist_vec_y(i,j)*tmp + jast_elec_Simple_grad_z(j) -= elec_dist_vec_z(i,j)*tmp + enddo + enddo + +END_PROVIDER + 
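
Reviewer note (not part of the patch): the gradient provider above and the Laplacian provider below use Horner-expanded denominators, 1+b*rij*(2+b*rij) = (1+b*rij)**2, 1+b*rij*(3+b*rij*(3+b*rij)) = (1+b*rij)**3, and similarly (1+a*riA)**3 and (1+a*riA)**4 for the electron-nucleus part. These follow directly from differentiating the pair term u(r) = a*r/(1+b*r) and the nuclear term v(r) = (a*r/(1+a*r))**2 documented in jast_elec_Simple_value: du/dr = a/(1+b*r)**2, Lapl u = 2*a/(r*(1+b*r)**3), dv/dr = 2*a**2*r/(1+a*r)**3, Lapl v = 6*a**2/(1+a*r)**4. The minimal standalone sketch below checks these equivalences numerically; the program name and the sample a, b, r values are illustrative and not taken from the code.

      program check_simple_jastrow_forms
        ! Standalone numerical check (illustrative, not part of the patch):
        ! compares the closed analytic forms with the Horner-form denominators
        ! used in jast_elec_Simple_grad_* and jast_elec_Simple_lapl.
        implicit none
        double precision :: a, b, r
        a = 0.5d0   ! sample pair parameter (arbitrary test value)
        b = 5.d0    ! sample b parameter   (arbitrary test value)
        r = 1.3d0   ! sample distance      (arbitrary test value)
        ! (du/dr)/r : a/(r*(1+b*r)**2)  vs  Horner form
        print *, a/(r*(1.d0+b*r)**2), a/(r*(1.d0+b*r*(2.d0+b*r)))
        ! Lapl u    : 2*a/(r*(1+b*r)**3)  vs  Horner form
        print *, 2.d0*a/(r*(1.d0+b*r)**3), 2.d0*a/(r*(1.d0+b*r*(3.d0+b*r*(3.d0+b*r))))
        ! (dv/dr)/r : 2*a**2/(1+a*r)**3  vs  Horner form (e-N part, a = jast_pen)
        print *, 2.d0*a*a/(1.d0+a*r)**3, 2.d0*a*a/(1.d0+a*r*(3.d0+a*r*(3.d0+a*r)))
        ! Lapl v    : 6*a**2/(1+a*r)**4  vs  Horner form
        print *, 6.d0*a*a/(1.d0+a*r)**4, 6.d0*a*a/(1.d0+a*r*(4.d0+a*r*(6.d0+a*r*(4.d0+a*r))))
      end program check_simple_jastrow_forms

Each pair of printed numbers should coincide to machine precision, which is the identity the providers rely on when accumulating the per-electron gradient and Laplacian contributions.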
+BEGIN_PROVIDER [ double precision , jast_elec_Simple_lapl, (elec_num_8) ] + implicit none + BEGIN_DOC +! Laplacian of the Jastrow factor + END_DOC + + integer :: i,j + double precision :: a, b, rij, tmp, x, y, z + + do i=1,elec_num + jast_elec_Simple_lapl(i) = 0.d0 + !DIR$ LOOP COUNT (100) + do j=1,nucl_num + a = jast_pen(j) + rij = a*nucl_elec_dist(j,i) + tmp = 6.d0*a*a/(1.d0+rij*(4.d0+rij*(6.d0+rij*(4.d0+rij)))) + jast_elec_Simple_lapl(i) -= tmp + enddo + enddo + + + a = jast_a_up_up+jast_a_up_up + b = jast_b_up_up + do j=1,elec_alpha_num + !DIR$ LOOP COUNT (50) + do i=1,j-1 + rij = b*elec_dist(i,j) + tmp = a/(elec_dist(i,j)*(1.d0+rij*(3.d0+rij*(3.d0+rij)))) + jast_elec_Simple_lapl(i) += tmp + jast_elec_Simple_lapl(j) += tmp + enddo + enddo + + do j=elec_alpha_num+1,elec_num + !DIR$ LOOP COUNT (100) + do i=j+1,elec_num + rij = b*elec_dist(i,j) + tmp = a/(elec_dist(i,j)*(1.d0+rij*(3.d0+rij*(3.d0+rij)))) + jast_elec_Simple_lapl(i) += tmp + jast_elec_Simple_lapl(j) += tmp + enddo + enddo + + a = jast_a_up_dn+jast_a_up_dn + b = jast_b_up_dn + + do j=1,elec_alpha_num + !DIR$ LOOP COUNT (100) + do i=elec_alpha_num+1,elec_num + rij = b*elec_dist(i,j) + tmp = a/(elec_dist(i,j)*(1.d0+rij*(3.d0+rij*(3.d0+rij)))) + jast_elec_Simple_lapl(i) += tmp + jast_elec_Simple_lapl(j) += tmp + enddo + enddo + + +END_PROVIDER diff --git a/src/MAIN/.gitignore b/src/MAIN/.gitignore new file mode 100644 index 0000000..73952b5 --- /dev/null +++ b/src/MAIN/.gitignore @@ -0,0 +1,3 @@ +qmc_create_walkers +qmc +qmcchem_info diff --git a/src/MAIN/qmc.irp.f b/src/MAIN/qmc.irp.f new file mode 100644 index 0000000..52dd7ef --- /dev/null +++ b/src/MAIN/qmc.irp.f @@ -0,0 +1,3 @@ +program qmc + call main_qmc +end diff --git a/src/MAIN/qmc_create_walkers.irp.f b/src/MAIN/qmc_create_walkers.irp.f new file mode 100644 index 0000000..a8cfaa3 --- /dev/null +++ b/src/MAIN/qmc_create_walkers.irp.f @@ -0,0 +1,51 @@ +program main_prepare_walkers + implicit none + + ! Delete old walkers files if they exist + call system ('bash -c "rm -f '//trim(ezfio_filename)//'/electrons/elec_coord_pool{.gz,_size}"') + + call set_parameters + call draw_init_points + call run_prepare_walkers + call save_elec_coord_full + call run +end + +subroutine run + implicit none + TOUCH elec_coord_full + + E_loc_min = huge(1.d0) + E_loc_max = -huge(1.d0) + print *, ' min max' + integer :: i + do i=1,4 + call test_block + call save_elec_coord_full + TOUCH elec_coord_full + E_loc_min = huge(1.d0) + E_loc_max = -huge(1.d0) + enddo + print *, 'Generated ', walk_num, ' walkers' +end + +subroutine set_parameters + implicit none + include '../types.F' + do_prepare = .True. + call ezfio_set_properties_e_loc(.True.) + qmc_method = t_VMC + vmc_algo = t_Langevin + ci_threshold = 0.99999 + time_step = 0.1 + block_time = 2. + prepare_walkers_t = 0. 
+ time_step_inv = 1./time_step + dtime_step = dble(time_step) + SOFT_TOUCH vmc_algo ci_threshold time_step prepare_walkers_t block_time time_step_inv dtime_step do_prepare +end + +subroutine test_block + implicit none + print *, real(E_loc_vmc_block_walk), real(E_loc_min), real(E_loc_max) +end diff --git a/src/MAIN/qmcchem_info.irp.f b/src/MAIN/qmcchem_info.irp.f new file mode 100644 index 0000000..d26f1d2 --- /dev/null +++ b/src/MAIN/qmcchem_info.irp.f @@ -0,0 +1,46 @@ +program qmcchem_info + implicit none + PROVIDE ezfio_filename + double precision :: cpu0, cpu1 + character*(8) :: str_n + integer :: iargc + integer :: imax + if (command_argument_count() > 1) then + call get_command_argument(2,str_n) + read(str_n,*) imax + else + imax = 100 + endif + print *, 'Number of determinants : ', det_num + print *, 'Number of unique alpha/beta determinants : ', det_alpha_num, det_beta_num +! print *, 'Number of vectors in SVD : ', psi_svd_size + print *, 'Closed-shell MOs : ', mo_closed_num + print *, 'Number of MOs in determinants : ', num_present_mos +! print *, 'do SVD? : ', do_det_svd +! print *, 'det_coef matrix is sparse : ', det_coef_matrix_is_sparse + print *, 'Det alpha norm:' + print *, det_alpha_norm + print *, 'Det beta norm:' + print *, det_beta_norm + call step1 + call cpu_time (cpu0) + call step2(imax) + call cpu_time (cpu1) + print *, 'Time for the calculation of E_loc (ms) : ', 1000.*(cpu1-cpu0)/float(imax) +end + +subroutine step1 + implicit none + print *, 'E_loc : ', E_loc + PROVIDE E_loc +end + +subroutine step2(imax) + implicit none + integer, intent(in) :: imax + integer :: i + do i=1,imax + PROVIDE E_loc + TOUCH elec_coord + enddo +end diff --git a/src/PROPERTIES/properties.irp.f b/src/PROPERTIES/properties.irp.f new file mode 100644 index 0000000..93a4658 --- /dev/null +++ b/src/PROPERTIES/properties.irp.f @@ -0,0 +1,77 @@ +BEGIN_SHELL [ /usr/bin/env python ] +import os +from properties import properties +root = os.environ['QMCCHEM_PATH'] + +template = """ +BEGIN_PROVIDER [ logical, calc_%(p)s ] + implicit none + BEGIN_DOC + ! If true, calculate %(p)s + END_DOC + calc_%(p)s = .False. + logical :: has_%(p)s + if (.not.is_worker) then + call ezfio_has_properties_%(p)s(has_%(p)s) + if (has_%(p)s) then + call ezfio_get_properties_%(p)s(calc_%(p)s) + endif + else + call zmq_ezfio_has('properties_%(p)s',has_%(p)s) + if (has_%(p)s) then + call zmq_ezfio_get_logical('properties_%(p)s',calc_%(p)s,1) + endif + endif +END_PROVIDER +""" + +for p in properties: + print template%{'p':p[1]} + +t=""" + BEGIN_PROVIDER [ $T, $X_min ] +&BEGIN_PROVIDER [ $T, $X_max ] + implicit none + BEGIN_DOC + ! Minimum and maximum values of $X + END_DOC + $X_min = huge(1.) + $X_max =-huge(1.) +END_PROVIDER + +BEGIN_PROVIDER [ $T, $X_2 $D ] + implicit none + BEGIN_DOC + ! Square of $X + END_DOC + $X_2= $X*$X +END_PROVIDER +""" + +for p in properties: + d = "" + if p[2] != '': + d = ", %s"%(p[2]) + print t.replace("$T",p[0]).replace("$X",p[1]).replace("$D",d) +END_SHELL + +!==========================================================================! +! DIMENSIONS +!==========================================================================! + +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +make_dims() +END_SHELL + +!==========================================================================! +! ! +!==========================================================================! + + +!==========================================================================! +! PROPERTIES ! 
+!==========================================================================! + + + diff --git a/src/PROPERTIES/properties_energy.irp.f b/src/PROPERTIES/properties_energy.irp.f new file mode 100644 index 0000000..0234da8 --- /dev/null +++ b/src/PROPERTIES/properties_energy.irp.f @@ -0,0 +1,253 @@ +!==========================================================================! +! DIMENSIONS +!==========================================================================! + + +BEGIN_PROVIDER [ double precision, single_det_E_kin ] + implicit none + BEGIN_DOC + ! Electronic Kinetic energy : -1/2 (Lapl.Psi)/Psi + END_DOC + integer :: i + single_det_E_kin = 0.d0 + do i=1,elec_num + single_det_E_kin -= 0.5d0*single_det_lapl(i)/single_det_value + enddo + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, single_det_E_loc ] + implicit none + BEGIN_DOC + ! Local energy : single_det_E_kin + E_pot + E_nucl + END_DOC + + single_det_E_loc = single_det_E_kin + E_pot + E_nucl +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_pot_grad, (elec_num,3) ] + implicit none + BEGIN_DOC + ! Gradient of the Electronic Potential energy + END_DOC + + integer :: i,j + double precision :: dinv + do i=1,elec_num + E_pot_grad(i,1) = 0.d0 + E_pot_grad(i,2) = 0.d0 + E_pot_grad(i,3) = 0.d0 + enddo + do j=1,elec_num + do i=1,j-1 + dinv = elec_dist_inv(i,j) + dinv = dinv*dinv*dinv + E_pot_grad(i,1) -= elec_dist_vec_x(i,j)*dinv + E_pot_grad(i,2) -= elec_dist_vec_y(i,j)*dinv + E_pot_grad(i,3) -= elec_dist_vec_z(i,j)*dinv + enddo + do i=j+1,elec_num + dinv = elec_dist_inv(i,j) + dinv = dinv*dinv*dinv + E_pot_grad(i,1) -= elec_dist_vec_x(i,j)*dinv + E_pot_grad(i,2) -= elec_dist_vec_y(i,j)*dinv + E_pot_grad(i,3) -= elec_dist_vec_z(i,j)*dinv + enddo + enddo + do i=1,elec_num + do j=1,nucl_num + dinv = nucl_charge(j)*nucl_elec_dist_inv(j,i)**3 + E_pot_grad(i,1) += nucl_elec_dist_vec(1,j,i)*dinv + E_pot_grad(i,2) += nucl_elec_dist_vec(2,j,i)*dinv + E_pot_grad(i,3) += nucl_elec_dist_vec(3,j,i)*dinv + enddo + enddo + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_pot_elec, (elec_num) ] + implicit none + BEGIN_DOC + ! Electronic Potential energy + END_DOC + + integer :: i, j + if (do_pseudo) then + do i=1,elec_num + E_pot_elec(i) = v_pseudo_local(i) + pseudo_non_local(i) + enddo + else + do i=1,elec_num + E_pot_elec(i) = 0.d0 + enddo + endif + + do i=1,elec_num + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(50) + do j=1,elec_num + E_pot_elec(i) = E_pot_elec(i) + 0.5d0*elec_dist_inv(j,i) + enddo + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(50) + do j=1,nucl_num + E_pot_elec(i) = E_pot_elec(i) - nucl_charge(j)*nucl_elec_dist_inv(j,i) + enddo + enddo + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_pot_elec_one, (elec_num) ] + implicit none + BEGIN_DOC + ! Electronic Potential energy + END_DOC + + integer :: i, j + do i=1,elec_num + E_pot_elec_one(i) = 0.d0 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do j=1,nucl_num + E_pot_elec_one(i) -= nucl_charge(j)*nucl_elec_dist_inv(j,i) + enddo + enddo + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_pot_elec_two, (elec_num) ] + implicit none + BEGIN_DOC + ! Electronic Potential energy + END_DOC + + integer :: i, j + do i=1,elec_num + E_pot_elec_two(i) = 0.d0 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do j=1,elec_num + if (j==i) then + cycle + endif + E_pot_elec_two(i) += 0.5d0*elec_dist_inv(j,i) + enddo + enddo + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_kin_elec, (elec_num) ] + implicit none + + BEGIN_DOC + ! 
Electronic Kinetic energy : -1/2 (Lapl.Psi)/Psi + END_DOC + + integer :: i + do i=1,elec_num + E_kin_elec(i) = -0.5d0*psi_lapl_psi_inv(i) + enddo + +END_PROVIDER + + +!==========================================================================! +! PROPERTIES ! +!==========================================================================! + +BEGIN_PROVIDER [ double precision, E_nucl ] + implicit none + BEGIN_DOC + ! Nuclear potential energy + END_DOC + + E_nucl = 0.d0 + integer :: i, j + do i=1,nucl_num + do j=1,i-1 + E_nucl += nucl_charge(i)*nucl_charge(j)/nucl_dist(j,i) + enddo + enddo + + E_nucl_min = min(E_nucl,E_nucl_min) + E_nucl_max = max(E_nucl,E_nucl_max) + SOFT_TOUCH E_nucl_min E_nucl_max +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_pot ] + implicit none + BEGIN_DOC + ! Electronic Potential energy + END_DOC + + E_pot = 0.d0 + integer :: i, j + do i=1,elec_num + E_pot += E_pot_elec(i) + enddo + + E_pot_min = min(E_pot,E_pot_min) + E_pot_max = max(E_pot,E_pot_max) + SOFT_TOUCH E_pot_min E_pot_max +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_kin ] + implicit none + BEGIN_DOC + ! Electronic Kinetic energy : -1/2 (Lapl.Psi)/Psi + END_DOC + + E_kin = 0.d0 + + integer :: i + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + E_kin -= 0.5d0*psi_lapl_psi_inv(i) + enddo + + E_kin_min = min(E_kin,E_kin_min) + E_kin_max = max(E_kin,E_kin_max) + SOFT_TOUCH E_kin_min E_kin_max +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_loc ] + implicit none + include '../types.F' + BEGIN_DOC + ! Local energy : E_kin + E_pot + E_nucl + END_DOC + + integer :: i + E_loc = E_nucl + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + E_loc += E_kin_elec(i) + E_pot_elec(i) + enddo + + ! Avoid divergence of E_loc + if (qmc_method == t_DMC) then + double precision :: delta_e + delta_e = E_loc-E_ref + E_loc = E_ref + erf(1.d0/(time_step*delta_e*time_step*delta_e)) * delta_e + endif + + + E_loc_min = min(E_loc,E_loc_min) + E_loc_max = max(E_loc,E_loc_max) + SOFT_TOUCH E_loc_min E_loc_max +END_PROVIDER + + + + diff --git a/src/PROPERTIES/properties_general.irp.f b/src/PROPERTIES/properties_general.irp.f new file mode 100644 index 0000000..b6c0ae9 --- /dev/null +++ b/src/PROPERTIES/properties_general.irp.f @@ -0,0 +1,72 @@ +!==========================================================================! +! PROPERTIES ! +!==========================================================================! + + +BEGIN_PROVIDER [ double precision, dipole, (3) ] + implicit none + BEGIN_DOC + ! Dipole moment + END_DOC + + integer :: i + dipole = 0.d0 + do i=1,nucl_num + dipole(1) += nucl_coord(i,1)*nucl_charge(i) + dipole(2) += nucl_coord(i,2)*nucl_charge(i) + dipole(3) += nucl_coord(i,3)*nucl_charge(i) + enddo + + do i=1,elec_num + dipole(1) -= elec_coord(i,1) + dipole(2) -= elec_coord(i,2) + dipole(3) -= elec_coord(i,3) + enddo + + dipole *= 2.541765d0 + dipole_min = min(minval(dipole),dipole_min) + dipole_max = max(minval(dipole),dipole_max) + SOFT_TOUCH dipole_min dipole_max +END_PROVIDER + +BEGIN_PROVIDER [ double precision, wf_extension ] + implicit none + + BEGIN_DOC + ! 
Wave function extension + END_DOC + + wf_extension = 0.d0 + integer :: i + do i=1,elec_num + wf_extension += elec_coord(i,1)*elec_coord(i,1) + elec_coord(i,2)*elec_coord(i,2) + elec_coord(i,3)*elec_coord(i,3) + enddo + + wf_extension_min = min(wf_extension,wf_extension_min) + wf_extension_max = max(wf_extension,wf_extension_max) + SOFT_TOUCH wf_extension_min wf_extension_max +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, drift_mod, (size_drift_mod) ] + implicit none + BEGIN_DOC + ! Modulus of the drift per electron + ! + ! Dimensions : elec_num + END_DOC + + integer :: i, j + do i=1,elec_num + drift_mod(i) = sqrt( & + psi_grad_psi_inv_x(i)*psi_grad_psi_inv_x(i) + & + psi_grad_psi_inv_y(i)*psi_grad_psi_inv_y(i) + & + psi_grad_psi_inv_z(i)*psi_grad_psi_inv_z(i) ) + enddo + drift_mod_min = min(minval(drift_mod),drift_mod_min) + drift_mod_max = max(maxval(drift_mod),drift_mod_max) + SOFT_TOUCH drift_mod_min drift_mod_max + +END_PROVIDER + + diff --git a/src/SAMPLING/block.irp.f b/src/SAMPLING/block.irp.f new file mode 100644 index 0000000..9e800ad --- /dev/null +++ b/src/SAMPLING/block.irp.f @@ -0,0 +1,53 @@ +! Providers of *_block_walk +!============================== +BEGIN_SHELL [ /usr/bin/python ] +from properties import * + +t = """ + BEGIN_PROVIDER [ $T, $X_block_walk $D1 ] +&BEGIN_PROVIDER [ $T, $X_2_block_walk $D1 ] + implicit none + include '../types.F' + BEGIN_DOC +! Properties averaged over the block per walker + END_DOC + + if (qmc_method == t_VMC) then + PROVIDE E_loc_vmc_block_walk + if (calc_$X) then + $X_block_walk = $X_vmc_block_walk + $X_2_block_walk = $X_2_vmc_block_walk + endif + else if (qmc_method == t_DMC) then + PROVIDE E_loc_dmc_block_walk + if (calc_$X) then + $X_block_walk = $X_dmc_block_walk + $X_2_block_walk = $X_2_dmc_block_walk + endif + endif + +END_PROVIDER +""" + +for p in properties: + if p[2] == "": + D1 = "" + else: + D1 = ", ("+p[2][1:-1]+")" + print t.replace("$X",p[1]).replace("$T",p[0]).replace("$D1",D1) +END_SHELL + + + + +BEGIN_PROVIDER [ double precision, block_weight ] + implicit none + include '../types.F' + BEGIN_DOC +! Weight of the current block in the full average of the simulation + END_DOC + integer :: i + block_weight = 0.d0 +END_PROVIDER + + diff --git a/src/SAMPLING/brownian_step.irp.f b/src/SAMPLING/brownian_step.irp.f new file mode 100644 index 0000000..6d601aa --- /dev/null +++ b/src/SAMPLING/brownian_step.irp.f @@ -0,0 +1,195 @@ +subroutine brownian_step(p,q,accepted,delta_x) + + implicit none + include '../types.F' + + double precision,intent(out) :: p,q + logical, intent(out) :: accepted + real,intent(out) :: delta_x + + real :: xold_x(elec_num+1) + real :: xold_y(elec_num+1) + real :: xold_z(elec_num+1) + double precision :: psiold + double precision :: bold0_x(elec_num),bold_x(elec_num),bnew_x(elec_num) + double precision :: bold0_y(elec_num),bold_y(elec_num),bnew_y(elec_num) + double precision :: bold0_z(elec_num),bold_z(elec_num),bnew_z(elec_num) + double precision :: E_old + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xold_x + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xold_y + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xold_z + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: bold_x + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: bold_y + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: bold_z + + integer :: i,l + + psiold = psi_value + if (psiold == 0.) 
then + call abrt(irp_here,'Walker is on the nodal surface.') + endif + + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num+1 + xold_x(i) = elec_coord(i,1) + xold_y(i) = elec_coord(i,2) + xold_z(i) = elec_coord(i,3) + enddo + !DIR$ VECTOR ALIGNED + bold0_x = psi_grad_psi_inv_x + bold_x = psi_grad_psi_inv_x + !DIR$ VECTOR ALIGNED + bold0_y = psi_grad_psi_inv_y + bold_y = psi_grad_psi_inv_y + !DIR$ VECTOR ALIGNED + bold0_z = psi_grad_psi_inv_z + bold_z = psi_grad_psi_inv_z + +! real :: time_step_inv +! time_step_inv = 1./time_step +! !DIR$ VECTOR ALIGNED +! do i=1,elec_num +! if (bold_x(i) > time_step_inv) then +! bold_x(i) = time_step_inv +! else if (bold_x(i) < -time_step_inv) then +! bold_x(i) = -time_step_inv +! endif +! if (bold_y(i) > time_step_inv) then +! bold_y(i) = time_step_inv +! else if (bold_y(i) < -time_step_inv) then +! bold_y(i) = -time_step_inv +! endif +! if (bold_z(i) > time_step_inv) then +! bold_z(i) = time_step_inv +! else if (bold_z(i) < -time_step_inv) then +! bold_z(i) = -time_step_inv +! endif +! enddo + + + double precision :: b2old, b2max, tmp + b2old = 0.d0 + b2max = 0.d0 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num + b2old += bold_x(i)*bold_x(i) + bold_y(i)*bold_y(i) + bold_z(i)*bold_z(i) + enddo + + real :: xdiff_x (elec_num) + real :: xdiff_y (elec_num) + real :: xdiff_z (elec_num) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xdiff_x + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xdiff_y + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xdiff_z + double precision :: gauss +! integer :: k +! k=0 + do l=1,3 + do i=1,elec_num + !k=k+1 + !double precision :: halton_gauss + !xbrown(i,l) = halton_gauss(k)*time_step_sq + xbrown(i,l) = gauss()*time_step_sq + enddo + enddo + delta_x = 0. + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num + xdiff_x(i) = bold_x(i)*time_step + xbrown(i,1) + xdiff_y(i) = bold_y(i)*time_step + xbrown(i,2) + xdiff_z(i) = bold_z(i)*time_step + xbrown(i,3) + delta_x += xdiff_x(i)*xdiff_x(i) + xdiff_y(i)*xdiff_y(i) + xdiff_z(i)*xdiff_z(i) + enddo + delta_x = sqrt(delta_x) + + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num + elec_coord(i,1) = xold_x(i) + xdiff_x(i) + elec_coord(i,2) = xold_y(i) + xdiff_y(i) + elec_coord(i,3) = xold_z(i) + xdiff_z(i) + enddo + + TOUCH elec_coord + + !DIR$ VECTOR ALIGNED + bnew_x = psi_grad_psi_inv_x + !DIR$ VECTOR ALIGNED + bnew_y = psi_grad_psi_inv_y + !DIR$ VECTOR ALIGNED + bnew_z = psi_grad_psi_inv_z + +! !DIR$ VECTOR ALIGNED +! do i=1,elec_num +! if (bnew_x(i) > time_step_inv) then +! bnew_x(i) = time_step_inv +! else if (bnew_x(i) < -time_step_inv) then +! bnew_x(i) = -time_step_inv +! endif +! if (bnew_y(i) > time_step_inv) then +! bnew_y(i) = time_step_inv +! else if (bnew_y(i) < -time_step_inv) then +! bnew_y(i) = -time_step_inv +! endif +! if (bnew_z(i) > time_step_inv) then +! bnew_z(i) = time_step_inv +! else if (bnew_z(i) < -time_step_inv) then +! bnew_z(i) = -time_step_inv +! endif +! 
enddo + double precision :: ratio + + ratio = psi_value/psiold + ratio *= ratio + + double precision :: b2new + b2new = 0.d0 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num + b2new += bnew_x(i)*bnew_x(i) + bnew_y(i)*bnew_y(i) + bnew_z(i)*bnew_z(i) + enddo + + double precision :: prod + prod = 0.d0 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num + prod += ( bnew_x(i)+bold_x(i) )*xdiff_x(i) & + + ( bnew_y(i)+bold_y(i) )*xdiff_y(i) & + + ( bnew_z(i)+bold_z(i) )*xdiff_z(i) + enddo + + double precision :: argexpo + argexpo = 0.5d0*(b2new-b2old)*time_step+prod + + p = min (1.d0,ratio*exp(-argexpo)) + q = 1.d0 - p + + double precision :: qmc_ranf + accepted = p > qmc_ranf() + if (accepted) then + accepted_num += 1. + else + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(100) + do i=1,elec_num + elec_coord(i,1) = xold_x(i) + elec_coord(i,2) = xold_y(i) + elec_coord(i,3) = xold_z(i) + enddo + !DIR$ VECTOR ALIGNED + psi_grad_psi_inv_x = bold0_x + !DIR$ VECTOR ALIGNED + psi_grad_psi_inv_y = bold0_y + !DIR$ VECTOR ALIGNED + psi_grad_psi_inv_z = bold0_z + psi_value = psiold + rejected_num += 1. + SOFT_TOUCH elec_coord psi_grad_psi_inv_x psi_grad_psi_inv_y psi_grad_psi_inv_z psi_value + endif + +end diff --git a/src/SAMPLING/dmc_step.irp.f b/src/SAMPLING/dmc_step.irp.f new file mode 100644 index 0000000..769a91d --- /dev/null +++ b/src/SAMPLING/dmc_step.irp.f @@ -0,0 +1,268 @@ +! Providers of *_dmc_block_walk +!============================== +BEGIN_SHELL [ /usr/bin/python ] +from properties import * + +t = """ + BEGIN_PROVIDER [ $T, $X_dmc_block_walk $D1 ] +&BEGIN_PROVIDER [ $T, $X_dmc_block_walk_kahan $D2 ] +&BEGIN_PROVIDER [ $T, $X_2_dmc_block_walk $D1 ] +&BEGIN_PROVIDER [ $T, $X_2_dmc_block_walk_kahan $D2 ] + implicit none + BEGIN_DOC +! VMC averages of $X + END_DOC + $X_dmc_block_walk = 0.d0 + $X_dmc_block_walk_kahan = 0.d0 + $X_2_dmc_block_walk = 0.d0 + $X_2_dmc_block_walk_kahan = 0.d0 +END_PROVIDER +""" +for p in properties: + if p[1] != 'e_loc': + if p[2] == "": + D1 = "" + D2 = ", (3)" + else: + D1 = ", ("+p[2][1:-1]+")" + D2 = ", ("+p[2][1:-1]+",3)" + print t.replace("$X",p[1]).replace("$T",p[0]).replace("$D1",D1).replace("$D2",D2) +END_SHELL + + + + BEGIN_PROVIDER [ double precision, E_loc_dmc_block_walk ] +&BEGIN_PROVIDER [ double precision, E_loc_2_dmc_block_walk ] +&BEGIN_PROVIDER [ double precision, E_loc_dmc_block_walk_kahan, (3) ] +&BEGIN_PROVIDER [ double precision, E_loc_2_dmc_block_walk_kahan, (3) + implicit none + include '../types.F' + BEGIN_DOC +! Properties averaged over the block using the DMC method + END_DOC + + real, allocatable :: elec_coord_tmp(:,:,:) + integer :: mod_align + double precision, allocatable :: psi_grad_psi_inv_save_tmp(:,:,:) + double precision :: psi_value_save_tmp(walk_num) + integer :: trapped_walk_tmp(walk_num) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: psi_grad_psi_inv_save_tmp + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: psi_value_save_tmp + allocate ( elec_coord_tmp(mod_align(elec_num+1),3,walk_num) ) + allocate ( psi_grad_psi_inv_save_tmp(elec_num_8,3,walk_num) ) + +! Initialization + if (vmc_algo /= t_Brownian) then + call abrt(irp_here,'DMC should run with Brownian algorithm') + endif + PROVIDE E_loc_vmc_block_walk + + integer :: k, i_walk, i_step + +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +t = """ + if (calc_$X) then + $X_dmc_block_walk = 0.d0 + $X_dmc_block_walk_kahan = 0.d0 + $X_2_dmc_block_walk = 0.d0 + $X_2_dmc_block_walk_kahan = 0.d0 + $X_min = huge(1.) + $X_max =-huge(1.) 
+ endif +""" +for p in properties: + print t.replace("$X",p[1]) +END_SHELL + + double precision :: icount + + icount = 0.d0 + + logical :: loop + integer*8 :: cpu0, cpu1, cpu2, count_rate, count_max + + loop = .True. + call system_clock(cpu0, count_rate, count_max) + cpu2 = cpu0 + do while (loop) + dmc_projection_step = mod(dmc_projection_step+1,dmc_projection)+1 + + pop_weight_mult *= 1.d0/pop_weight(dmc_projection_step) + pop_weight(dmc_projection_step) = 0.d0 + do k=1,walk_num + pop_weight(dmc_projection_step) += dmc_weight(k) + enddo + + do k=1,walk_num + dmc_weight(k) = dmc_weight(k)/pop_weight(dmc_projection_step) + enddo + + pop_weight(dmc_projection_step) = pop_weight(dmc_projection_step)/dble(walk_num) + pop_weight_mult *= pop_weight(dmc_projection_step) + +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +t = """ + if (calc_$X) then +! Kahan's summation algorithm to compute these sums reducing the rounding error: +! $X_dmc_block_walk($D) += $X * pop_weight_mult +! $X_2_dmc_block_walk($D) += $X_2 * pop_weight_mult +! see http://en.wikipedia.org/wiki/Kahan_summation_algorithm + + $X_dmc_block_walk_kahan($D2 3) = $X * pop_weight_mult - $X_dmc_block_walk_kahan($D2 1) + $X_dmc_block_walk_kahan($D2 2) = $X_dmc_block_walk $D1 + $X_dmc_block_walk_kahan($D2 3) + $X_dmc_block_walk_kahan($D2 1) = ($X_dmc_block_walk_kahan($D2 2) - $X_dmc_block_walk $D1 ) & + - $X_dmc_block_walk_kahan($D2 3) + $X_dmc_block_walk $D1 = $X_dmc_block_walk_kahan($D2 2) + + + $X_2_dmc_block_walk_kahan($D2 3) = $X_2 * pop_weight_mult - $X_2_dmc_block_walk_kahan($D2 1) + $X_2_dmc_block_walk_kahan($D2 2) = $X_2_dmc_block_walk $D1 + $X_2_dmc_block_walk_kahan($D2 3) + $X_2_dmc_block_walk_kahan($D2 1) = ($X_2_dmc_block_walk_kahan($D2 2) - $X_2_dmc_block_walk $D1 ) & + - $X_2_dmc_block_walk_kahan($D2 3) + $X_2_dmc_block_walk $D1 = $X_2_dmc_block_walk_kahan($D2 2) + endif +""" +for p in properties: + if p[2] == "": + D1 = "" + D2 = "" + else: + D1 = "("+":"*(p[2].count(',')+1)+")" + D2 = ":"*(p[2].count(',')+1)+"," + print t.replace("$X",p[1]).replace("$D1",D1).replace("$D2",D2) +END_SHELL + icount += pop_weight_mult + +! Reconfiguration + + integer :: ipos(walk_num) + call reconfigure(ipos,dmc_weight) + + do k=1,walk_num + integer :: i, l + do l=1,3 + do i=1,elec_num+1 + elec_coord_tmp(i,l,k) = elec_coord_full(i,l,k) + enddo + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + psi_grad_psi_inv_save_tmp(i,l,k) = psi_grad_psi_inv_save(i,l,k) + enddo + enddo + psi_value_save_tmp(k) = psi_value_save(k) + trapped_walk_tmp(k) = trapped_walk(k) + enddo + + integer :: ipm + do k=1,walk_num + ipm = ipos(k) + do l=1,3 + do i=1,elec_num+1 + elec_coord_full(i,l,k) = elec_coord_tmp(i,l,ipm) + enddo + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + psi_grad_psi_inv_save(i,l,k) = psi_grad_psi_inv_save_tmp(i,l,ipm) + enddo + enddo + psi_value_save(k) = psi_value_save_tmp(ipm) + trapped_walk(k) = trapped_walk_tmp(ipm) + enddo + + ! 
Set 1st walker + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + psi_grad_psi_inv_x(i) = psi_grad_psi_inv_save(i,1,1) + psi_grad_psi_inv_y(i) = psi_grad_psi_inv_save(i,2,1) + psi_grad_psi_inv_z(i) = psi_grad_psi_inv_save(i,3,1) + enddo + + !DIR$ VECTOR UNALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + elec_coord(i,1) = elec_coord_full(i,1,1) + elec_coord(i,2) = elec_coord_full(i,2,1) + elec_coord(i,3) = elec_coord_full(i,3,1) + enddo + psi_value = psi_value_save(1) + + TOUCH elec_coord_full psi_value_save psi_grad_psi_inv_save psi_value psi_grad_psi_inv_x psi_grad_psi_inv_y psi_grad_psi_inv_z elec_coord + + call system_clock(cpu1, count_rate, count_max) + if (cpu1 < cpu0) then + cpu1 = cpu1+cpu0 + endif + loop = dble(cpu1-cpu0) < dble(block_time)*dble(count_rate) + if (cpu1-cpu2 > count_rate) then + integer :: do_run + call get_running(do_run) + loop = do_run == t_Running + cpu2 = cpu1 + endif + + enddo + + + + double precision :: factor + factor = 1.d0/icount + block_weight *= icount + SOFT_TOUCH block_weight +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +t = """ + if (calc_$X) then + $X_dmc_block_walk *= factor + $X_2_dmc_block_walk *= factor + endif +""" +for p in properties: + print t.replace("$X",p[1]) +END_SHELL + + deallocate ( elec_coord_tmp ) + deallocate ( psi_grad_psi_inv_save_tmp ) + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, E_ref ] + implicit none + BEGIN_DOC +! Weight of the DMC population + END_DOC + E_ref = 0.d0 + call get_simulation_E_ref(E_ref) +END_PROVIDER + +BEGIN_PROVIDER [ double precision, pop_weight_mult ] + implicit none + BEGIN_DOC +! Population weight of DMC + END_DOC + pop_weight_mult = 1.d0 +END_PROVIDER + + BEGIN_PROVIDER [ integer, dmc_projection ] +&BEGIN_PROVIDER [ integer, dmc_projection_step ] + implicit none + BEGIN_DOC +! Number of projection steps for SRMC + END_DOC + dmc_projection = int( 10.d0/time_step) + dmc_projection_step = 0 +END_PROVIDER + +BEGIN_PROVIDER [ double precision, pop_weight, (dmc_projection) ] + implicit none + BEGIN_DOC +! Population weight of DMC + END_DOC + pop_weight = 1.d0 +END_PROVIDER + diff --git a/src/SAMPLING/langevin_step.irp.f b/src/SAMPLING/langevin_step.irp.f new file mode 100644 index 0000000..082e1e8 --- /dev/null +++ b/src/SAMPLING/langevin_step.irp.f @@ -0,0 +1,314 @@ +BEGIN_PROVIDER [ double precision, elec_mass ] + implicit none + BEGIN_DOC +! Electron mass in the Langevin algorithm + END_DOC + + integer :: i + elec_mass=0.d0 + do i=1,nucl_num + elec_mass=max(dble(nucl_charge(i)),dble(elec_mass)) + enddo + elec_mass=elec_mass**1.5 +END_PROVIDER + +BEGIN_PROVIDER [ real, elec_impuls_full, (elec_num,4,walk_num)] + implicit none + BEGIN_DOC +! Impulsions used in the Langevin algorithm + END_DOC + integer :: i,k,l + double precision :: temp(elec_num,3) + + do k=1,walk_num + call gauss_array(elec_num*3,temp) + do l=1,3 + do i=1,elec_num + elec_impuls_full(i,l,k) = time_step*temp(i,l) + enddo + enddo + enddo + +END_PROVIDER + +BEGIN_PROVIDER [ real, elec_impuls, (elec_num_8,3)] + implicit none + BEGIN_DOC +! Impulsions used in the Langevin algorithm for the current walker + END_DOC + integer :: i,l + do l=1,3 + do i=1,elec_num + elec_impuls(i,l) = elec_impuls_full(i,l,walk_i) + enddo + enddo +END_PROVIDER + +BEGIN_PROVIDER [ double precision, langevin_sigma, (2) ] +&BEGIN_PROVIDER [ double precision, langevin_c12 ] +&BEGIN_PROVIDER [ double precision, langevin_coef, (4) ] + implicit none + BEGIN_DOC +! Quantities needed for the Langevin algorithm. 
+ END_DOC + langevin_sigma(1) = sqrt (time_step/elec_mass * & + (2.d0- (3.d0-4.d0*time_step_exp + time_step_exp**2)/time_step) ) + langevin_sigma(2) = sqrt (elec_mass * (1.d0-time_step_exp**2)) + langevin_c12 = (1.d0-time_step_exp)**2/(langevin_sigma(1)*langevin_sigma(2)) + langevin_coef(1) = 1.d0/elec_mass*time_step*time_step_exp_sq + langevin_coef(2) = time_step_exp_sq_sq*time_step**2/elec_mass + langevin_coef(3) = langevin_sigma(2)*sqrt(1.d0-langevin_c12**2) + langevin_coef(4) = langevin_c12*langevin_sigma(2)/langevin_sigma(1) +END_PROVIDER + + + +subroutine langevin_step(p,q,accepted,delta_x) + + implicit none + + double precision, intent(out) :: p,q + logical, intent(out) :: accepted + real, intent(out) :: delta_x + + real :: xold(elec_num_8,3) + real :: pold(elec_num_8,3) + double precision :: psiold + double precision :: bold(elec_num_8,3) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xold, bold, pold + + integer :: i,l + + psiold = psi_value + if (psiold == 0.d0) then + call abrt(irp_here,'Walker is on the nodal surface.') + endif + + do l=1,3 + !DIR$ LOOP COUNT (200) + !DIR$ VECTOR ALIGNED + do i=1,elec_num + xold(i,l) = elec_coord(i,l) + pold(i,l) = elec_impuls(i,l) + enddo + enddo + !DIR$ LOOP COUNT (200) + !DIR$ VECTOR ALIGNED + do i=1,elec_num + bold(i,1) = psi_grad_psi_inv_x(i) + bold(i,2) = psi_grad_psi_inv_y(i) + bold(i,3) = psi_grad_psi_inv_z(i) + enddo + +! First move +! W1 + call gauss_array(size(xbrown),xbrown) + do l=1,3 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + xbrown(i,l) = xbrown(i,l)*langevin_sigma(1) + enddo + enddo + + real :: xdiff (elec_num_8,3) +! q(n+1) + delta_x = 0. + do l=1,3 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + xdiff(i,l) = pold(i,l)*langevin_coef(1) & + + bold(i,l)*langevin_coef(2) & + + xbrown(i,l) + delta_x += xdiff(i,l)*xdiff(i,l) + enddo + enddo + delta_x = sqrt(delta_x) + + do l=1,3 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + elec_coord(i,l) = elec_coord(i,l) + xdiff(i,l) + enddo + enddo + + TOUCH elec_coord + +! Second move +! W2 + double precision :: gauss + do l=1,3 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + xbrown(i,l) = langevin_coef(3)*gauss() + & + langevin_coef(4)*xbrown(i,l) + enddo + enddo + +! p(n+1) + !DIR$ LOOP COUNT (200) + do i=1,elec_num + elec_impuls(i,1) = time_step_exp*pold(i,1) & + + time_step*(bold(i,1)+psi_grad_psi_inv_x(i))*time_step_exp_sq & + + xbrown(i,1) + elec_impuls(i,2) = time_step_exp*pold(i,2) & + + time_step*(bold(i,2)+psi_grad_psi_inv_y(i))*time_step_exp_sq & + + xbrown(i,2) + elec_impuls(i,3) = time_step_exp*pold(i,3) & + + time_step*(bold(i,3)+psi_grad_psi_inv_z(i))*time_step_exp_sq & + + xbrown(i,3) + enddo +! TOUCH elec_impuls +! 
(touch moved after acceptation) + + p = 1.d0 + + double precision :: ratio + ratio = (psi_value/psiold)**2 + + double precision :: dt_dm + dt_dm = time_step/elec_mass + !DIR$ LOOP COUNT (200) + do i=1,elec_num + + double precision :: d11 + double precision :: d12 + double precision :: temp + double precision :: d21 + double precision :: d22 + double precision :: re + + d11 = -xdiff(i,1) & + + dt_dm*elec_impuls(i,1)*time_step_exp_sq & + - time_step*dt_dm*psi_grad_psi_inv_x(i)*time_step_exp_sq_sq + + d12 = xdiff(i,1) & + - dt_dm*pold(i,1)*time_step_exp_sq & + - time_step*dt_dm*bold(i,1)*time_step_exp_sq_sq + + temp = time_step*(psi_grad_psi_inv_x(i)+bold(i,1))*time_step_exp_sq + + d21 = -pold(i,1)+elec_impuls(i,1)*time_step_exp - temp + + + d22 = elec_impuls(i,1)-pold(i,1)*time_step_exp -temp + + re = (d11/langevin_sigma(1))**2+(d21/langevin_sigma(2))**2 & + - 2.d0*langevin_c12*d11*d21/(langevin_sigma(1)*langevin_sigma(2)) + + re -= (d12/langevin_sigma(1))**2+(d22/langevin_sigma(2))**2 & + - 2.d0*langevin_c12*d12*d22/(langevin_sigma(1)*langevin_sigma(2)) + + re = re / (2.d0*(1.d0-langevin_c12**2)) + + if (re < 35.d0) then + p=p*exp(-re) + else + p = 0.d0 + endif + + + d11 = -xdiff(i,2) & + + dt_dm*elec_impuls(i,2)*time_step_exp_sq & + - time_step*dt_dm*psi_grad_psi_inv_y(i)*time_step_exp_sq_sq + + d12 = xdiff(i,2) & + - dt_dm*pold(i,2)*time_step_exp_sq & + - time_step*dt_dm*bold(i,2)*time_step_exp_sq_sq + + temp = time_step*(psi_grad_psi_inv_y(i)+bold(i,2))*time_step_exp_sq + + d21 = -pold(i,2)+elec_impuls(i,2)*time_step_exp - temp + + + d22 = elec_impuls(i,2)-pold(i,2)*time_step_exp -temp + + re = (d11/langevin_sigma(1))**2+(d21/langevin_sigma(2))**2 & + - 2.d0*langevin_c12*d11*d21/(langevin_sigma(1)*langevin_sigma(2)) + + re -= (d12/langevin_sigma(1))**2+(d22/langevin_sigma(2))**2 & + - 2.d0*langevin_c12*d12*d22/(langevin_sigma(1)*langevin_sigma(2)) + + re = re / (2.d0*(1.d0-langevin_c12**2)) + + if (re < 35.d0) then + p=p*exp(-re) + else + p = 0.d0 + endif + + d11 = -xdiff(i,3) & + + dt_dm*elec_impuls(i,3)*time_step_exp_sq & + - time_step*dt_dm*psi_grad_psi_inv_z(i)*time_step_exp_sq_sq + + d12 = xdiff(i,3) & + - dt_dm*pold(i,3)*time_step_exp_sq & + - time_step*dt_dm*bold(i,3)*time_step_exp_sq_sq + + temp = time_step*(psi_grad_psi_inv_z(i)+bold(i,3))*time_step_exp_sq + + d21 = -pold(i,3)+elec_impuls(i,3)*time_step_exp - temp + + + d22 = elec_impuls(i,3)-pold(i,3)*time_step_exp -temp + + re = (d11/langevin_sigma(1))**2+(d21/langevin_sigma(2))**2 & + - 2.d0*langevin_c12*d11*d21/(langevin_sigma(1)*langevin_sigma(2)) + + re -= (d12/langevin_sigma(1))**2+(d22/langevin_sigma(2))**2 & + - 2.d0*langevin_c12*d12*d22/(langevin_sigma(1)*langevin_sigma(2)) + + re = re / (2.d0*(1.d0-langevin_c12**2)) + + if (re < 35.d0) then + p=p*exp(-re) + else + p = 0.d0 + endif + + + double precision :: sumup, sumdn + sumup = elec_impuls(i,1)*elec_impuls(i,1) & + + elec_impuls(i,2)*elec_impuls(i,2) & + + elec_impuls(i,3)*elec_impuls(i,3) + sumdn = pold(i,1)*pold(i,1) + pold(i,2)*pold(i,2) + pold(i,3)*pold(i,3) + re = exp(-(sumup-sumdn)/(2.d0*elec_mass)) + p = p*re + + enddo + + + p = min (1.d0,ratio*p) + q = 1.d0 - p + + double precision :: qmc_ranf + accepted = p > qmc_ranf() + if (accepted) then + accepted_num += 1. 
+ SOFT_TOUCH elec_impuls + else + do l=1,3 + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + elec_coord(i,l) = xold(i,l) + elec_impuls(i,l) = -pold(i,l) + enddo + enddo + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + psi_grad_psi_inv_x(i) = bold(i,1) + psi_grad_psi_inv_y(i) = bold(i,2) + psi_grad_psi_inv_z(i) = bold(i,3) + enddo + psi_value = psiold + rejected_num += 1. + SOFT_TOUCH elec_coord psi_grad_psi_inv_x psi_grad_psi_inv_y psi_grad_psi_inv_z psi_value + endif + +end + diff --git a/src/SAMPLING/reconfigure.irp.f b/src/SAMPLING/reconfigure.irp.f new file mode 100644 index 0000000..c4e9002 --- /dev/null +++ b/src/SAMPLING/reconfigure.irp.f @@ -0,0 +1,84 @@ +subroutine reconfigure(ipos,w) + implicit none + integer, intent(inout) :: ipos(*) + double precision, intent(in) :: w(*) + + integer :: kp, km + double precision :: accup, accum + integer :: k + + double precision :: dwalk_num + dwalk_num = dble(walk_num) + + integer :: kptab(walk_num), kmtab(walk_num) + double precision :: wp(walk_num), wm(walk_num) + double precision :: tmp + + do k=1,walk_num + ipos(k) = k + enddo + + kp=0 + km=0 + accup = 0.d0 + accum = 0.d0 + do k=1,walk_num + tmp = dwalk_num*w(k)-1.d0 + if (tmp >= 0.d0) then + kp += 1 + wp(kp) = abs(tmp) + accup += wp(kp) + kptab(kp) = k + else + km += 1 + wm(km) = abs(tmp) + accum += wm(km) + kmtab(km) = k + endif + enddo + if(kp+km /= walk_num) then + print *, kp, km + call abrt(irp_here,'pb in reconfiguration +/-') + endif + if(abs(accup-accum).gt.1.d-11) then + print *, accup, accum + call abrt(irp_here,'pb in reconfiguration') + endif + + double precision :: qmc_ranf, rand + double precision :: rando(walk_num) + rand = qmc_ranf() + do k=1,walk_num + rando(k) = dble(k-1)+rand + enddo + + double precision :: averageconf, current + integer :: kcp + integer :: kadd, kremove + + averageconf = accup + kcp = 1 + rand = rando(kcp) + do while (rand < averageconf) + k=1 + current=wm(k) + do while (rand > current) + k += 1 + current += wm(k) + enddo + kremove = kmtab(k) + + k=1 + current=wp(k) + do while (rand > current) + k += 1 + current += wp(k) + enddo + kadd = kptab(k) + ipos(kremove) = kadd + kcp += 1 + rand = rando(kcp) + enddo + +end + diff --git a/src/SAMPLING/vmc_step.irp.f b/src/SAMPLING/vmc_step.irp.f new file mode 100644 index 0000000..0cac8d2 --- /dev/null +++ b/src/SAMPLING/vmc_step.irp.f @@ -0,0 +1,269 @@ +! Providers of *_vmc_block_walk +!============================== +BEGIN_SHELL [ /usr/bin/python ] +from properties import * + +t = """ + BEGIN_PROVIDER [ $T, $X_vmc_block_walk $D1 ] +&BEGIN_PROVIDER [ $T, $X_vmc_block_walk_kahan $D2 ] +&BEGIN_PROVIDER [ $T, $X_2_vmc_block_walk $D1 ] +&BEGIN_PROVIDER [ $T, $X_2_vmc_block_walk_kahan $D2 ] + implicit none + BEGIN_DOC +! VMC averages of $X + END_DOC +END_PROVIDER +""" +for p in properties: + if p[1] != 'e_loc': + if p[2] == "": + D1 = "" + D2 = ", (3)" + else: + D1 = ", ("+p[2][1:-1]+")" + D2 = ", ("+p[2][1:-1]+",3)" + print t.replace("$X",p[1]).replace("$T",p[0]).replace("$D1",D1).replace("$D2",D2) +END_SHELL + + BEGIN_PROVIDER [ double precision, E_loc_vmc_block_walk ] +&BEGIN_PROVIDER [ double precision, E_loc_2_vmc_block_walk ] +&BEGIN_PROVIDER [ double precision, E_loc_vmc_block_walk_kahan, (3) ] +&BEGIN_PROVIDER [ double precision, E_loc_2_vmc_block_walk_kahan, (3) ] + implicit none + include '../types.F' + BEGIN_DOC +! 
Properties averaged over the block per walker using the VMC method + END_DOC + + integer :: i_walk + + PROVIDE time_step + + +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +t = """ + if (calc_$X) then + !DIR$ VECTOR ALIGNED + $X_vmc_block_walk = 0.d0 + !DIR$ VECTOR ALIGNED + $X_vmc_block_walk_kahan = 0.d0 + !DIR$ VECTOR ALIGNED + $X_2_vmc_block_walk = 0.d0 + !DIR$ VECTOR ALIGNED + $X_2_vmc_block_walk_kahan = 0.d0 + $X_min = huge(1.) + $X_max =-huge(1.) + endif +""" +for p in properties: + print t.replace("$X",p[1]) +END_SHELL + + double precision :: dnorm + !DIR$ VECTOR ALIGNED + block_weight = 0.d0 + do i_walk=1,walk_num + integer :: i,j,l + if (i_walk > 1) then + do l=1,3 + do i=1,elec_num+1 + elec_coord(i,l) = elec_coord_full(i,l,i_walk) + enddo + enddo + endif + + PROVIDE psi_grad_psi_inv_save psi_value_save + + if (psi_value_save(walk_num) /= 0.) then + psi_value = psi_value_save(i_walk) + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT(200) + do i=1,elec_num + psi_grad_psi_inv_x(i) = psi_grad_psi_inv_save(i,1,i_walk) + psi_grad_psi_inv_y(i) = psi_grad_psi_inv_save(i,2,i_walk) + psi_grad_psi_inv_z(i) = psi_grad_psi_inv_save(i,3,i_walk) + enddo + TOUCH psi_value psi_grad_psi_inv_x psi_grad_psi_inv_y psi_grad_psi_inv_z elec_coord + else + if (i_walk > 1) then + TOUCH elec_coord + endif + endif + + logical :: loop + integer*8 :: cpu0, cpu1, cpu2, count_rate, count_max + loop = .True. + + call system_clock(cpu0, count_rate, count_max) + cpu2 = cpu0 + do while (loop) + double precision :: p,q + real :: delta_x + logical :: accepted + double precision :: E_old + if (vmc_algo == t_Brownian) then + call brownian_step(p,q,accepted,delta_x) + else if (vmc_algo == t_Langevin) then + call langevin_step(p,q,accepted,delta_x) + endif + elec_coord(elec_num+1,1) += p*time_step + elec_coord(elec_num+1,2) = E_loc + elec_coord(elec_num+1,3) += p*time_step + if (accepted) then + trapped_walk(i_walk) = 0 + else + trapped_walk(i_walk) += 1 + endif + + block_weight += 1.d0 + +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +t = """ + if (calc_$X) then + ! Kahan's summation algorithm to compute these sums reducing the rounding error: + ! $X_vmc_block_walk $D1 += $X + ! $X_2_vmc_block_walk $D1 += $X_2 + ! 
see http://en.wikipedia.org/wiki/Kahan_summation_algorithm + + $X_vmc_block_walk_kahan($D2 3) = $X - $X_vmc_block_walk_kahan($D2 1) + $X_vmc_block_walk_kahan($D2 2) = $X_vmc_block_walk $D1 + $X_vmc_block_walk_kahan($D2 3) + $X_vmc_block_walk_kahan($D2 1) = ($X_vmc_block_walk_kahan($D2 2) - $X_vmc_block_walk $D1 ) & + - $X_vmc_block_walk_kahan($D2 3) + $X_vmc_block_walk $D1 = $X_vmc_block_walk_kahan($D2 2) + + + $X_2_vmc_block_walk_kahan($D2 3) = $X_2 - $X_2_vmc_block_walk_kahan($D2 1) + $X_2_vmc_block_walk_kahan($D2 2) = $X_2_vmc_block_walk $D1 + $X_2_vmc_block_walk_kahan($D2 3) + $X_2_vmc_block_walk_kahan($D2 1) = ($X_2_vmc_block_walk_kahan($D2 2) - $X_2_vmc_block_walk $D1 ) & + - $X_2_vmc_block_walk_kahan($D2 3) + $X_2_vmc_block_walk $D1 = $X_2_vmc_block_walk_kahan($D2 2) + endif +""" +for p in properties: + if p[2] == "": + D1 = "" + D2 = "" + else: + D1 = "("+":"*(p[2].count(',')+1)+")" + D2 = ":"*(p[2].count(',')+1)+"," + print t.replace("$X",p[1]).replace("$D1",D1).replace("$D2",D2) + +END_SHELL + + + if ( qmc_method == t_VMC ) then + call system_clock(cpu1, count_rate, count_max) + if (cpu1 < cpu0) then + cpu1 = cpu1+cpu0 + endif + loop = dble(cpu1-cpu0)*dble(walk_num) < dble(block_time)*dble(count_rate) + if (cpu1-cpu2 > count_rate) then + integer :: do_run + call get_running(do_run) + loop = do_run == t_Running + cpu2 = cpu1 + endif + else + loop = .False. + endif + + enddo ! while (loop) + + do l=1,3 + do i=1,elec_num+1 + elec_coord_full(i,l,i_walk) = elec_coord(i,l) + enddo + enddo + + if (qmc_method == t_DMC) then + psi_value_save(i_walk) = psi_value + !DIR$ VECTOR ALIGNED + !DIR$ LOOP COUNT (200) + do i=1,elec_num + psi_grad_psi_inv_save(i,1,i_walk) = psi_grad_psi_inv_x(i) + psi_grad_psi_inv_save(i,2,i_walk) = psi_grad_psi_inv_y(i) + psi_grad_psi_inv_save(i,3,i_walk) = psi_grad_psi_inv_z(i) + enddo + +! if ( (trapped_walk(i_walk) < trapped_walk_max).and. & +! (psi_value * psi_value_save(i_walk) > 0.d0).and. & +! (dabs(E_ref-E_loc)*time_step_sq < -.2d0*E_ref) ) then + if ( (trapped_walk(i_walk) < trapped_walk_max).and. & + (psi_value * psi_value_save(i_walk) > 0.d0) ) then + dmc_weight(i_walk) = exp(time_step*(E_ref - E_loc)) + + else + dmc_weight(i_walk) = 0.d0 + trapped_walk(i_walk) = 0 + endif + endif + + enddo + + + double precision :: factor + factor = 1.d0/block_weight + SOFT_TOUCH block_weight +BEGIN_SHELL [ /usr/bin/python ] +from properties import * +t = """ + if (calc_$X) then + $X_vmc_block_walk *= factor + $X_2_vmc_block_walk *= factor + endif +""" +for p in properties: + print t.replace("$X",p[1]) +END_SHELL + + SOFT_TOUCH elec_coord_full + +END_PROVIDER + + +BEGIN_PROVIDER [ double precision, dmc_weight, (walk_num_8) ] + implicit none + BEGIN_DOC +! Weight of the walkers in the DMC algorithm: exp(-time_step*(E_loc-E_ref)) + END_DOC + !DIR$ VECTOR ALIGNED + dmc_weight = 1.d0 +END_PROVIDER + + + + +BEGIN_PROVIDER [ double precision, psi_grad_psi_inv_save, (elec_num_8,3,walk_num) ] +&BEGIN_PROVIDER [ double precision, psi_value_save, (walk_num_8) ] + implicit none + BEGIN_DOC +! psi_grad_psi_inv of the previous step to accelerate DMC +! +! updated in vmc_step + END_DOC + integer, save :: ifirst = 0 + if (ifirst == 0) then + psi_grad_psi_inv_save = 0.d0 + psi_value_save = 0.d0 + ifirst = 1 + endif +END_PROVIDER + +BEGIN_PROVIDER [ integer, trapped_walk, (walk_num_8) ] + implicit none + BEGIN_DOC +! Number of steps when the walkers were trapped + END_DOC + trapped_walk = 0 +END_PROVIDER + +BEGIN_PROVIDER [ integer, trapped_walk_max ] + implicit none + BEGIN_DOC +! 
Max number of trapped MC steps before killing walker + END_DOC + trapped_walk_max = 5 +END_PROVIDER + diff --git a/src/TOOLS/Util.irp.f b/src/TOOLS/Util.irp.f new file mode 100644 index 0000000..53c780c --- /dev/null +++ b/src/TOOLS/Util.irp.f @@ -0,0 +1,289 @@ +BEGIN_PROVIDER [ character*(8), current_PID ] + implicit none + BEGIN_DOC + ! Process ID + END_DOC + integer :: getpid + write(current_PID,'(I8)') getpid() + current_PID = adjustl(trim(current_PID)) + +END_PROVIDER + + + BEGIN_PROVIDER [ integer, simd_sp ] +&BEGIN_PROVIDER [ integer, simd_dp ] + implicit none + BEGIN_DOC +! Number of array elements for vectorization + END_DOC + simd_sp = max(1,$IRP_ALIGN / 4) + simd_dp = max(1,$IRP_ALIGN / 8) +END_PROVIDER + +integer function mod_align(n) + implicit none + integer, intent(in) :: n + + if (mod(n,simd_sp) /= 0) then + mod_align = n + simd_sp - mod(n,simd_sp) + else + mod_align = n + endif +end function + +real function fact2(n) + implicit none + integer :: n + real :: dblefact_even + real :: dblefact_odd + if (mod(n,2) == 0) then + fact2 = dblefact_even(n) + else + fact2 = dblefact_odd(n) + endif +end + + +real function dblefact_even(n) + implicit none + integer :: n + real, save :: memo(1:100) + integer, save :: memomax = 2 + integer :: i + if (n<=memomax) then + if (n<2) then + dblefact_even = 1. + else + dblefact_even = memo(n) + endif + return + endif + + memo(2) = 2. + do i=memomax+2,min(n,100),2 + memo(i) = memo(i-2)* float(i) + enddo + memomax = min(n,100) + dblefact_even = memo(memomax) + + do i=102,n,2 + dblefact_even = dblefact_even*float(i) + enddo + +end + + +real function dblefact_odd(n) + implicit none + integer :: n + real, save :: memo(1:100) + integer, save :: memomax = 1 + integer :: i + + if (n<=memomax) then + if (n<3) then + dblefact_odd = 1. + else + dblefact_odd = memo(n) + endif + return + endif + + memo(1) = 1. + do i=memomax+2,min(n,99),2 + memo(i) = memo(i-2)* float(i) + enddo + memomax = min(n,99) + dblefact_odd = memo(memomax) + + do i=101,n,2 + dblefact_odd = dblefact_odd*float(i) + enddo + +end + + + +real function fact(n) + implicit none + integer :: n + real , save :: memo(1:100) + integer, save :: memomax = 1 + + if (n<=memomax) then + if (n<2) then + fact = 1. + else + fact = memo(n) + endif + return + endif + + integer :: i + memo(1) = 1. + do i=memomax+1,min(n,100) + memo(i) = memo(i-1)*float(i) + enddo + memomax = min(n,100) + fact = memo(memomax) + do i=101,n + fact = fact*float(i) + enddo +end function + + +real function rintgauss(n) + implicit none + include '../constants.F' + + integer :: n + + rintgauss = sqrt(pi) + if ( n == 0 ) then + return + else if ( n == 1 ) then + rintgauss = 0. + else if ( mod(n,2) == 1) then + rintgauss = 0. + else + real :: fact2 + rintgauss = rintgauss/(2.**(n/2)) + rintgauss = rintgauss * fact2(n-1) + endif +end function + +real function goverlap(gamA,gamB,nA) + implicit none + + real :: gamA, gamB + integer :: nA(3) + + real :: gamtot + gamtot = gamA+gamB + + goverlap=1.0 + + integer :: l + real :: rintgauss + do l=1,3 + goverlap *= rintgauss(nA(l)+nA(l))/ (gamtot**(0.5+float(nA(l)))) + enddo + +end function + +double precision function binom(n,k) + implicit none + integer, intent(in) :: k,n + real :: fact + binom=fact(n)/(fact(k)*fact(n-k)) +end + +!DIR$ attributes forceinline :: transpose +recursive subroutine transpose(A,LDA,B,LDB,d1,d2) + implicit none + BEGIN_DOC +! 
Transpose input matrix A into output matrix B + END_DOC + integer, intent(in) :: d1, d2, LDA, LDB + real, intent(in) :: A(LDA,d2) + real, intent(out) :: B(LDB,d1) + + integer :: i,j,k, mod_align + if ( d2 < 32 ) then + do j=1,d1 + !DIR$ LOOP COUNT (16) + do i=1,d2 + B(i,j ) = A(j ,i) + enddo + enddo + return + else if (d1 > d2) then + !DIR$ forceinline + k=d1/2 + !DIR$ forceinline recursive + call transpose(A(1,1),LDA,B(1,1),LDB,k,d2) + !DIR$ forceinline recursive + call transpose(A(k+1,1),LDA,B(1,k+1),LDB,d1-k,d2) + return + else + !DIR$ forceinline + k=d2/2 + !DIR$ forceinline recursive + call transpose(A(1,k+1),LDA,B(k+1,1),LDB,d1,d2-k) + !DIR$ forceinline recursive + call transpose(A(1,1),LDA,B(1,1),LDB,d1,k) + return + endif + +end + +subroutine gaussian_product(a,xa,b,xb,k,p,xp) + implicit none +! e^{-a (x-x_A)^2} e^{-b (x-x_B)^2} = K_{ab}^x e^{-p (x-x_P)^2} + + real, intent(in) :: a,b ! Exponents + real, intent(in) :: xa(3),xb(3) ! Centers + real, intent(out) :: p ! New exponent + real, intent(out) :: xp(3) ! New center + real, intent(inout) :: k ! Constant + + real :: p_inv + + ASSERT (a>0.) + ASSERT (b>0.) + + real :: xab(4), ab + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: xab + + p_inv = 1./(a+b) + p = a+b + ab = a*b + xab(1) = xa(1)-xb(1) + xab(2) = xa(2)-xb(2) + xab(3) = xa(3)-xb(3) + xp(1) = (a*xa(1)+b*xb(1))*p_inv + xp(2) = (a*xa(2)+b*xb(2))*p_inv + xp(3) = (a*xa(3)+b*xb(3))*p_inv + k *= exp(-ab*p_inv*(xab(1)*xab(1)+xab(2)*xab(2)+xab(3)*xab(3))) + +end subroutine + +!DIR$ attributes forceinline :: transpose_to_dp +recursive subroutine transpose_to_dp(A,LDA,B,LDB,d1,d2) + implicit none + BEGIN_DOC +! Transpose SP input matrix A into DP output matrix B + END_DOC + integer, intent(in) :: d1, d2, LDA, LDB + real, intent(in) :: A(LDA,d2) + double precision, intent(out) :: B(LDB,d1) + + integer :: i,j,k, mod_align + if ( d2 < 32 ) then + do j=1,d1 + !DIR$ LOOP COUNT (16) + do i=1,d2 + B(i,j ) = A(j ,i) + enddo + enddo + return + else if (d1 > d2) then + !DIR$ forceinline + k=d1/2 + !DIR$ forceinline recursive + call transpose_to_dp(A(1,1),LDA,B(1,1),LDB,k,d2) + !DIR$ forceinline recursive + call transpose_to_dp(A(k+1,1),LDA,B(1,k+1),LDB,d1-k,d2) + return + else + !DIR$ forceinline + k=d2/2 + !DIR$ forceinline recursive + call transpose_to_dp(A(1,k+1),LDA,B(k+1,1),LDB,d1,d2-k) + !DIR$ forceinline recursive + call transpose_to_dp(A(1,1),LDA,B(1,1),LDB,d1,k) + return + endif + +end + diff --git a/src/TOOLS/determinant.irp.f b/src/TOOLS/determinant.irp.f new file mode 100644 index 0000000..c9d0b23 --- /dev/null +++ b/src/TOOLS/determinant.irp.f @@ -0,0 +1,204 @@ +subroutine determinant(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + + integer :: i,j + select case (na) + case default +!DIR$ forceinline + call determinant_general(a,LDA,na,det_l) + case (5) +!DIR$ forceinline + call determinant5(a,LDA,na,det_l) + case (4) +!DIR$ forceinline + call determinant4(a,LDA,na,det_l) + case (3) +!DIR$ forceinline + call determinant3(a,LDA,na,det_l) + case (2) +!DIR$ forceinline + call determinant2(a,LDA,na,det_l) + case (1) +!DIR$ forceinline + call determinant1(a,LDA,na,det_l) + case (0) + det_l=1.d0 + end select +end + +subroutine determinant_general(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + + double precision :: 
work(LDA*max(na,64)) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: work + integer :: inf + integer :: ipiv(LDA) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: ipiv + integer :: lwork + double precision :: f + integer :: i, j + call dgetrf(na, na, a, LDA, ipiv, inf ) + det_l = 1.d0 + j=0 + !DIR$ VECTOR ALIGNED + do i=1,na + j = j+min(abs(ipiv(i)-i),1) + det_l = det_l*a(i,i) + enddo + if (iand(j,1) /= 0) then + det_l = -det_l + endif +end + +subroutine sdeterminant(a,LDA,na,det_l) + implicit none + real :: a (LDA,na) + integer :: LDA + integer :: na + real :: det_l + + real :: work(LDA*max(na,64)) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: work + integer :: inf + integer :: ipiv(LDA) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: ipiv + integer :: lwork + real :: f + integer :: i, j + call sgetrf(na, na, a, LDA, ipiv, inf ) + det_l = 1.d0 + j=0 + !DIR$ VECTOR ALIGNED + do i=1,na + if (ipiv(i) /= i) then + j = j+1 + endif + det_l = det_l*a(i,i) + enddo + if (iand(j,1) /= 0) then + det_l = -det_l + endif +end + +subroutine determinant1(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + + det_l = a(1,1) +end + +subroutine determinant2(a,LDA,na,det_l) + implicit none + double precision :: a (LDA,na) + integer :: LDA + integer :: na + double precision :: det_l + double precision :: b(2,2) + + double precision :: f + det_l = a(1,1)*a(2,2) - a(1,2)*a(2,1) +end + +subroutine determinant3(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + double precision :: b(4,3) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: b + integer :: i + double precision :: f + det_l = a(1,1)*(a(2,2)*a(3,3)-a(2,3)*a(3,2)) & + -a(1,2)*(a(2,1)*a(3,3)-a(2,3)*a(3,1)) & + +a(1,3)*(a(2,1)*a(3,2)-a(2,2)*a(3,1)) + +end + +subroutine determinant4(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + double precision :: b(4,4) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: b + integer :: i,j + double precision :: f + det_l = a(1,1)*(a(2,2)*(a(3,3)*a(4,4)-a(3,4)*a(4,3)) & + -a(2,3)*(a(3,2)*a(4,4)-a(3,4)*a(4,2)) & + +a(2,4)*(a(3,2)*a(4,3)-a(3,3)*a(4,2))) & + -a(1,2)*(a(2,1)*(a(3,3)*a(4,4)-a(3,4)*a(4,3)) & + -a(2,3)*(a(3,1)*a(4,4)-a(3,4)*a(4,1)) & + +a(2,4)*(a(3,1)*a(4,3)-a(3,3)*a(4,1))) & + +a(1,3)*(a(2,1)*(a(3,2)*a(4,4)-a(3,4)*a(4,2)) & + -a(2,2)*(a(3,1)*a(4,4)-a(3,4)*a(4,1)) & + +a(2,4)*(a(3,1)*a(4,2)-a(3,2)*a(4,1))) & + -a(1,4)*(a(2,1)*(a(3,2)*a(4,3)-a(3,3)*a(4,2)) & + -a(2,2)*(a(3,1)*a(4,3)-a(3,3)*a(4,1)) & + +a(2,3)*(a(3,1)*a(4,2)-a(3,2)*a(4,1))) + +end + +subroutine determinant5(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + double precision :: b(5,5) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: b + integer :: i,j + double precision :: f + det_l = a(1,1)*(a(2,2)*(a(3,3)*(a(4,4)*a(5,5)-a(4,5)*a(5,4))-a(3,4)*( & + a(4,3)*a(5,5)-a(4,5)*a(5,3))+a(3,5)*(a(4,3)*a(5,4)-a(4,4)*a(5,3)))- & + a(2,3)*(a(3,2)*(a(4,4)*a(5,5)-a(4,5)*a(5,4))-a(3,4)*(a(4,2)*a(5,5)- & + a(4,5)*a(5,2))+a(3,5)*(a(4,2)*a(5,4)-a(4,4)*a(5,2)))+a(2,4)*(a(3,2)*( & + a(4,3)*a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,2)*a(5,5)-a(4,5)*a(5,2))+ & + 
a(3,5)*(a(4,2)*a(5,3)-a(4,3)*a(5,2)))-a(2,5)*(a(3,2)*(a(4,3)*a(5,4)- & + a(4,4)*a(5,3))-a(3,3)*(a(4,2)*a(5,4)-a(4,4)*a(5,2))+a(3,4)*(a(4,2)* & + a(5,3)-a(4,3)*a(5,2))))-a(1,2)*(a(2,1)*(a(3,3)*(a(4,4)*a(5,5)-a(4,5)* & + a(5,4))-a(3,4)*(a(4,3)*a(5,5)-a(4,5)*a(5,3))+a(3,5)*(a(4,3)*a(5,4)- & + a(4,4)*a(5,3)))-a(2,3)*(a(3,1)*(a(4,4)*a(5,5)-a(4,5)*a(5,4))-a(3,4)*( & + a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,4)-a(4,4)*a(5,1)))+ & + a(2,4)*(a(3,1)*(a(4,3)*a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,1)*a(5,5)- & + a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,3)-a(4,3)*a(5,1)))-a(2,5)*(a(3,1)*( & + a(4,3)*a(5,4)-a(4,4)*a(5,3))-a(3,3)*(a(4,1)*a(5,4)-a(4,4)*a(5,1))+ & + a(3,4)*(a(4,1)*a(5,3)-a(4,3)*a(5,1))))+a(1,3)*(a(2,1)*(a(3,2)*(a(4,4)* & + a(5,5)-a(4,5)*a(5,4))-a(3,4)*(a(4,2)*a(5,5)-a(4,5)*a(5,2))+a(3,5)*( & + a(4,2)*a(5,4)-a(4,4)*a(5,2)))-a(2,2)*(a(3,1)*(a(4,4)*a(5,5)-a(4,5)* & + a(5,4))-a(3,4)*(a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,4)- & + a(4,4)*a(5,1)))+a(2,4)*(a(3,1)*(a(4,2)*a(5,5)-a(4,5)*a(5,2))-a(3,2)*( & + a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,2)-a(4,2)*a(5,1)))- & + a(2,5)*(a(3,1)*(a(4,2)*a(5,4)-a(4,4)*a(5,2))-a(3,2)*(a(4,1)*a(5,4)- & + a(4,4)*a(5,1))+a(3,4)*(a(4,1)*a(5,2)-a(4,2)*a(5,1))))-a(1,4)*(a(2,1)*( & + a(3,2)*(a(4,3)*a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,2)*a(5,5)-a(4,5)* & + a(5,2))+a(3,5)*(a(4,2)*a(5,3)-a(4,3)*a(5,2)))-a(2,2)*(a(3,1)*(a(4,3)* & + a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*( & + a(4,1)*a(5,3)-a(4,3)*a(5,1)))+a(2,3)*(a(3,1)*(a(4,2)*a(5,5)-a(4,5)* & + a(5,2))-a(3,2)*(a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,2)- & + a(4,2)*a(5,1)))-a(2,5)*(a(3,1)*(a(4,2)*a(5,3)-a(4,3)*a(5,2))-a(3,2)*( & + a(4,1)*a(5,3)-a(4,3)*a(5,1))+a(3,3)*(a(4,1)*a(5,2)-a(4,2)*a(5,1))))+ & + a(1,5)*(a(2,1)*(a(3,2)*(a(4,3)*a(5,4)-a(4,4)*a(5,3))-a(3,3)*(a(4,2)* & + a(5,4)-a(4,4)*a(5,2))+a(3,4)*(a(4,2)*a(5,3)-a(4,3)*a(5,2)))-a(2,2)*( & + a(3,1)*(a(4,3)*a(5,4)-a(4,4)*a(5,3))-a(3,3)*(a(4,1)*a(5,4)-a(4,4)* & + a(5,1))+a(3,4)*(a(4,1)*a(5,3)-a(4,3)*a(5,1)))+a(2,3)*(a(3,1)*(a(4,2)* & + a(5,4)-a(4,4)*a(5,2))-a(3,2)*(a(4,1)*a(5,4)-a(4,4)*a(5,1))+a(3,4)*( & + a(4,1)*a(5,2)-a(4,2)*a(5,1)))-a(2,4)*(a(3,1)*(a(4,2)*a(5,3)-a(4,3)* & + a(5,2))-a(3,2)*(a(4,1)*a(5,3)-a(4,3)*a(5,1))+a(3,3)*(a(4,1)*a(5,2)- & + a(4,2)*a(5,1)))) + + +end + diff --git a/src/TOOLS/info.irp.f b/src/TOOLS/info.irp.f new file mode 100644 index 0000000..39944a1 --- /dev/null +++ b/src/TOOLS/info.irp.f @@ -0,0 +1,38 @@ +BEGIN_TEMPLATE + + subroutine $Xinfo (here,token,value) + implicit none + character*(*), intent(in) :: here + character*(*), intent(in) :: token + $Y, intent(in) :: value + if (print_level>1) then + write(0,*) trim(here)//':' + $Z + endif + end + +SUBST [ X, Y, Z ] + r; real; + write(0,*) ' -> ', trim(token), '=', value;; + d; double precision; + write(0,*) ' -> ', trim(token), '=', value;; + i; integer; + write(0,*) ' -> ', trim(token), '=', value;; + c; character*(*); + write(0,*) ' -> ', trim(token), '=', value;; + l; logical; + if (value) then + write(0,*) ' -> ', trim(token), '= True' + else + write(0,*) ' -> ', trim(token), '= False' + endif ;; +END_TEMPLATE + +subroutine info(here,message) + implicit none + character*(*), intent(in) :: here, message + if (print_level > 1) then + write(0,*) trim(here)//':' + write(0,*) ' -> ', trim(message) + endif +end diff --git a/src/TOOLS/invert.irp.f b/src/TOOLS/invert.irp.f new file mode 100644 index 0000000..98e1ff3 --- /dev/null +++ b/src/TOOLS/invert.irp.f @@ -0,0 +1,441 @@ +subroutine invert(a,LDA,na,det_l) + implicit none + double precision, 
intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + + integer :: i,j + select case (na) + case default +!DIR$ forceinline + call invert_general(a,LDA,na,det_l) + case (5) +!DIR$ forceinline + call invert5(a,LDA,na,det_l) + case (4) +!DIR$ forceinline + call invert4(a,LDA,na,det_l) + case (3) +!DIR$ forceinline + call invert3(a,LDA,na,det_l) + case (2) +!DIR$ forceinline + call invert2(a,LDA,na,det_l) + case (1) +!DIR$ forceinline + call invert1(a,LDA,na,det_l) + case (0) + det_l=1.d0 + end select +end + +subroutine invert_general(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + + double precision :: work(LDA*max(na,64)) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: work + integer :: inf + integer :: ipiv(LDA) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: ipiv + integer :: lwork + double precision :: f + integer :: i, j + call dgetrf(na, na, a, LDA, ipiv, inf ) + det_l = 1.d0 + j=0 + !DIR$ VECTOR ALIGNED + do i=1,na + j = j+min(abs(ipiv(i)-i),1) + det_l = det_l*a(i,i) + enddo + if (iand(j,1) /= 0) then + det_l = -det_l + endif + lwork = SIZE(work) + call dgetri(na, a, LDA, ipiv, work, lwork, inf ) + a = a*det_l +end + +subroutine sinvert(a,LDA,na,det_l) + implicit none + real :: a (LDA,na) + integer :: LDA + integer :: na + real :: det_l + + real :: work(LDA*max(na,64)) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: work + integer :: inf + integer :: ipiv(LDA) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: ipiv + integer :: lwork + real :: f + integer :: i, j + call sgetrf(na, na, a, LDA, ipiv, inf ) + det_l = 1.d0 + j=0 + !DIR$ VECTOR ALIGNED + do i=1,na + if (ipiv(i) /= i) then + j = j+1 + endif + det_l = det_l*a(i,i) + enddo + if (iand(j,1) /= 0) then + det_l = -det_l + endif + lwork = SIZE(work) + call sgetri(na, a, LDA, ipiv, work, lwork, inf ) + a = a*det_l +end + +subroutine invert1(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + + det_l = a(1,1) + a(1,1) = 1.d0 +end + +subroutine invert2(a,LDA,na,det_l) + implicit none + double precision :: a (LDA,na) + integer :: LDA + integer :: na + double precision :: det_l + double precision :: b(2,2) + + double precision :: f + b(1,1) = a(1,1) + b(2,1) = a(2,1) + b(1,2) = a(1,2) + b(2,2) = a(2,2) + det_l = a(1,1)*a(2,2) - a(1,2)*a(2,1) + a(1,1) = b(2,2) + a(2,1) = -b(2,1) + a(1,2) = -b(1,2) + a(2,2) = b(1,1) +end + +subroutine invert3(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + double precision :: b(4,3) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: b + integer :: i + double precision :: f + det_l = a(1,1)*(a(2,2)*a(3,3)-a(2,3)*a(3,2)) & + -a(1,2)*(a(2,1)*a(3,3)-a(2,3)*a(3,1)) & + +a(1,3)*(a(2,1)*a(3,2)-a(2,2)*a(3,1)) + do i=1,4 + b(i,1) = a(i,1) + b(i,2) = a(i,2) + b(i,3) = a(i,3) + enddo + a(1,1) = b(2,2)*b(3,3) - b(2,3)*b(3,2) + a(2,1) = b(2,3)*b(3,1) - b(2,1)*b(3,3) + a(3,1) = b(2,1)*b(3,2) - b(2,2)*b(3,1) + + a(1,2) = b(1,3)*b(3,2) - b(1,2)*b(3,3) + a(2,2) = b(1,1)*b(3,3) - b(1,3)*b(3,1) + a(3,2) = b(1,2)*b(3,1) - b(1,1)*b(3,2) + + a(1,3) = b(1,2)*b(2,3) - b(1,3)*b(2,2) + a(2,3) = b(1,3)*b(2,1) - b(1,1)*b(2,3) + a(3,3) = b(1,1)*b(2,2) - b(1,2)*b(2,1) + +end + +subroutine invert4(a,LDA,na,det_l) + 
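! Like invert2, invert3, invert5 and invert_general (which rescales the
! dgetri result by det_l), this routine overwrites a with the adjugate
! adj(A) = det(A) * A^{-1}, assembled from explicit 3x3 cofactors of the
! saved copy b.  On return, det_l holds det(A) and a holds det(A)*A^{-1},
! not A^{-1} itself.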
implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + double precision :: b(4,4) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: b + integer :: i,j + double precision :: f + det_l = a(1,1)*(a(2,2)*(a(3,3)*a(4,4)-a(3,4)*a(4,3)) & + -a(2,3)*(a(3,2)*a(4,4)-a(3,4)*a(4,2)) & + +a(2,4)*(a(3,2)*a(4,3)-a(3,3)*a(4,2))) & + -a(1,2)*(a(2,1)*(a(3,3)*a(4,4)-a(3,4)*a(4,3)) & + -a(2,3)*(a(3,1)*a(4,4)-a(3,4)*a(4,1)) & + +a(2,4)*(a(3,1)*a(4,3)-a(3,3)*a(4,1))) & + +a(1,3)*(a(2,1)*(a(3,2)*a(4,4)-a(3,4)*a(4,2)) & + -a(2,2)*(a(3,1)*a(4,4)-a(3,4)*a(4,1)) & + +a(2,4)*(a(3,1)*a(4,2)-a(3,2)*a(4,1))) & + -a(1,4)*(a(2,1)*(a(3,2)*a(4,3)-a(3,3)*a(4,2)) & + -a(2,2)*(a(3,1)*a(4,3)-a(3,3)*a(4,1)) & + +a(2,3)*(a(3,1)*a(4,2)-a(3,2)*a(4,1))) + do i=1,4 + b(1,i) = a(1,i) + b(2,i) = a(2,i) + b(3,i) = a(3,i) + b(4,i) = a(4,i) + enddo + + a(1,1) = b(2,2)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))-b(2,3)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))+b(2,4)*(b(3,2)*b(4,3)-b(3,3)*b(4,2)) + a(2,1) = -b(2,1)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))+b(2,3)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))-b(2,4)*(b(3,1)*b(4,3)-b(3,3)*b(4,1)) + a(3,1) = b(2,1)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))-b(2,2)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))+b(2,4)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)) + a(4,1) = -b(2,1)*(b(3,2)*b(4,3)-b(3,3)*b(4,2))+b(2,2)*(b(3,1)*b(4,3)-b(3,3)*b(4,1))-b(2,3)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)) + + a(1,2) = -b(1,2)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))+b(1,3)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))-b(1,4)*(b(3,2)*b(4,3)-b(3,3)*b(4,2)) + a(2,2) = b(1,1)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))-b(1,3)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))+b(1,4)*(b(3,1)*b(4,3)-b(3,3)*b(4,1)) + a(3,2) = -b(1,1)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))+b(1,2)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))-b(1,4)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)) + a(4,2) = b(1,1)*(b(3,2)*b(4,3)-b(3,3)*b(4,2))-b(1,2)*(b(3,1)*b(4,3)-b(3,3)*b(4,1))+b(1,3)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)) + + a(1,3) = b(1,2)*(b(2,3)*b(4,4)-b(2,4)*b(4,3))-b(1,3)*(b(2,2)*b(4,4)-b(2,4)*b(4,2))+b(1,4)*(b(2,2)*b(4,3)-b(2,3)*b(4,2)) + a(2,3) = -b(1,1)*(b(2,3)*b(4,4)-b(2,4)*b(4,3))+b(1,3)*(b(2,1)*b(4,4)-b(2,4)*b(4,1))-b(1,4)*(b(2,1)*b(4,3)-b(2,3)*b(4,1)) + a(3,3) = b(1,1)*(b(2,2)*b(4,4)-b(2,4)*b(4,2))-b(1,2)*(b(2,1)*b(4,4)-b(2,4)*b(4,1))+b(1,4)*(b(2,1)*b(4,2)-b(2,2)*b(4,1)) + a(4,3) = -b(1,1)*(b(2,2)*b(4,3)-b(2,3)*b(4,2))+b(1,2)*(b(2,1)*b(4,3)-b(2,3)*b(4,1))-b(1,3)*(b(2,1)*b(4,2)-b(2,2)*b(4,1)) + + a(1,4) = -b(1,2)*(b(2,3)*b(3,4)-b(2,4)*b(3,3))+b(1,3)*(b(2,2)*b(3,4)-b(2,4)*b(3,2))-b(1,4)*(b(2,2)*b(3,3)-b(2,3)*b(3,2)) + a(2,4) = b(1,1)*(b(2,3)*b(3,4)-b(2,4)*b(3,3))-b(1,3)*(b(2,1)*b(3,4)-b(2,4)*b(3,1))+b(1,4)*(b(2,1)*b(3,3)-b(2,3)*b(3,1)) + a(3,4) = -b(1,1)*(b(2,2)*b(3,4)-b(2,4)*b(3,2))+b(1,2)*(b(2,1)*b(3,4)-b(2,4)*b(3,1))-b(1,4)*(b(2,1)*b(3,2)-b(2,2)*b(3,1)) + a(4,4) = b(1,1)*(b(2,2)*b(3,3)-b(2,3)*b(3,2))-b(1,2)*(b(2,1)*b(3,3)-b(2,3)*b(3,1))+b(1,3)*(b(2,1)*b(3,2)-b(2,2)*b(3,1)) + +end + +subroutine invert5(a,LDA,na,det_l) + implicit none + double precision, intent(inout) :: a (LDA,na) + integer, intent(in) :: LDA + integer, intent(in) :: na + double precision, intent(inout) :: det_l + double precision :: b(5,5) + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: b + integer :: i,j + double precision :: f + det_l = a(1,1)*(a(2,2)*(a(3,3)*(a(4,4)*a(5,5)-a(4,5)*a(5,4))-a(3,4)*( & + a(4,3)*a(5,5)-a(4,5)*a(5,3))+a(3,5)*(a(4,3)*a(5,4)-a(4,4)*a(5,3)))- & + a(2,3)*(a(3,2)*(a(4,4)*a(5,5)-a(4,5)*a(5,4))-a(3,4)*(a(4,2)*a(5,5)- & + a(4,5)*a(5,2))+a(3,5)*(a(4,2)*a(5,4)-a(4,4)*a(5,2)))+a(2,4)*(a(3,2)*( & + 
a(4,3)*a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,2)*a(5,5)-a(4,5)*a(5,2))+ & + a(3,5)*(a(4,2)*a(5,3)-a(4,3)*a(5,2)))-a(2,5)*(a(3,2)*(a(4,3)*a(5,4)- & + a(4,4)*a(5,3))-a(3,3)*(a(4,2)*a(5,4)-a(4,4)*a(5,2))+a(3,4)*(a(4,2)* & + a(5,3)-a(4,3)*a(5,2))))-a(1,2)*(a(2,1)*(a(3,3)*(a(4,4)*a(5,5)-a(4,5)* & + a(5,4))-a(3,4)*(a(4,3)*a(5,5)-a(4,5)*a(5,3))+a(3,5)*(a(4,3)*a(5,4)- & + a(4,4)*a(5,3)))-a(2,3)*(a(3,1)*(a(4,4)*a(5,5)-a(4,5)*a(5,4))-a(3,4)*( & + a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,4)-a(4,4)*a(5,1)))+ & + a(2,4)*(a(3,1)*(a(4,3)*a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,1)*a(5,5)- & + a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,3)-a(4,3)*a(5,1)))-a(2,5)*(a(3,1)*( & + a(4,3)*a(5,4)-a(4,4)*a(5,3))-a(3,3)*(a(4,1)*a(5,4)-a(4,4)*a(5,1))+ & + a(3,4)*(a(4,1)*a(5,3)-a(4,3)*a(5,1))))+a(1,3)*(a(2,1)*(a(3,2)*(a(4,4)* & + a(5,5)-a(4,5)*a(5,4))-a(3,4)*(a(4,2)*a(5,5)-a(4,5)*a(5,2))+a(3,5)*( & + a(4,2)*a(5,4)-a(4,4)*a(5,2)))-a(2,2)*(a(3,1)*(a(4,4)*a(5,5)-a(4,5)* & + a(5,4))-a(3,4)*(a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,4)- & + a(4,4)*a(5,1)))+a(2,4)*(a(3,1)*(a(4,2)*a(5,5)-a(4,5)*a(5,2))-a(3,2)*( & + a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,2)-a(4,2)*a(5,1)))- & + a(2,5)*(a(3,1)*(a(4,2)*a(5,4)-a(4,4)*a(5,2))-a(3,2)*(a(4,1)*a(5,4)- & + a(4,4)*a(5,1))+a(3,4)*(a(4,1)*a(5,2)-a(4,2)*a(5,1))))-a(1,4)*(a(2,1)*( & + a(3,2)*(a(4,3)*a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,2)*a(5,5)-a(4,5)* & + a(5,2))+a(3,5)*(a(4,2)*a(5,3)-a(4,3)*a(5,2)))-a(2,2)*(a(3,1)*(a(4,3)* & + a(5,5)-a(4,5)*a(5,3))-a(3,3)*(a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*( & + a(4,1)*a(5,3)-a(4,3)*a(5,1)))+a(2,3)*(a(3,1)*(a(4,2)*a(5,5)-a(4,5)* & + a(5,2))-a(3,2)*(a(4,1)*a(5,5)-a(4,5)*a(5,1))+a(3,5)*(a(4,1)*a(5,2)- & + a(4,2)*a(5,1)))-a(2,5)*(a(3,1)*(a(4,2)*a(5,3)-a(4,3)*a(5,2))-a(3,2)*( & + a(4,1)*a(5,3)-a(4,3)*a(5,1))+a(3,3)*(a(4,1)*a(5,2)-a(4,2)*a(5,1))))+ & + a(1,5)*(a(2,1)*(a(3,2)*(a(4,3)*a(5,4)-a(4,4)*a(5,3))-a(3,3)*(a(4,2)* & + a(5,4)-a(4,4)*a(5,2))+a(3,4)*(a(4,2)*a(5,3)-a(4,3)*a(5,2)))-a(2,2)*( & + a(3,1)*(a(4,3)*a(5,4)-a(4,4)*a(5,3))-a(3,3)*(a(4,1)*a(5,4)-a(4,4)* & + a(5,1))+a(3,4)*(a(4,1)*a(5,3)-a(4,3)*a(5,1)))+a(2,3)*(a(3,1)*(a(4,2)* & + a(5,4)-a(4,4)*a(5,2))-a(3,2)*(a(4,1)*a(5,4)-a(4,4)*a(5,1))+a(3,4)*( & + a(4,1)*a(5,2)-a(4,2)*a(5,1)))-a(2,4)*(a(3,1)*(a(4,2)*a(5,3)-a(4,3)* & + a(5,2))-a(3,2)*(a(4,1)*a(5,3)-a(4,3)*a(5,1))+a(3,3)*(a(4,1)*a(5,2)- & + a(4,2)*a(5,1)))) + + do i=1,5 + b(1,i) = a(1,i) + b(2,i) = a(2,i) + b(3,i) = a(3,i) + b(4,i) = a(4,i) + b(5,i) = a(5,i) + enddo + + a(1,1) = & + (b(2,2)*(b(3,3)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))+b(3,5)*(b(4,3)*b(5,4)-b(4,4)*b(5,3)))-b(2,3)* & + (b(3,2)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,4)-b(4,4)*b(5,2)))+b(2,4)* & + (b(3,2)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))-b(2,5)* & + (b(3,2)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))+b(3,4)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))) + a(2,1) = & + (-b(2,1)*(b(3,3)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))+b(3,5)*(b(4,3)*b(5,4)-b(4,4)*b(5,3)))+b(2,3)* & + (b(3,1)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,4)-b(4,4)*b(5,1)))-b(2,4)* & + (b(3,1)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))+b(2,5)* & + (b(3,1)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))) + a(3,1) = & + 
(b(2,1)*(b(3,2)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,4)-b(4,4)*b(5,2)))-b(2,2)* & + (b(3,1)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,4)-b(4,4)*b(5,1)))+b(2,4)* & + (b(3,1)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))-b(3,2)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))-b(2,5)* & + (b(3,1)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))-b(3,2)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + a(4,1) = & + (-b(2,1)*(b(3,2)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))+b(2,2)* & + (b(3,1)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))-b(2,3)* & + (b(3,1)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))-b(3,2)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))+b(2,5)* & + (b(3,1)*(b(4,2)*b(5,3)-b(4,3)*b(5,2))-b(3,2)*(b(4,1)*b(5,3)-b(4,3)*b(5,1))+b(3,3)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + a(5,1) = & + (b(2,1)*(b(3,2)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))+b(3,4)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))-b(2,2)* & + (b(3,1)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))+b(2,3)* & + (b(3,1)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))-b(3,2)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))-b(2,4)* & + (b(3,1)*(b(4,2)*b(5,3)-b(4,3)*b(5,2))-b(3,2)*(b(4,1)*b(5,3)-b(4,3)*b(5,1))+b(3,3)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + + a(1,2) = & + (-b(1,2)*(b(3,3)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))+b(3,5)*(b(4,3)*b(5,4)-b(4,4)*b(5,3)))+b(1,3)* & + (b(3,2)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,4)-b(4,4)*b(5,2)))-b(1,4)* & + (b(3,2)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))+b(1,5)* & + (b(3,2)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))+b(3,4)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))) + a(2,2) = & + (b(1,1)*(b(3,3)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))+b(3,5)*(b(4,3)*b(5,4)-b(4,4)*b(5,3)))-b(1,3)* & + (b(3,1)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,4)-b(4,4)*b(5,1)))+b(1,4)* & + (b(3,1)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))-b(1,5)* & + (b(3,1)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))) + a(3,2) = & + (-b(1,1)*(b(3,2)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,4)-b(4,4)*b(5,2)))+b(1,2)* & + (b(3,1)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(3,4)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,4)-b(4,4)*b(5,1)))-b(1,4)* & + (b(3,1)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))-b(3,2)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))+b(1,5)* & + (b(3,1)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))-b(3,2)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + a(4,2) = & + (b(1,1)*(b(3,2)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(3,5)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))-b(1,2)* & + (b(3,1)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(3,3)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))+b(1,3)* & + (b(3,1)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))-b(3,2)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(3,5)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))-b(1,5)* & + 
(b(3,1)*(b(4,2)*b(5,3)-b(4,3)*b(5,2))-b(3,2)*(b(4,1)*b(5,3)-b(4,3)*b(5,1))+b(3,3)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + a(5,2) = & + (-b(1,1)*(b(3,2)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))+b(3,4)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))+b(1,2)* & + (b(3,1)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(3,3)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))-b(1,3)* & + (b(3,1)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))-b(3,2)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(3,4)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))+b(1,4)* & + (b(3,1)*(b(4,2)*b(5,3)-b(4,3)*b(5,2))-b(3,2)*(b(4,1)*b(5,3)-b(4,3)*b(5,1))+b(3,3)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + + a(1,3) = & + (b(1,2)*(b(2,3)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(2,4)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))+b(2,5)*(b(4,3)*b(5,4)-b(4,4)*b(5,3)))-b(1,3)* & + (b(2,2)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(2,4)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(2,5)*(b(4,2)*b(5,4)-b(4,4)*b(5,2)))+b(1,4)* & + (b(2,2)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(2,3)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(2,5)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))-b(1,5)* & + (b(2,2)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(2,3)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))+b(2,4)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))) + a(2,3) = & + (-b(1,1)*(b(2,3)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(2,4)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))+b(2,5)*(b(4,3)*b(5,4)-b(4,4)*b(5,3)))+b(1,3)* & + (b(2,1)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(2,4)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(2,5)*(b(4,1)*b(5,4)-b(4,4)*b(5,1)))-b(1,4)* & + (b(2,1)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(2,3)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(2,5)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))+b(1,5)* & + (b(2,1)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(2,3)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(2,4)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))) + a(3,3) = & + (b(1,1)*(b(2,2)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(2,4)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(2,5)*(b(4,2)*b(5,4)-b(4,4)*b(5,2)))-b(1,2)* & + (b(2,1)*(b(4,4)*b(5,5)-b(4,5)*b(5,4))-b(2,4)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(2,5)*(b(4,1)*b(5,4)-b(4,4)*b(5,1)))+b(1,4)* & + (b(2,1)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))-b(2,2)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(2,5)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))-b(1,5)* & + (b(2,1)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))-b(2,2)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(2,4)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + a(4,3) = & + (-b(1,1)*(b(2,2)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(2,3)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))+b(2,5)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))+b(1,2)* & + (b(2,1)*(b(4,3)*b(5,5)-b(4,5)*b(5,3))-b(2,3)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(2,5)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))-b(1,3)* & + (b(2,1)*(b(4,2)*b(5,5)-b(4,5)*b(5,2))-b(2,2)*(b(4,1)*b(5,5)-b(4,5)*b(5,1))+b(2,5)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))+b(1,5)* & + (b(2,1)*(b(4,2)*b(5,3)-b(4,3)*b(5,2))-b(2,2)*(b(4,1)*b(5,3)-b(4,3)*b(5,1))+b(2,3)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + a(5,3) = & + (b(1,1)*(b(2,2)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(2,3)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))+b(2,4)*(b(4,2)*b(5,3)-b(4,3)*b(5,2)))-b(1,2)* & + (b(2,1)*(b(4,3)*b(5,4)-b(4,4)*b(5,3))-b(2,3)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(2,4)*(b(4,1)*b(5,3)-b(4,3)*b(5,1)))+b(1,3)* & + (b(2,1)*(b(4,2)*b(5,4)-b(4,4)*b(5,2))-b(2,2)*(b(4,1)*b(5,4)-b(4,4)*b(5,1))+b(2,4)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))-b(1,4)* & + (b(2,1)*(b(4,2)*b(5,3)-b(4,3)*b(5,2))-b(2,2)*(b(4,1)*b(5,3)-b(4,3)*b(5,1))+b(2,3)*(b(4,1)*b(5,2)-b(4,2)*b(5,1)))) + + a(1,4) = & + (-b(1,2)*(b(2,3)*(b(3,4)*b(5,5)-b(3,5)*b(5,4))-b(2,4)*(b(3,3)*b(5,5)-b(3,5)*b(5,3))+b(2,5)*(b(3,3)*b(5,4)-b(3,4)*b(5,3)))+b(1,3)* & + (b(2,2)*(b(3,4)*b(5,5)-b(3,5)*b(5,4))-b(2,4)*(b(3,2)*b(5,5)-b(3,5)*b(5,2))+b(2,5)*(b(3,2)*b(5,4)-b(3,4)*b(5,2)))-b(1,4)* & + 
(b(2,2)*(b(3,3)*b(5,5)-b(3,5)*b(5,3))-b(2,3)*(b(3,2)*b(5,5)-b(3,5)*b(5,2))+b(2,5)*(b(3,2)*b(5,3)-b(3,3)*b(5,2)))+b(1,5)* & + (b(2,2)*(b(3,3)*b(5,4)-b(3,4)*b(5,3))-b(2,3)*(b(3,2)*b(5,4)-b(3,4)*b(5,2))+b(2,4)*(b(3,2)*b(5,3)-b(3,3)*b(5,2)))) + a(2,4) = & + (b(1,1)*(b(2,3)*(b(3,4)*b(5,5)-b(3,5)*b(5,4))-b(2,4)*(b(3,3)*b(5,5)-b(3,5)*b(5,3))+b(2,5)*(b(3,3)*b(5,4)-b(3,4)*b(5,3)))-b(1,3)* & + (b(2,1)*(b(3,4)*b(5,5)-b(3,5)*b(5,4))-b(2,4)*(b(3,1)*b(5,5)-b(3,5)*b(5,1))+b(2,5)*(b(3,1)*b(5,4)-b(3,4)*b(5,1)))+b(1,4)* & + (b(2,1)*(b(3,3)*b(5,5)-b(3,5)*b(5,3))-b(2,3)*(b(3,1)*b(5,5)-b(3,5)*b(5,1))+b(2,5)*(b(3,1)*b(5,3)-b(3,3)*b(5,1)))-b(1,5)* & + (b(2,1)*(b(3,3)*b(5,4)-b(3,4)*b(5,3))-b(2,3)*(b(3,1)*b(5,4)-b(3,4)*b(5,1))+b(2,4)*(b(3,1)*b(5,3)-b(3,3)*b(5,1)))) + a(3,4) = & + (-b(1,1)*(b(2,2)*(b(3,4)*b(5,5)-b(3,5)*b(5,4))-b(2,4)*(b(3,2)*b(5,5)-b(3,5)*b(5,2))+b(2,5)*(b(3,2)*b(5,4)-b(3,4)*b(5,2)))+b(1,2)* & + (b(2,1)*(b(3,4)*b(5,5)-b(3,5)*b(5,4))-b(2,4)*(b(3,1)*b(5,5)-b(3,5)*b(5,1))+b(2,5)*(b(3,1)*b(5,4)-b(3,4)*b(5,1)))-b(1,4)* & + (b(2,1)*(b(3,2)*b(5,5)-b(3,5)*b(5,2))-b(2,2)*(b(3,1)*b(5,5)-b(3,5)*b(5,1))+b(2,5)*(b(3,1)*b(5,2)-b(3,2)*b(5,1)))+b(1,5)* & + (b(2,1)*(b(3,2)*b(5,4)-b(3,4)*b(5,2))-b(2,2)*(b(3,1)*b(5,4)-b(3,4)*b(5,1))+b(2,4)*(b(3,1)*b(5,2)-b(3,2)*b(5,1)))) + a(4,4) = & + (b(1,1)*(b(2,2)*(b(3,3)*b(5,5)-b(3,5)*b(5,3))-b(2,3)*(b(3,2)*b(5,5)-b(3,5)*b(5,2))+b(2,5)*(b(3,2)*b(5,3)-b(3,3)*b(5,2)))-b(1,2)* & + (b(2,1)*(b(3,3)*b(5,5)-b(3,5)*b(5,3))-b(2,3)*(b(3,1)*b(5,5)-b(3,5)*b(5,1))+b(2,5)*(b(3,1)*b(5,3)-b(3,3)*b(5,1)))+b(1,3)* & + (b(2,1)*(b(3,2)*b(5,5)-b(3,5)*b(5,2))-b(2,2)*(b(3,1)*b(5,5)-b(3,5)*b(5,1))+b(2,5)*(b(3,1)*b(5,2)-b(3,2)*b(5,1)))-b(1,5)* & + (b(2,1)*(b(3,2)*b(5,3)-b(3,3)*b(5,2))-b(2,2)*(b(3,1)*b(5,3)-b(3,3)*b(5,1))+b(2,3)*(b(3,1)*b(5,2)-b(3,2)*b(5,1)))) + a(5,4) = & + (-b(1,1)*(b(2,2)*(b(3,3)*b(5,4)-b(3,4)*b(5,3))-b(2,3)*(b(3,2)*b(5,4)-b(3,4)*b(5,2))+b(2,4)*(b(3,2)*b(5,3)-b(3,3)*b(5,2)))+b(1,2)* & + (b(2,1)*(b(3,3)*b(5,4)-b(3,4)*b(5,3))-b(2,3)*(b(3,1)*b(5,4)-b(3,4)*b(5,1))+b(2,4)*(b(3,1)*b(5,3)-b(3,3)*b(5,1)))-b(1,3)* & + (b(2,1)*(b(3,2)*b(5,4)-b(3,4)*b(5,2))-b(2,2)*(b(3,1)*b(5,4)-b(3,4)*b(5,1))+b(2,4)*(b(3,1)*b(5,2)-b(3,2)*b(5,1)))+b(1,4)* & + (b(2,1)*(b(3,2)*b(5,3)-b(3,3)*b(5,2))-b(2,2)*(b(3,1)*b(5,3)-b(3,3)*b(5,1))+b(2,3)*(b(3,1)*b(5,2)-b(3,2)*b(5,1)))) + + a(1,5) = & + (b(1,2)*(b(2,3)*(b(3,4)*b(4,5)-b(3,5)*b(4,4))-b(2,4)*(b(3,3)*b(4,5)-b(3,5)*b(4,3))+b(2,5)*(b(3,3)*b(4,4)-b(3,4)*b(4,3)))-b(1,3)* & + (b(2,2)*(b(3,4)*b(4,5)-b(3,5)*b(4,4))-b(2,4)*(b(3,2)*b(4,5)-b(3,5)*b(4,2))+b(2,5)*(b(3,2)*b(4,4)-b(3,4)*b(4,2)))+b(1,4)* & + (b(2,2)*(b(3,3)*b(4,5)-b(3,5)*b(4,3))-b(2,3)*(b(3,2)*b(4,5)-b(3,5)*b(4,2))+b(2,5)*(b(3,2)*b(4,3)-b(3,3)*b(4,2)))-b(1,5)* & + (b(2,2)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))-b(2,3)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))+b(2,4)*(b(3,2)*b(4,3)-b(3,3)*b(4,2)))) + a(2,5) = & + (-b(1,1)*(b(2,3)*(b(3,4)*b(4,5)-b(3,5)*b(4,4))-b(2,4)*(b(3,3)*b(4,5)-b(3,5)*b(4,3))+b(2,5)*(b(3,3)*b(4,4)-b(3,4)*b(4,3)))+b(1,3)* & + (b(2,1)*(b(3,4)*b(4,5)-b(3,5)*b(4,4))-b(2,4)*(b(3,1)*b(4,5)-b(3,5)*b(4,1))+b(2,5)*(b(3,1)*b(4,4)-b(3,4)*b(4,1)))-b(1,4)* & + (b(2,1)*(b(3,3)*b(4,5)-b(3,5)*b(4,3))-b(2,3)*(b(3,1)*b(4,5)-b(3,5)*b(4,1))+b(2,5)*(b(3,1)*b(4,3)-b(3,3)*b(4,1)))+b(1,5)* & + (b(2,1)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))-b(2,3)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))+b(2,4)*(b(3,1)*b(4,3)-b(3,3)*b(4,1)))) + a(3,5) = & + (b(1,1)*(b(2,2)*(b(3,4)*b(4,5)-b(3,5)*b(4,4))-b(2,4)*(b(3,2)*b(4,5)-b(3,5)*b(4,2))+b(2,5)*(b(3,2)*b(4,4)-b(3,4)*b(4,2)))-b(1,2)* & + 
(b(2,1)*(b(3,4)*b(4,5)-b(3,5)*b(4,4))-b(2,4)*(b(3,1)*b(4,5)-b(3,5)*b(4,1))+b(2,5)*(b(3,1)*b(4,4)-b(3,4)*b(4,1)))+b(1,4)* & + (b(2,1)*(b(3,2)*b(4,5)-b(3,5)*b(4,2))-b(2,2)*(b(3,1)*b(4,5)-b(3,5)*b(4,1))+b(2,5)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)))-b(1,5)* & + (b(2,1)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))-b(2,2)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))+b(2,4)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)))) + a(4,5) = & + (-b(1,1)*(b(2,2)*(b(3,3)*b(4,5)-b(3,5)*b(4,3))-b(2,3)*(b(3,2)*b(4,5)-b(3,5)*b(4,2))+b(2,5)*(b(3,2)*b(4,3)-b(3,3)*b(4,2)))+b(1,2)* & + (b(2,1)*(b(3,3)*b(4,5)-b(3,5)*b(4,3))-b(2,3)*(b(3,1)*b(4,5)-b(3,5)*b(4,1))+b(2,5)*(b(3,1)*b(4,3)-b(3,3)*b(4,1)))-b(1,3)* & + (b(2,1)*(b(3,2)*b(4,5)-b(3,5)*b(4,2))-b(2,2)*(b(3,1)*b(4,5)-b(3,5)*b(4,1))+b(2,5)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)))+b(1,5)* & + (b(2,1)*(b(3,2)*b(4,3)-b(3,3)*b(4,2))-b(2,2)*(b(3,1)*b(4,3)-b(3,3)*b(4,1))+b(2,3)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)))) + a(5,5) = & + (b(1,1)*(b(2,2)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))-b(2,3)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))+b(2,4)*(b(3,2)*b(4,3)-b(3,3)*b(4,2)))-b(1,2)* & + (b(2,1)*(b(3,3)*b(4,4)-b(3,4)*b(4,3))-b(2,3)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))+b(2,4)*(b(3,1)*b(4,3)-b(3,3)*b(4,1)))+b(1,3)* & + (b(2,1)*(b(3,2)*b(4,4)-b(3,4)*b(4,2))-b(2,2)*(b(3,1)*b(4,4)-b(3,4)*b(4,1))+b(2,4)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)))-b(1,4)* & + (b(2,1)*(b(3,2)*b(4,3)-b(3,3)*b(4,2))-b(2,2)*(b(3,1)*b(4,3)-b(3,3)*b(4,1))+b(2,3)*(b(3,1)*b(4,2)-b(3,2)*b(4,1)))) + +end + +subroutine invert_update(a,LDA,na,det_l,b) + implicit none + double precision :: a (LDA,na) + double precision :: b (LDA,na) + integer :: LDA + integer :: na + double precision :: det_l + + double precision :: work(LDA*max(na,64)) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: work + integer :: inf + integer :: ipiv(LDA) +!DIR$ ATTRIBUTES ALIGN: $IRP_ALIGN :: ipiv + integer :: lwork + double precision :: f + integer :: i, j + double precision :: bold(LDA,na) + double precision :: ba(LDA,na) + integer :: k + ba = a + call dgetrf(na, na, ba, LDA, ipiv, inf ) + det_l = 1.d0 + j=0 + !DIR$ VECTOR ALIGNED + do i=1,na + if (ipiv(i) /= i) then + j = j+1 + endif + det_l = det_l*a(i,i) + enddo + if (iand(j,1) /= 0) then + det_l = -det_l + endif + do k=1,3 + bold = b + call dgemm('N','N',na,na,na,1.d0,a,LDA,bold,LDA, & + 0.d0, ba, LDA) + call dgemm('N','N',na,na,na,-1.d0,bold,LDA,ba,LDA, & + 2.d0, b, LDA) + enddo + b = b*det_l +end + diff --git a/src/TOOLS/random.irp.f b/src/TOOLS/random.irp.f new file mode 100644 index 0000000..6939e5c --- /dev/null +++ b/src/TOOLS/random.irp.f @@ -0,0 +1,147 @@ +double precision function qmc_ranf() +! L'Ecuyer, P. (1999) `Tables of maximally equidistributed combined LFSR +! generators', Math. of Comput., 68, 261-269. 
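! Combined LFSR (Tausworthe) generator following the reference above: the
! five 64-bit seed components are each advanced by a shift/xor/mask step and
! then combined with IEOR.  The signed 64-bit combination is scaled by
! 5.4210108624275221D-20 (= 2**-64), which maps it into (-0.5,0.5), and the
! final +0.5 shifts the uniform deviate into the unit interval.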
+ implicit none + integer*8 :: b(2) + b(1) = ISHFT( IEOR( ISHFT(seed(1),1), seed(1)), -53) + b(2) = ISHFT( IAND(seed(1),-2_8), 10) + seed(1) = IEOR( b(2), b(1)) + + b(1) = ISHFT( IEOR( ISHFT(seed(2),24), seed(2)), -50) + b(2) = ISHFT( IAND(seed(2),-512_8), 5) + seed(2) = IEOR( b(2), b(1)) + + b(1) = ISHFT( IEOR( ISHFT(seed(3),3), seed(3)), -23) + b(2) = ISHFT( IAND(seed(3),-4096_8), 29) + seed(3) = IEOR( b(2), b(1)) + + b(1) = ISHFT( IEOR( ISHFT(seed(4),5), seed(4)), -24) + b(2) = ISHFT( IAND(seed(4),-131072_8), 23) + seed(4) = IEOR( b(2), b(1)) + + b(1) = ISHFT( IEOR( ISHFT(seed(5),3), seed(5)), -33) + b(2) = ISHFT( IAND(seed(5),-8388608_8), 8) + seed(5) = IEOR( b(2), b(1)) + + qmc_ranf = IEOR( IEOR( IEOR( IEOR(seed(1),seed(2)), seed(3)), & + seed(4)), seed(5)) * 5.4210108624275221D-20 + 0.5D0 + ASSERT ( qmc_ranf >= 0.d0 ) + ASSERT ( qmc_ranf <= 1.d0 ) + +end + +subroutine ranf_array(isize,res) + implicit none + integer :: isize + double precision :: res(isize) + integer :: i + double precision :: qmc_ranf + + do i=1,isize + res(i) = qmc_ranf() + enddo +end + +BEGIN_PROVIDER [ integer*8, seed, (5) ] + implicit none + BEGIN_DOC +! Seeds data +! Initialized by init_random + END_DOC + integer :: iargc + integer*8 :: i,j + integer*4 :: clock(12) + double precision :: r + integer*8 :: pid8 + read(current_PID,*) pid8 + pid8 = iand( ishft(pid8, 32), pid8) + do i=1,12 + clock(i) = i + enddo + call system_clock(count=clock(1)) + call random_seed(put=clock) + do i=1,5 + call random_number(r) + seed(i) = (r-0.5d0)*huge(1_8) + seed(i) = ieor( seed(i), pid8) + do j=1,16 + seed(i) = ishft(seed(i),1)+1 + enddo + enddo + +END_PROVIDER + +subroutine gauss_array(isize,res) + implicit none + include '../constants.F' + integer isize + double precision res(isize) + + double precision u1(isize),u2(isize) + integer i + + call ranf_array(isize,u1) + call ranf_array(isize,u2) + do i=1,isize + res(i)=sqrt(-2.d0*log(u1(i)))*cos(dtwo_pi*u2(i)) + enddo +end + +double precision function gauss() + implicit none +! include 'constants.F' + double precision :: qmc_ranf +! double precision :: u1,u2 +! u1=qmc_ranf() +! u2=qmc_ranf() +! 
gauss=sqrt(-2.d0*dlog(u1))*cos(dfour_pi*u2) + double precision :: inverse_normal_cdf + gauss = inverse_normal_cdf(qmc_ranf()) +end + +double precision function inverse_normal_cdf(p) + implicit none + double precision, intent(in) :: p + double precision :: p_low,p_high + double precision :: a1,a2,a3,a4,a5,a6 + double precision :: b1,b2,b3,b4,b5 + double precision :: c1,c2,c3,c4,c5,c6 + double precision :: d1,d2,d3,d4 + double precision :: z,q,r + double precision :: qmc_ranf + a1=-39.6968302866538d0 + a2=220.946098424521d0 + a3=-275.928510446969d0 + a4=138.357751867269d0 + a5=-30.6647980661472d0 + a6=2.50662827745924d0 + b1=-54.4760987982241d0 + b2=161.585836858041d0 + b3=-155.698979859887d0 + b4=66.8013118877197d0 + b5=-13.2806815528857d0 + c1=-0.00778489400243029d0 + c2=-0.322396458041136d0 + c3=-2.40075827716184d0 + c4=-2.54973253934373d0 + c5=4.37466414146497d0 + c6=2.93816398269878d0 + d1=0.00778469570904146d0 + d2=0.32246712907004d0 + d3=2.445134137143d0 + d4=3.75440866190742d0 + p_low=0.02425d0 + p_high=1.d0-0.02425d0 + if(p < p_low) then + q=dsqrt(-2.d0*dlog(p)) + inverse_normal_cdf=(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q+c6)/((((d1*q+d2)*q+d3)*q+d4)*q+1.d0) + else if(p <= p_high) then + q=p-0.5d0 + r=q*q + inverse_normal_cdf=(((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r+a6)*q/(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r+1.d0) + else + q=dsqrt(-2.d0*dlog(max(tiny(1.d0),1.d0-p))) + inverse_normal_cdf=-(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q+c6)/((((d1*q+d2)*q+d3)*q+d4)*q+1) + endif +end + diff --git a/src/TOOLS/sort.irp.f b/src/TOOLS/sort.irp.f new file mode 100644 index 0000000..eb6ccc7 --- /dev/null +++ b/src/TOOLS/sort.irp.f @@ -0,0 +1,128 @@ +BEGIN_TEMPLATE + subroutine insertion_$Xsort (x,iorder,isize) + implicit none + $type,intent(inout) :: x(isize) + integer,intent(inout) :: iorder(isize) + integer,intent(in) :: isize + $type :: xtmp + integer :: i, i0, j, jmax + + do i=1,isize + xtmp = x(i) + i0 = iorder(i) + j = i-1 + do j=i-1,1,-1 + if ( x(j) > xtmp ) then + x(j+1) = x(j) + iorder(j+1) = iorder(j) + else + exit + endif + enddo + x(j+1) = xtmp + iorder(j+1) = i0 + enddo + + end subroutine insertion_$Xsort + + subroutine heap_$Xsort(x,iorder,isize) + implicit none + $type,intent(inout) :: x(isize) + integer,intent(inout) :: iorder(isize) + integer,intent(in) :: isize + + integer :: i, k, j, l, i0 + $type :: xtemp + + l = isize/2+1 + k = isize + do while (.True.) + if (l>1) then + l=l-1 + xtemp = x(l) + i0 = iorder(l) + else + xtemp = x(k) + i0 = iorder(k) + x(k) = x(1) + iorder(k) = iorder(1) + k = k-1 + if (k == 1) then + x(1) = xtemp + iorder(1) = i0 + exit + endif + endif + i=l + j = ishft(l,1) + do while (j 0) then + exit + endif + if (l==2*block_time) then + call abrt(irp_here, 'Unable to get walkers') + endif + call sleep(1) + enddo + sze += rc + rc = f77_zmq_msg_copy_from_data(msg, buffer) + rc = f77_zmq_msg_close(msg) + buffer = trim(adjustl(buffer)) + read(buffer, '(F20.14)') elec_coord_full_out(i,j,k) + enddo + enddo + enddo + call worker_log(irp_here, 'Walkers received') + rc = f77_zmq_msg_destroy(msg) +end + + + + + diff --git a/src/ZMQ/worker.irp.f b/src/ZMQ/worker.irp.f new file mode 100644 index 0000000..d66e2e1 --- /dev/null +++ b/src/ZMQ/worker.irp.f @@ -0,0 +1,394 @@ + +! Functions +! ========= + +subroutine zmq_register_worker(msg) + use f77_zmq + implicit none + BEGIN_DOC +! 
Register a new worker to the forwarder + END_DOC + integer(ZMQ_PTR) :: msg + integer :: i,rc + + rc = f77_zmq_msg_init_size(msg,8) + rc = f77_zmq_msg_copy_to_data(msg, 'register',8) + rc = f77_zmq_msg_send(msg,zmq_to_dataserver_socket,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + + + character*(64) :: buffer + + rc = f77_zmq_msg_init_size(msg,64) + write(buffer,'(A)') trim(hostname) + buffer = adjustl(trim(buffer)) + rc = f77_zmq_msg_copy_to_data(msg, buffer, len(buffer)) + rc = f77_zmq_msg_send(msg,zmq_to_dataserver_socket,ZMQ_SNDMORE) + if (rc == -1) then + call abrt(irp_here, 'Unable to send register message (1)') + endif + rc = f77_zmq_msg_close(msg) + + call worker_log(irp_here, 'Registering') + + rc = f77_zmq_msg_init_size(msg,8) + rc = f77_zmq_msg_copy_to_data(msg, current_PID, 8) + rc = f77_zmq_msg_send(msg,zmq_to_dataserver_socket,0) + if (rc == -1) then + call abrt(irp_here, 'Unable to send register message (2)') + endif + rc = f77_zmq_msg_close(msg) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, 32, 0) + if (buffer(1:2)/='OK') then + call abrt(irp_here, 'Register failed '//trim(http_server)) + endif + call worker_log(irp_here, 'Registered') + +end + + +subroutine zmq_unregister_worker(msg) + use f77_zmq + implicit none + BEGIN_DOC +! Unregister a new worker to the forwarder + END_DOC + integer(ZMQ_PTR) :: msg + integer :: i,rc + + call worker_log(irp_here, 'Unregistering') + rc = f77_zmq_msg_init_size(msg,10) + rc = f77_zmq_msg_copy_to_data(msg, 'unregister',10) + rc = f77_zmq_msg_send(msg,zmq_to_dataserver_socket,ZMQ_SNDMORE) + if (rc == -1) then + call abrt(irp_here, 'Unable to send unregister message (1)') + endif + rc = f77_zmq_msg_close(msg) + + character*(64) :: buffer + + rc = f77_zmq_msg_init_size(msg,64) + write(buffer,'(A)') trim(hostname) + buffer = adjustl(trim(buffer)) + rc = f77_zmq_msg_copy_to_data(msg, buffer, len(buffer)) + rc = f77_zmq_msg_send(msg,zmq_to_dataserver_socket,ZMQ_SNDMORE) + if (rc == -1) then + call abrt(irp_here, 'Unable to send unregister message (2)') + endif + rc = f77_zmq_msg_close(msg) + + rc = f77_zmq_msg_init_size(msg,8) + rc = f77_zmq_msg_copy_to_data(msg, current_PID, 8) + rc = f77_zmq_msg_send(msg,zmq_to_dataserver_socket,0) + if (rc == -1) then + call abrt(irp_here, 'Unable to send unregister message (3)') + endif + rc = f77_zmq_msg_close(msg) + + ! Timeout 15 seconds + rc = -1 + do i=1,15 + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, 32, ZMQ_NOBLOCK) + if (rc == 2) then + call worker_log(irp_here, 'Unregistered') + return + endif + call worker_log(irp_here, 'Unregister failed. Retrying') + call sleep(1) + enddo + call abrt(irp_here, 'Unregister failed') + + +end + + + +subroutine zmq_send_header(msg,header,block_id) + use f77_zmq + implicit none + BEGIN_DOC + ! 
Receive the header of the multi-part message + END_DOC + integer(ZMQ_PTR), intent(in) :: msg + character*(*), intent(in) :: header + integer, intent(in) :: block_id + integer :: rc, size + character*(16) :: pid_str + + size = len(trim(header)) + rc = f77_zmq_msg_init_size(msg,size) + rc = f77_zmq_msg_copy_to_data(msg, header,size) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + + character*(64) :: buffer + + rc = f77_zmq_msg_init_size(msg,64) + write(buffer,'(A)') trim(hostname) + buffer = adjustl(trim(buffer)) + rc = f77_zmq_msg_copy_to_data(msg, buffer, len(buffer)) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + + call worker_log(irp_here, header) + rc = f77_zmq_msg_init_size(msg,8) + rc = f77_zmq_msg_copy_to_data(msg, current_PID, 8) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + + rc = f77_zmq_msg_init_size(msg,8) + write(buffer,'(I8)') block_id + buffer = adjustl(trim(buffer)) + rc = f77_zmq_msg_copy_to_data(msg, buffer, 8) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + +end + + + +subroutine zmq_send_scalar_prop(msg,weight,value) + use f77_zmq + implicit none + BEGIN_DOC + ! Send a double precision average over the trajectory + END_DOC + integer(ZMQ_PTR) :: msg + double precision :: weight, value + integer :: rc,sze + character*(32) :: buffer + + write(buffer,'(E32.16)') weight + buffer = adjustl(trim(buffer)) + sze = len(buffer) + rc = f77_zmq_msg_init_size(msg,len(buffer)) + rc = f77_zmq_msg_copy_to_data(msg, buffer,sze) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + + write(buffer,'(E32.16)') value + buffer = adjustl(trim(buffer)) + rc = f77_zmq_msg_init_size(msg,len(buffer)) + rc = f77_zmq_msg_copy_to_data(msg, buffer,len(buffer)) + rc = f77_zmq_msg_send(msg,zmq_socket_push,0) + rc = f77_zmq_msg_close(msg) + + call worker_log(irp_here,'') +end + + +subroutine zmq_send_array_prop(msg,weight,value,isize) + use f77_zmq + implicit none + BEGIN_DOC + ! Send a double precision average over the trajectory + END_DOC + integer(ZMQ_PTR) :: msg + integer :: isize + double precision :: weight, value(isize) + integer :: rc,i,l, sze + character*(32) :: buffer + + write(buffer,'(I8)') isize + buffer = adjustl(trim(buffer)) + l = len(buffer) + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + sze = l + + write(buffer,'(E32.16)') weight + buffer = adjustl(trim(buffer)) + l = len(buffer) + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + sze += l + + do i=1,isize + write(buffer,'(E32.16)') value(i) + buffer = adjustl(trim(buffer)) + l = len(buffer) + sze += l + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + if (i < isize) then + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + else + rc = f77_zmq_msg_send(msg,zmq_socket_push,0) + endif + rc = f77_zmq_msg_close(msg) + enddo + + call worker_log(irp_here,'') +end + + + +subroutine zmq_send_info(msg,message) + implicit none + BEGIN_DOC + ! 
Send an info message to the forwarder + END_DOC + integer(ZMQ_PTR) :: msg + character*(64) :: message + integer :: rc + integer :: isize + + isize = len(trim(message)) + rc = f77_zmq_msg_init_size(msg,isize) + rc = f77_zmq_msg_copy_to_data(msg, trim(message),isize) + rc = f77_zmq_msg_send(msg,zmq_socket_push,0) + rc = f77_zmq_msg_close(msg) + call worker_log(irp_here, message) + +end + + +subroutine zmq_send_int(msg,value,isize) + use f77_zmq + implicit none + BEGIN_DOC + ! Send an integer array of size n to the forwarder + END_DOC + integer(ZMQ_PTR) :: msg + integer :: isize + integer :: value(isize) + integer :: rc,i,l, sze + character*(32) :: buffer + + write(buffer,'(I8)') isize + buffer = adjustl(trim(buffer)) + l = len(buffer) + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + sze = l + + do i=1,isize + write(buffer,'(I16)') value(i) + buffer = adjustl(trim(buffer)) + l = len(buffer) + sze += l + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + if (i < isize) then + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + else + rc = f77_zmq_msg_send(msg,zmq_socket_push,0) + endif + rc = f77_zmq_msg_close(msg) + enddo + + call worker_log(irp_here,'') +end + + +subroutine zmq_send_real(msg,value,isize) + use f77_zmq + implicit none + BEGIN_DOC + ! Send a real array of size n to the forwarder + END_DOC + integer(ZMQ_PTR) :: msg + integer :: isize + real :: value(isize) + integer :: rc,i,l, sze + character*(32) :: buffer + + write(buffer,'(I8)') isize + buffer = adjustl(trim(buffer)) + l = len(buffer) + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + rc = f77_zmq_msg_close(msg) + sze = l + + do i=1,isize + write(buffer,'(E32.16)') value(i) + buffer = adjustl(trim(buffer)) + l = len(buffer) + sze += l + rc = f77_zmq_msg_init_size(msg,l) + rc = f77_zmq_msg_copy_to_data(msg, buffer,l) + if (i < isize) then + rc = f77_zmq_msg_send(msg,zmq_socket_push,ZMQ_SNDMORE) + else + rc = f77_zmq_msg_send(msg,zmq_socket_push,0) + endif + rc = f77_zmq_msg_close(msg) + enddo + + call worker_log(irp_here,'') +end + + + +subroutine get_running(do_run) + use f77_zmq + implicit none + include '../types.F' + BEGIN_DOC +! Fetches the 'do_run' information + END_DOC + integer :: do_run + integer :: rc, timeout + character*(16) :: buffer + integer(ZMQ_PTR), save :: pollitem = 0_ZMQ_PTR + + if (.not.is_worker) then + do_run = t_Running + return + else + + timeout = 5 ! seconds + + ! Polling items + ! ------------- + + if (pollitem == 0_ZMQ_PTR) then + pollitem = f77_zmq_pollitem_new(1) + rc = f77_zmq_pollitem_set_socket(pollitem,1,zmq_socket_running) + rc = f77_zmq_pollitem_set_events(pollitem,1,ZMQ_POLLIN) + endif + + ! Check for disconnected forwarder after timeout + ! ---------------------------------------------- + + buffer = 'Stopped' + do while (timeout > 0) + rc = f77_zmq_poll(pollitem, 1, 100_8) + if (iand(f77_zmq_pollitem_revents(pollitem,1), ZMQ_POLLIN) /= 0) then + exit + endif + timeout = timeout-1 + call sleep(1) + enddo + + ! Empty the queue to get only the last value + ! 
------------------------------------------ + + do + rc = f77_zmq_poll(pollitem, 1, 0) + if (iand(f77_zmq_pollitem_revents(pollitem,1), ZMQ_POLLIN) == 0) then + exit + endif + rc = f77_zmq_recv(zmq_socket_running, buffer, 16, 0) + enddo + + if (buffer == 'Running') then + do_run = t_Running + else if (buffer == 'Queued') then + do_run = t_Running + else + do_run = t_Stopped + endif + call worker_log(irp_here,buffer) + + endif +end diff --git a/src/ZMQ/zmq_ezfio.irp.f b/src/ZMQ/zmq_ezfio.irp.f new file mode 100644 index 0000000..e239ea8 --- /dev/null +++ b/src/ZMQ/zmq_ezfio.irp.f @@ -0,0 +1,234 @@ + +subroutine zmq_ezfio_has(cmd_in,exists) + use f77_zmq + implicit none + character*(*) :: cmd_in + logical, intent(out) :: exists + BEGIN_DOC + ! ezfio_has through a ZMQ connexion + END_DOC + integer :: rc + character*(128) :: cmd + + cmd = 'has_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + if (rc /= 5) then + print *, irp_here, rc + stop 1 + endif + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + if (rc < 0) then + print *, irp_here, rc + stop 2 + endif + + character(len=8) :: buffer + integer :: buffer_size + character*(4) :: buffer_size_char + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, & + len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + + logical :: w + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), *) w + + exists = w + +end + +subroutine zmq_ezfio_get_logical(cmd_in,w,d) + implicit none + use f77_zmq + BEGIN_DOC +! Fetch a logical variable in EZFIO using ZMQ + END_DOC + + integer :: d + logical :: w(d) + character*(*) :: cmd_in + character*(128) :: cmd + + character(len=:), allocatable :: buffer + integer :: buffer_size + character*(20) :: buffer_size_char + integer :: rc + + cmd = 'get_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + allocate (character(len=buffer_size) :: buffer) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), *) w(1:d) + deallocate(buffer) +end + + + +subroutine zmq_ezfio_get_double_precision(cmd_in,w,d) + implicit none + use f77_zmq + BEGIN_DOC + ! Fetch a double precision variable + END_DOC + + integer :: d + double precision :: w(d) + character*(*) :: cmd_in + character*(128) :: cmd + + character(len=:), allocatable :: buffer + integer :: buffer_size + character*(20) :: buffer_size_char + integer :: rc + + cmd = 'get_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + allocate (character(len=buffer_size) :: buffer) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), *) w(1:d) + + deallocate(buffer) +end + + +subroutine zmq_ezfio_get_integer(cmd_in,w,d) + implicit none + use f77_zmq + BEGIN_DOC + ! 
Fetch an integer variable + END_DOC + + integer :: d + integer :: w(d) + character*(*) :: cmd_in + character*(128) :: cmd + + character(len=:), allocatable :: buffer + integer :: buffer_size + character*(20) :: buffer_size_char + integer :: rc + + cmd = 'get_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + allocate (character(len=buffer_size) :: buffer) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), *) w(1:d) + + deallocate(buffer) +end + + +subroutine zmq_ezfio_get_integer8(cmd_in,w,d) + implicit none + use f77_zmq + BEGIN_DOC + ! Fetch an integer*8 variable + END_DOC + + integer :: d + integer*8 :: w(d) + character*(*) :: cmd_in + character*(128) :: cmd + + character(len=:), allocatable :: buffer + integer :: buffer_size + character*(20) :: buffer_size_char + integer :: rc + + cmd = 'get_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + allocate (character(len=buffer_size) :: buffer) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), *) w(1:d) + + deallocate(buffer) +end + + +subroutine zmq_ezfio_get_real(cmd_in,w,d) + implicit none + use f77_zmq + BEGIN_DOC + ! Fetch a real variable + END_DOC + + integer :: rc + + integer :: d + real :: w(d) + character*(*) :: cmd_in + character*(128) :: cmd + + character(len=:), allocatable :: buffer + integer :: buffer_size + character*(20) :: buffer_size_char + + cmd = 'get_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + allocate (character(len=buffer_size) :: buffer) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), *) w(1:d) + + deallocate(buffer) +end + + +subroutine zmq_ezfio_get_character(cmd_in,w,d) + implicit none + use f77_zmq + BEGIN_DOC + ! Fetch a text variable + END_DOC + + integer :: rc + + integer :: d + character*(*) :: w + character*(*) :: cmd_in + character*(128) :: cmd + + character(len=:), allocatable :: buffer + integer :: buffer_size + character*(20) :: buffer_size_char + + cmd = 'get_'//cmd_in + rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE) + rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, len(buffer_size_char), ZMQ_SNDMORE) + read(buffer_size_char(1:rc),*) buffer_size + allocate (character(len=buffer_size) :: buffer) + + rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0) + read( buffer(1:rc), '(A)') w + + deallocate(buffer) +end + +
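The zmq_ezfio_get_* routines above all repeat the same wire protocol: send an 'Ezfio' frame, send the 'get_'//key command, then receive a size frame followed by a text payload of that size, which is parsed with a list-directed read. As a minimal sketch of that shared pattern (not part of the patch itself), the hypothetical helper below -- the name zmq_ezfio_get_buffer and its interface are invented for illustration -- performs the request/reply exchange and returns the raw payload. It assumes, exactly as the routines above do, that zmq_to_dataserver_socket is already connected, and it uses only the f77_zmq calls that already appear in this diff.

subroutine zmq_ezfio_get_buffer(cmd_in, buffer, buffer_size)
  use f77_zmq
  implicit none
  BEGIN_DOC
! Sketch only: shared request/reply pattern of the zmq_ezfio_get_* routines.
  END_DOC
  character*(*), intent(in)                  :: cmd_in
  character(len=:), allocatable, intent(out) :: buffer
  integer, intent(out)                       :: buffer_size

  character*(128) :: cmd
  character*(20)  :: buffer_size_char
  integer         :: rc

  ! Request: an 'Ezfio' frame followed by the 'get_<key>' command
  cmd = 'get_'//cmd_in
  rc = f77_zmq_send(zmq_to_dataserver_socket, 'Ezfio', 5, ZMQ_SNDMORE)
  rc = f77_zmq_send(zmq_to_dataserver_socket, cmd, len_trim(cmd), 0)

  ! Reply: a size frame, then a text payload of that size
  rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer_size_char, &
      len(buffer_size_char), ZMQ_SNDMORE)
  read(buffer_size_char(1:rc),*) buffer_size
  allocate (character(len=buffer_size) :: buffer)
  rc = f77_zmq_recv(zmq_to_dataserver_socket, buffer, buffer_size, 0)
end

A typed getter such as zmq_ezfio_get_real would then reduce to calling this helper, reading w(1:d) from buffer(1:buffer_size) with a list-directed read, and deallocating the buffer.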