From 6513358da3907d80c863c45a45a85e4f732a4116 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Tue, 11 Oct 2022 11:03:10 +0200 Subject: [PATCH 01/10] merged with dev-tc --- src/ao_tc_eff_map/EZFIO.cfg | 12 +- src/ao_tc_eff_map/compute_ints_eff_pot.irp.f | 22 +- src/ao_tc_eff_map/j1b_1eInteg.py | 299 ------ src/ao_tc_eff_map/j1b_pen.irp.f | 71 +- src/ao_tc_eff_map/map_integrals_eff_pot.irp.f | 84 +- src/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f | 332 +++++++ src/ao_tc_eff_map/one_e_1bgauss_hermit.irp.f | 519 ----------- src/ao_tc_eff_map/one_e_1bgauss_lap.irp.f | 303 +++++++ src/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f | 142 ++- src/ao_tc_eff_map/two_e_1bgauss_coul.irp.f | 800 ---------------- .../two_e_1bgauss_coul_acc.irp.f | 433 --------- .../two_e_1bgauss_coul_debug.irp.f | 397 -------- .../two_e_1bgauss_coul_modifdebug.irp.f | 324 ------- src/ao_tc_eff_map/two_e_1bgauss_coulerf.irp.f | 102 --- src/ao_tc_eff_map/two_e_1bgauss_erf.irp.f | 854 ------------------ src/ao_tc_eff_map/two_e_1bgauss_erf_acc.irp.f | 433 --------- ..._schwartz.irp.f => two_e_1bgauss_j1.irp.f} | 148 ++- src/ao_tc_eff_map/two_e_1bgauss_j2.irp.f | 729 +++++++++++++++ src/ao_tc_eff_map/useful_sub.irp.f | 364 ++++++++ src/bi_ort_ints/one_e_bi_ort.irp.f | 4 +- 20 files changed, 2089 insertions(+), 4283 deletions(-) delete mode 100644 src/ao_tc_eff_map/j1b_1eInteg.py create mode 100644 src/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f delete mode 100644 src/ao_tc_eff_map/one_e_1bgauss_hermit.irp.f create mode 100644 src/ao_tc_eff_map/one_e_1bgauss_lap.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_coul.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_coul_acc.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_coul_debug.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_coul_modifdebug.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_coulerf.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_erf.irp.f delete mode 100644 src/ao_tc_eff_map/two_e_1bgauss_erf_acc.irp.f rename src/ao_tc_eff_map/{two_e_1bgauss_coulerf_schwartz.irp.f => two_e_1bgauss_j1.irp.f} (86%) create mode 100644 src/ao_tc_eff_map/two_e_1bgauss_j2.irp.f create mode 100644 src/ao_tc_eff_map/useful_sub.irp.f diff --git a/src/ao_tc_eff_map/EZFIO.cfg b/src/ao_tc_eff_map/EZFIO.cfg index 1c72e2f5..1df2a130 100644 --- a/src/ao_tc_eff_map/EZFIO.cfg +++ b/src/ao_tc_eff_map/EZFIO.cfg @@ -1,12 +1,18 @@ -[j1b_gauss_pen] +[j1b_pen] type: double precision doc: exponents of the 1-body Jastrow interface: ezfio size: (nuclei.nucl_num) -[j1b_gauss] +[j1b_coeff] +type: double precision +doc: coeff of the 1-body Jastrow +interface: ezfio +size: (nuclei.nucl_num) + +[j1b_type] type: integer -doc: Use 1-body Gaussian Jastrow +doc: type of 1-body Jastrow interface: ezfio, provider, ocaml default: 0 diff --git a/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f b/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f index 6196f56e..2e7e21c0 100644 --- a/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f +++ b/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f @@ -1,9 +1,11 @@ + + subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_value) use map_module BEGIN_DOC - ! Parallel client for AO integrals of the TC integrals involving purely hermitian operators + ! 
Parallel client for AO integrals END_DOC implicit none @@ -21,13 +23,10 @@ subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_va logical, external :: ao_two_e_integral_zero double precision :: ao_tc_sym_two_e_pot, ao_two_e_integral_erf - double precision :: j1b_gauss_erf, j1b_gauss_coul - double precision :: j1b_gauss_coul_debug - double precision :: j1b_gauss_coul_modifdebug - double precision :: j1b_gauss_coulerf + double precision :: j1b_gauss_2e_j1, j1b_gauss_2e_j2 - PROVIDE j1b_gauss + PROVIDE j1b_type thr = ao_integrals_threshold @@ -45,7 +44,7 @@ subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_va exit endif - if (ao_two_e_integral_erf_schwartz(i,k)*ao_two_e_integral_erf_schwartz(j,l) < thr ) then + if (ao_two_e_integral_erf_schwartz(i,k)*ao_two_e_integral_erf_schwartz(j,l) < thr) then cycle endif @@ -54,9 +53,12 @@ subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_va integral_erf = ao_two_e_integral_erf(i, k, j, l) integral = integral_erf + integral_pot - if( j1b_gauss .eq. 1 ) then - integral = integral & - + j1b_gauss_coulerf(i, k, j, l) + if( j1b_type .eq. 1 ) then + !print *, ' j1b type 1 is added' + integral = integral + j1b_gauss_2e_j1(i, k, j, l) + elseif( j1b_type .eq. 2 ) then + !print *, ' j1b type 2 is added' + integral = integral + j1b_gauss_2e_j2(i, k, j, l) endif diff --git a/src/ao_tc_eff_map/j1b_1eInteg.py b/src/ao_tc_eff_map/j1b_1eInteg.py deleted file mode 100644 index 53fb1a41..00000000 --- a/src/ao_tc_eff_map/j1b_1eInteg.py +++ /dev/null @@ -1,299 +0,0 @@ -import sys, os -QP_PATH=os.environ["QP_EZFIO"] -sys.path.insert(0,QP_PATH+"/Python/") -from ezfio import ezfio -from datetime import datetime -import time -from math import exp, sqrt, pi -import numpy as np -import subprocess -from scipy.integrate import tplquad -import multiprocessing -from multiprocessing import Pool - - -# _____________________________________________________________________________ -# -def read_ao(): - - with open('ao_data') as f: - lines = f.readlines() - - ao_prim_num = np.zeros((ao_num), dtype=int) - ao_nucl = np.zeros((ao_num), dtype=int) - ao_power = np.zeros((ao_num, 3)) - nucl_coord = np.zeros((ao_num, 3)) - ao_expo = np.zeros((ao_num, ao_num)) - ao_coef = np.zeros((ao_num, ao_num)) - - iline = 0 - for j in range(ao_num): - - line = lines[iline] - iline += 1 - ao_nucl[j] = int(line) - 1 - - line = lines[iline].split() - iline += 1 - ao_power[j, 0] = float(line[0]) - ao_power[j, 1] = float(line[1]) - ao_power[j, 2] = float(line[2]) - - line = lines[iline].split() - iline += 1 - nucl_coord[ao_nucl[j], 0] = float(line[0]) - nucl_coord[ao_nucl[j], 1] = float(line[1]) - nucl_coord[ao_nucl[j], 2] = float(line[2]) - - line = lines[iline] - iline += 1 - ao_prim_num[j] = int(line) - - for l in range(ao_prim_num[j]): - - line = lines[iline].split() - iline += 1 - ao_expo[l, j] = float(line[0]) - ao_coef[l, j] = float(line[1]) - - return( ao_prim_num - , ao_nucl - , ao_power - , nucl_coord - , ao_expo - , ao_coef ) -# _____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -def Gao(X, i_ao): - - ii = ao_nucl[i_ao] - C = np.array([nucl_coord[ii,0], nucl_coord[ii,1], nucl_coord[ii,2]]) - Y = X - C - dis = np.dot(Y,Y) - - ip = np.array([ao_power[i_ao,0], ao_power[i_ao,1], ao_power[i_ao,2]]) - pol = np.prod(Y**ip) - - xi = np.sum( ao_coef[:,i_ao] * np.exp(-dis*ao_expo[:,i_ao]) ) - - return(xi*pol) -# 
_____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -def grad_Gao(X, i_ao): - - ii = ao_nucl[i_ao] - C = np.array([nucl_coord[ii,0], nucl_coord[ii,1], nucl_coord[ii,2]]) - - ix = ao_power[i_ao,0] - iy = ao_power[i_ao,1] - iz = ao_power[i_ao,2] - - Y = X - C - dis = np.dot(Y,Y) - - xm = np.sum( ao_coef[:,i_ao]*np.exp(-dis*ao_expo[:,i_ao])) - xp = np.sum(ao_expo[:,i_ao]*ao_coef[:,i_ao]*np.exp(-dis*ao_expo[:,i_ao])) - - ip = np.array([ix+1, iy, iz]) - dx = -2. * np.prod(Y**ip) * xp - if(ix > 0): - ip = np.array([ix-1, iy, iz]) - dx += ix * np.prod(Y**ip) * xm - - ip = np.array([ix, iy+1, iz]) - dy = -2. * np.prod(Y**ip) * xp - if(iy > 0): - ip = np.array([ix, iy-1, iz]) - dy += iy * np.prod(Y**ip) * xm - - ip = np.array([ix, iy, iz+1]) - dz = -2. * np.prod(Y**ip) * xp - if(iz > 0): - ip = np.array([ix, iy, iz-1]) - dz += iz * np.prod(Y**ip) * xm - - return(np.array([dx, dy, dz])) -# _____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -# 3 x < XA | exp[-gama r_C^2] | XB > -# - 2 x < XA | r_A^2 exp[-gama r_C^2] | XB > -# -def integ_lap(z, y, x, i_ao, j_ao): - - X = np.array([x, y, z]) - - Gi = Gao(X, i_ao) - Gj = Gao(X, j_ao) - - c = 0. - for k in range(nucl_num): - gama = j1b_gauss_pen[k] - C = nucl_coord[k,:] - Y = X - C - dis = np.dot(Y, Y) - arg = exp(-gama*dis) - arg = exp(-gama*dis) - c += ( 3. - 2. * dis * gama ) * arg * gama * Gi * Gj - - return(c) -# _____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -# -def integ_grad2(z, y, x, i_ao, j_ao): - - X = np.array([x, y, z]) - - Gi = Gao(X, i_ao) - Gj = Gao(X, j_ao) - - c = np.zeros((3)) - for k in range(nucl_num): - gama = j1b_gauss_pen[k] - C = nucl_coord[k,:] - Y = X - C - c += gama * exp(-gama*np.dot(Y, Y)) * Y - - return(-2*np.dot(c,c)*Gi*Gj) -# _____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -# -def integ_nonh(z, y, x, i_ao, j_ao): - - X = np.array([x, y, z]) - - Gi = Gao(X, i_ao) - - c = 0. - for k in range(nucl_num): - gama = j1b_gauss_pen[k] - C = nucl_coord[k,:] - Y = X - C - grad = grad_Gao(X, j_ao) - c += gama * exp(-gama*np.dot(Y,Y)) * np.dot(Y,grad) - - return(2*c*Gi) -# _____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -def perform_integ( ind_ao ): - - i_ao = ind_ao[0] - j_ao = ind_ao[1] - - a = -15. #-np.Inf - b = +15. 
#+np.Inf - epsrel = 1e-5 - - res_lap, err_lap = tplquad( integ_lap - , a, b - , lambda x : a, lambda x : b - , lambda x,y: a, lambda x,y: b - , (i_ao, j_ao) - , epsrel=epsrel ) - - res_grd, err_grd = tplquad( integ_grad2 - , a, b - , lambda x : a, lambda x : b - , lambda x,y: a, lambda x,y: b - , (i_ao, j_ao) - , epsrel=epsrel ) - - res_nnh, err_nnh = tplquad( integ_nonh - , a, b - , lambda x : a, lambda x : b - , lambda x,y: a, lambda x,y: b - , (i_ao, j_ao) - , epsrel=epsrel ) - - return( [ res_lap, err_lap - , res_grd, err_grd - , res_nnh, err_nnh ]) -# _____________________________________________________________________________ - - -# _____________________________________________________________________________ -# -def integ_eval(): - - list_ind = [] - for i_ao in range(ao_num): - for j_ao in range(ao_num): - list_ind.append( [i_ao, j_ao] ) - - nb_proc = multiprocessing.cpu_count() - print(" --- Excexution with {} processors ---\n".format(nb_proc)) - - p = Pool(nb_proc) - res = np.array( p.map( perform_integ, list_ind ) ) - - ii = 0 - for i_ao in range(ao_num): - for j_ao in range(ao_num): - print(" {} {} {:+e} {:+e} {:+e} {:+e}".format( i_ao, j_ao - , res[ii][0], res[ii][1], res[ii][2], res[ii][3]) ) - ii += 1 - - p.close() -# _____________________________________________________________________________ - - - -# _____________________________________________________________________________ -# -if __name__=="__main__": - - t0 = time.time() - - EZFIO_file = sys.argv[1] - ezfio.set_file(EZFIO_file) - - print(" Today's date:", datetime.now() ) - print(" EZFIO file = {}".format(EZFIO_file)) - - nucl_num = ezfio.get_nuclei_nucl_num() - ao_num = ezfio.get_ao_basis_ao_num() - j1b_gauss_pen = ezfio.get_ao_tc_eff_map_j1b_gauss_pen() - - ao_prim_num, ao_nucl, ao_power, nucl_coord, ao_expo, ao_coef = read_ao() - - #integ_eval() - - i_ao = 0 - j_ao = 0 - - a = -5. - b = +5. - epsrel = 1e-1 - res_grd, err_grd = tplquad( integ_nonh - , a, b - , lambda x : a, lambda x : b - , lambda x,y: a, lambda x,y: b - , (i_ao, j_ao) - , epsrel=epsrel ) - - print(res_grd, err_grd) - - - tf = time.time() - t0 - print(' end after {} min'.format(tf/60.)) -# _____________________________________________________________________________ - - - diff --git a/src/ao_tc_eff_map/j1b_pen.irp.f b/src/ao_tc_eff_map/j1b_pen.irp.f index 9587cfe2..f829108b 100644 --- a/src/ao_tc_eff_map/j1b_pen.irp.f +++ b/src/ao_tc_eff_map/j1b_pen.irp.f @@ -1,7 +1,7 @@ ! --- -BEGIN_PROVIDER [ double precision, j1b_gauss_pen, (nucl_num) ] +BEGIN_PROVIDER [ double precision, j1b_pen, (nucl_num) ] BEGIN_DOC ! exponents of the 1-body Jastrow @@ -13,7 +13,7 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_pen, (nucl_num) ] PROVIDE ezfio_filename if (mpi_master) then - call ezfio_has_ao_tc_eff_map_j1b_gauss_pen(exists) + call ezfio_has_ao_tc_eff_map_j1b_pen(exists) endif IRP_IF MPI_DEBUG @@ -24,21 +24,21 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_pen, (nucl_num) ] IRP_IF MPI include 'mpif.h' integer :: ierr - call MPI_BCAST(j1b_gauss_pen, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + call MPI_BCAST(j1b_pen, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_gauss_pen with MPI' + stop 'Unable to read j1b_pen with MPI' endif IRP_ENDIF if (exists) then if (mpi_master) then - write(6,'(A)') '.. >>>>> [ IO READ: j1b_gauss_pen ] <<<<< ..' - call ezfio_get_ao_tc_eff_map_j1b_gauss_pen(j1b_gauss_pen) + write(6,'(A)') '.. >>>>> [ IO READ: j1b_pen ] <<<<< ..' 
+ call ezfio_get_ao_tc_eff_map_j1b_pen(j1b_pen) IRP_IF MPI - call MPI_BCAST(j1b_gauss_pen, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + call MPI_BCAST(j1b_pen, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) if (ierr /= MPI_SUCCESS) then - stop 'Unable to read j1b_gauss_pen with MPI' + stop 'Unable to read j1b_pen with MPI' endif IRP_ENDIF endif @@ -47,7 +47,7 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_pen, (nucl_num) ] integer :: i do i = 1, nucl_num - j1b_gauss_pen(i) = 1d5 + j1b_pen(i) = 1d5 enddo endif @@ -56,4 +56,57 @@ END_PROVIDER ! --- +BEGIN_PROVIDER [ double precision, j1b_coeff, (nucl_num) ] + BEGIN_DOC + ! coefficients of the 1-body Jastrow + END_DOC + + implicit none + logical :: exists + + PROVIDE ezfio_filename + + if (mpi_master) then + call ezfio_has_ao_tc_eff_map_j1b_coeff(exists) + endif + + IRP_IF MPI_DEBUG + print *, irp_here, mpi_rank + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + IRP_ENDIF + + IRP_IF MPI + include 'mpif.h' + integer :: ierr + call MPI_BCAST(j1b_coeff, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1b_coeff with MPI' + endif + IRP_ENDIF + + if (exists) then + + if (mpi_master) then + write(6,'(A)') '.. >>>>> [ IO READ: j1b_coeff ] <<<<< ..' + call ezfio_get_ao_tc_eff_map_j1b_coeff(j1b_coeff) + IRP_IF MPI + call MPI_BCAST(j1b_coeff, (nucl_num), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr) + if (ierr /= MPI_SUCCESS) then + stop 'Unable to read j1b_coeff with MPI' + endif + IRP_ENDIF + endif + + else + + integer :: i + do i = 1, nucl_num + j1b_coeff(i) = 0d5 + enddo + + endif + +END_PROVIDER + +! --- diff --git a/src/ao_tc_eff_map/map_integrals_eff_pot.irp.f b/src/ao_tc_eff_map/map_integrals_eff_pot.irp.f index 4a6128b9..95dc664d 100644 --- a/src/ao_tc_eff_map/map_integrals_eff_pot.irp.f +++ b/src/ao_tc_eff_map/map_integrals_eff_pot.irp.f @@ -27,42 +27,52 @@ END_PROVIDER END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, ao_tc_sym_two_e_pot_cache, (0:64*64*64*64) ] + use map_module - implicit none - BEGIN_DOC - ! Cache of |AO| integrals for fast access - END_DOC - PROVIDE ao_tc_sym_two_e_pot_in_map - integer :: i,j,k,l,ii - integer(key_kind) :: idx - real(integral_kind) :: integral + implicit none + + BEGIN_DOC + ! 
Cache of |AO| integrals for fast access + END_DOC + + integer :: i,j,k,l,ii + integer(key_kind) :: idx + real(integral_kind) :: integral + + PROVIDE ao_tc_sym_two_e_pot_in_map + !$OMP PARALLEL DO PRIVATE (i,j,k,l,idx,ii,integral) - do l=ao_tc_sym_two_e_pot_cache_min,ao_tc_sym_two_e_pot_cache_max - do k=ao_tc_sym_two_e_pot_cache_min,ao_tc_sym_two_e_pot_cache_max - do j=ao_tc_sym_two_e_pot_cache_min,ao_tc_sym_two_e_pot_cache_max - do i=ao_tc_sym_two_e_pot_cache_min,ao_tc_sym_two_e_pot_cache_max - !DIR$ FORCEINLINE - call two_e_integrals_index(i,j,k,l,idx) - !DIR$ FORCEINLINE - call map_get(ao_tc_sym_two_e_pot_map,idx,integral) - ii = l-ao_tc_sym_two_e_pot_cache_min - ii = ior( ishft(ii,6), k-ao_tc_sym_two_e_pot_cache_min) - ii = ior( ishft(ii,6), j-ao_tc_sym_two_e_pot_cache_min) - ii = ior( ishft(ii,6), i-ao_tc_sym_two_e_pot_cache_min) - ao_tc_sym_two_e_pot_cache(ii) = integral - enddo - enddo - enddo - enddo - !$OMP END PARALLEL DO + do l = ao_tc_sym_two_e_pot_cache_min, ao_tc_sym_two_e_pot_cache_max + do k = ao_tc_sym_two_e_pot_cache_min, ao_tc_sym_two_e_pot_cache_max + do j = ao_tc_sym_two_e_pot_cache_min, ao_tc_sym_two_e_pot_cache_max + do i = ao_tc_sym_two_e_pot_cache_min, ao_tc_sym_two_e_pot_cache_max + !DIR$ FORCEINLINE + call two_e_integrals_index(i, j, k, l, idx) + !DIR$ FORCEINLINE + call map_get(ao_tc_sym_two_e_pot_map, idx, integral) + ii = l-ao_tc_sym_two_e_pot_cache_min + ii = ior( ishft(ii,6), k-ao_tc_sym_two_e_pot_cache_min) + ii = ior( ishft(ii,6), j-ao_tc_sym_two_e_pot_cache_min) + ii = ior( ishft(ii,6), i-ao_tc_sym_two_e_pot_cache_min) + ao_tc_sym_two_e_pot_cache(ii) = integral + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO END_PROVIDER +! --- + +subroutine insert_into_ao_tc_sym_two_e_pot_map(n_integrals, buffer_i, buffer_values) -subroutine insert_into_ao_tc_sym_two_e_pot_map(n_integrals,buffer_i, buffer_values) use map_module implicit none + BEGIN_DOC ! Create new entry into |AO| map END_DOC @@ -72,21 +82,30 @@ subroutine insert_into_ao_tc_sym_two_e_pot_map(n_integrals,buffer_i, buffer_valu real(integral_kind), intent(inout) :: buffer_values(n_integrals) call map_append(ao_tc_sym_two_e_pot_map, buffer_i, buffer_values, n_integrals) + end -double precision function get_ao_tc_sym_two_e_pot(i,j,k,l,map) result(result) +! --- + +double precision function get_ao_tc_sym_two_e_pot(i, j, k, l, map) result(result) + use map_module + implicit none + BEGIN_DOC - ! Gets one |AO| two-electron integral from the |AO| map in PHYSICIST NOTATION + ! Gets one |AO| two-electron integral from the |AO| map END_DOC + integer, intent(in) :: i,j,k,l integer(key_kind) :: idx type(map_type), intent(inout) :: map integer :: ii real(integral_kind) :: tmp logical, external :: ao_two_e_integral_zero + PROVIDE ao_tc_sym_two_e_pot_in_map ao_tc_sym_two_e_pot_cache ao_tc_sym_two_e_pot_cache_min + !DIR$ FORCEINLINE ! if (ao_two_e_integral_zero(i,j,k,l)) then if (.False.) then @@ -100,9 +119,9 @@ double precision function get_ao_tc_sym_two_e_pot(i,j,k,l,map) result(result) ii = ior(ii, i-ao_tc_sym_two_e_pot_cache_min) if (iand(ii, -64) /= 0) then !DIR$ FORCEINLINE - call two_e_integrals_index(i,j,k,l,idx) + call two_e_integrals_index(i, j, k, l, idx) !DIR$ FORCEINLINE - call map_get(map,idx,tmp) + call map_get(map, idx, tmp) tmp = tmp else ii = l-ao_tc_sym_two_e_pot_cache_min @@ -112,9 +131,12 @@ double precision function get_ao_tc_sym_two_e_pot(i,j,k,l,map) result(result) tmp = ao_tc_sym_two_e_pot_cache(ii) endif endif + result = tmp + end +! 
--- subroutine get_many_ao_tc_sym_two_e_pot(j,k,l,sze,out_val) use map_module diff --git a/src/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f b/src/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f new file mode 100644 index 00000000..50c396de --- /dev/null +++ b/src/ao_tc_eff_map/one_e_1bgauss_grad2.irp.f @@ -0,0 +1,332 @@ +! --- + +BEGIN_PROVIDER [ double precision, j1b_gauss_hermII, (ao_num,ao_num)] + + BEGIN_DOC + ! + ! :math:`\langle \chi_A | -0.5 \grad \tau_{1b} \cdot \grad \tau_{1b} | \chi_B \rangle` + ! + END_DOC + + implicit none + + integer :: num_A, num_B + integer :: power_A(3), power_B(3) + integer :: i, j, k1, k2, l, m + double precision :: alpha, beta, gama1, gama2, coef1, coef2 + double precision :: A_center(3), B_center(3), C_center1(3), C_center2(3) + double precision :: c1, c + + integer :: dim1 + double precision :: overlap_y, d_a_2, overlap_z, overlap + + double precision :: int_gauss_4G + + PROVIDE j1b_type j1b_pen j1b_coeff + + ! -------------------------------------------------------------------------------- + ! -- Dummy call to provide everything + dim1 = 100 + A_center(:) = 0.d0 + B_center(:) = 1.d0 + alpha = 1.d0 + beta = 0.1d0 + power_A(:) = 1 + power_B(:) = 0 + call overlap_gaussian_xyz( A_center, B_center, alpha, beta, power_A, power_B & + , overlap_y, d_a_2, overlap_z, overlap, dim1 ) + ! -------------------------------------------------------------------------------- + + + j1b_gauss_hermII(1:ao_num,1:ao_num) = 0.d0 + + if(j1b_type .eq. 1) then + ! \tau_1b = \sum_iA -[1 - exp(-alpha_A r_iA^2)] + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k1, k2, l, m, alpha, beta, gama1, gama2, & + !$OMP A_center, B_center, C_center1, C_center2, & + !$OMP power_A, power_B, num_A, num_B, c1, c) & + !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & + !$OMP ao_power, ao_nucl, nucl_coord, & + !$OMP ao_coef_normalized_ordered_transp, & + !$OMP nucl_num, j1b_pen, j1b_gauss_hermII) + !$OMP DO SCHEDULE (dynamic) + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k1 = 1, nucl_num + gama1 = j1b_pen(k1) + C_center1(1:3) = nucl_coord(k1,1:3) + + do k2 = 1, nucl_num + gama2 = j1b_pen(k2) + C_center2(1:3) = nucl_coord(k2,1:3) + + ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > + c1 = int_gauss_4G( A_center, B_center, C_center1, C_center2 & + , power_A, power_B, alpha, beta, gama1, gama2 ) + + c = c - 2.d0 * gama1 * gama2 * c1 + enddo + enddo + + j1b_gauss_hermII(i,j) = j1b_gauss_hermII(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + elseif(j1b_type .eq. 2) then + ! 
\tau_1b = \sum_iA [c_A exp(-alpha_A r_iA^2)] + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k1, k2, l, m, alpha, beta, gama1, gama2, & + !$OMP A_center, B_center, C_center1, C_center2, & + !$OMP power_A, power_B, num_A, num_B, c1, c, & + !$OMP coef1, coef2) & + !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & + !$OMP ao_power, ao_nucl, nucl_coord, & + !$OMP ao_coef_normalized_ordered_transp, & + !$OMP nucl_num, j1b_pen, j1b_gauss_hermII, & + !$OMP j1b_coeff) + !$OMP DO SCHEDULE (dynamic) + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k1 = 1, nucl_num + gama1 = j1b_pen (k1) + coef1 = j1b_coeff(k1) + C_center1(1:3) = nucl_coord(k1,1:3) + + do k2 = 1, nucl_num + gama2 = j1b_pen (k2) + coef2 = j1b_coeff(k2) + C_center2(1:3) = nucl_coord(k2,1:3) + + ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > + c1 = int_gauss_4G( A_center, B_center, C_center1, C_center2 & + , power_A, power_B, alpha, beta, gama1, gama2 ) + + c = c - 2.d0 * gama1 * gama2 * coef1 * coef2 * c1 + enddo + enddo + + j1b_gauss_hermII(i,j) = j1b_gauss_hermII(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + endif + +END_PROVIDER + + + + + +!_____________________________________________________________________________________________________________ +! +! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > +! +double precision function int_gauss_4G( A_center, B_center, C_center1, C_center2, power_A, power_B & + , alpha, beta, gama1, gama2 ) + + ! for max_dim + include 'constants.include.F' + + implicit none + + integer , intent(in) :: power_A(3), power_B(3) + double precision, intent(in) :: A_center(3), B_center(3), C_center1(3), C_center2(3) + double precision, intent(in) :: alpha, beta, gama1, gama2 + + integer :: i, dim1, power_C + integer :: iorder(3) + double precision :: AB_expo, fact_AB, AB_center(3), P_AB(0:max_dim,3) + double precision :: gama, fact_C, C_center(3) + double precision :: cx0, cy0, cz0, c_tmp1, c_tmp2, cx, cy, cz + double precision :: int_tmp + + double precision :: overlap_gaussian_x + + dim1 = 100 + + ! P_AB(0:max_dim,3) polynomial + ! AB_center(3) new center + ! AB_expo new exponent + ! fact_AB constant factor + ! iorder(3) i_order(i) = order of the polynomials + call give_explicit_poly_and_gaussian( P_AB, AB_center, AB_expo, fact_AB & + , iorder, alpha, beta, power_A, power_B, A_center, B_center, dim1) + + call gaussian_product(gama1, C_center1, gama2, C_center2, fact_C, gama, C_center) + + ! <<< + ! to avoid multi-evaluation + power_C = 0 + + cx0 = 0.d0 + do i = 0, iorder(1) + cx0 = cx0 + P_AB(i,1) * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + enddo + cy0 = 0.d0 + do i = 0, iorder(2) + cy0 = cy0 + P_AB(i,2) * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + enddo + cz0 = 0.d0 + do i = 0, iorder(3) + cz0 = cz0 + P_AB(i,3) * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + enddo + ! >>> + + int_tmp = 0.d0 + + ! 
----------------------------------------------------------------------------------------------- + ! + ! x term: + ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] (x - x_C1) (x - x_C2) | XB > + ! + + c_tmp1 = 2.d0 * C_center(1) - C_center1(1) - C_center2(1) + c_tmp2 = ( C_center(1) - C_center1(1) ) * ( C_center(1) - C_center2(1) ) + + cx = 0.d0 + do i = 0, iorder(1) + + ! < XA | exp[-gama r_C^2] (x - x_C)^2 | XB > + power_C = 2 + cx = cx + P_AB(i,1) & + * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + + ! < XA | exp[-gama r_C^2] (x - x_C) | XB > + power_C = 1 + cx = cx + P_AB(i,1) * c_tmp1 & + * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + + ! < XA | exp[-gama r_C^2] | XB > + power_C = 0 + cx = cx + P_AB(i,1) * c_tmp2 & + * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + + enddo + + int_tmp += cx * cy0 * cz0 + + ! ----------------------------------------------------------------------------------------------- + + + ! ----------------------------------------------------------------------------------------------- + ! + ! y term: + ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] (y - y_C1) (y - y_C2) | XB > + ! + + c_tmp1 = 2.d0 * C_center(2) - C_center1(2) - C_center2(2) + c_tmp2 = ( C_center(2) - C_center1(2) ) * ( C_center(2) - C_center2(2) ) + + cy = 0.d0 + do i = 0, iorder(2) + + ! < XA | exp[-gama r_C^2] (y - y_C)^2 | XB > + power_C = 2 + cy = cy + P_AB(i,2) & + * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + + ! < XA | exp[-gama r_C^2] (y - y_C) | XB > + power_C = 1 + cy = cy + P_AB(i,2) * c_tmp1 & + * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + + ! < XA | exp[-gama r_C^2] | XB > + power_C = 0 + cy = cy + P_AB(i,2) * c_tmp2 & + * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + + enddo + + int_tmp += cx0 * cy * cz0 + + ! ----------------------------------------------------------------------------------------------- + + + ! ----------------------------------------------------------------------------------------------- + ! + ! z term: + ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] (z - z_C1) (z - z_C2) | XB > + ! + + c_tmp1 = 2.d0 * C_center(3) - C_center1(3) - C_center2(3) + c_tmp2 = ( C_center(3) - C_center1(3) ) * ( C_center(3) - C_center2(3) ) + + cz = 0.d0 + do i = 0, iorder(3) + + ! < XA | exp[-gama r_C^2] (z - z_C)^2 | XB > + power_C = 2 + cz = cz + P_AB(i,3) & + * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + + ! < XA | exp[-gama r_C^2] (z - z_C) | XB > + power_C = 1 + cz = cz + P_AB(i,3) * c_tmp1 & + * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + + ! < XA | exp[-gama r_C^2] | XB > + power_C = 0 + cz = cz + P_AB(i,3) * c_tmp2 & + * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + + enddo + + int_tmp += cx0 * cy0 * cz + + ! 
----------------------------------------------------------------------------------------------- + + int_gauss_4G = fact_AB * fact_C * int_tmp + + return +end function int_gauss_4G +!_____________________________________________________________________________________________________________ +!_____________________________________________________________________________________________________________ + + diff --git a/src/ao_tc_eff_map/one_e_1bgauss_hermit.irp.f b/src/ao_tc_eff_map/one_e_1bgauss_hermit.irp.f deleted file mode 100644 index 21b6ed83..00000000 --- a/src/ao_tc_eff_map/one_e_1bgauss_hermit.irp.f +++ /dev/null @@ -1,519 +0,0 @@ - -BEGIN_PROVIDER [ double precision, j1b_gauss_hermII, (ao_num,ao_num)] - - BEGIN_DOC - ! - ! Hermitian part of 1-body Jastrow factow in the |AO| basis set. - ! - ! :math:`\langle \chi_A | -0.5 \grad \tau_{1b} \cdot \grad \tau_{1b} | \chi_B \rangle` - ! - END_DOC - - implicit none - - integer :: num_A, num_B - integer :: power_A(3), power_B(3) - integer :: i, j, k1, k2, l, m - double precision :: alpha, beta, gama1, gama2 - double precision :: A_center(3), B_center(3), C_center1(3), C_center2(3) - double precision :: c1, c - - integer :: dim1 - double precision :: overlap_y, d_a_2, overlap_z, overlap - - double precision :: int_gauss_4G - - PROVIDE j1b_gauss_pen - - ! -------------------------------------------------------------------------------- - ! -- Dummy call to provide everything - dim1 = 100 - A_center(:) = 0.d0 - B_center(:) = 1.d0 - alpha = 1.d0 - beta = 0.1d0 - power_A(:) = 1 - power_B(:) = 0 - call overlap_gaussian_xyz( A_center, B_center, alpha, beta, power_A, power_B & - , overlap_y, d_a_2, overlap_z, overlap, dim1 ) - ! -------------------------------------------------------------------------------- - - - j1b_gauss_hermII(1:ao_num,1:ao_num) = 0.d0 - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, k1, k2, l, m, alpha, beta, gama1, gama2, & - !$OMP A_center, B_center, C_center1, C_center2, & - !$OMP power_A, power_B, num_A, num_B, c1, c) & - !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & - !$OMP ao_power, ao_nucl, nucl_coord, & - !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_gauss_pen, j1b_gauss_hermII) - - !$OMP DO SCHEDULE (dynamic) - - do j = 1, ao_num - - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k1 = 1, nucl_num - gama1 = j1b_gauss_pen(k1) - C_center1(1:3) = nucl_coord(k1,1:3) - - do k2 = 1, nucl_num - gama2 = j1b_gauss_pen(k2) - C_center2(1:3) = nucl_coord(k2,1:3) - - ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > - c1 = int_gauss_4G( A_center, B_center, C_center1, C_center2 & - , power_A, power_B, alpha, beta, gama1, gama2 ) - - c = c - 2.d0 * gama1 * gama2 * c1 - enddo - enddo - - j1b_gauss_hermII(i,j) = j1b_gauss_hermII(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c - - enddo - enddo - enddo - enddo - - !$OMP END DO - !$OMP END PARALLEL - -END_PROVIDER - - - - - -!_____________________________________________________________________________________________________________ -! -! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] r_C1 \cdot r_C2 | XB > -! 
-double precision function int_gauss_4G( A_center, B_center, C_center1, C_center2, power_A, power_B & - , alpha, beta, gama1, gama2 ) - - ! for max_dim - include 'constants.include.F' - - implicit none - - integer , intent(in) :: power_A(3), power_B(3) - double precision, intent(in) :: A_center(3), B_center(3), C_center1(3), C_center2(3) - double precision, intent(in) :: alpha, beta, gama1, gama2 - - integer :: i, dim1, power_C - integer :: iorder(3) - double precision :: AB_expo, fact_AB, AB_center(3), P_AB(0:max_dim,3) - double precision :: gama, fact_C, C_center(3) - double precision :: cx0, cy0, cz0, c_tmp1, c_tmp2, cx, cy, cz - double precision :: int_tmp - - double precision :: overlap_gaussian_x - - dim1 = 100 - - ! P_AB(0:max_dim,3) polynomial - ! AB_center(3) new center - ! AB_expo new exponent - ! fact_AB constant factor - ! iorder(3) i_order(i) = order of the polynomials - call give_explicit_poly_and_gaussian( P_AB, AB_center, AB_expo, fact_AB & - , iorder, alpha, beta, power_A, power_B, A_center, B_center, dim1) - - call gaussian_product(gama1, C_center1, gama2, C_center2, fact_C, gama, C_center) - - ! <<< - ! to avoid multi-evaluation - power_C = 0 - - cx0 = 0.d0 - do i = 0, iorder(1) - cx0 = cx0 + P_AB(i,1) * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - enddo - cy0 = 0.d0 - do i = 0, iorder(2) - cy0 = cy0 + P_AB(i,2) * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - enddo - cz0 = 0.d0 - do i = 0, iorder(3) - cz0 = cz0 + P_AB(i,3) * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - enddo - ! >>> - - int_tmp = 0.d0 - - ! ----------------------------------------------------------------------------------------------- - ! - ! x term: - ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] (x - x_C1) (x - x_C2) | XB > - ! - - c_tmp1 = 2.d0 * C_center(1) - C_center1(1) - C_center2(1) - c_tmp2 = ( C_center(1) - C_center1(1) ) * ( C_center(1) - C_center2(1) ) - - cx = 0.d0 - do i = 0, iorder(1) - - ! < XA | exp[-gama r_C^2] (x - x_C)^2 | XB > - power_C = 2 - cx = cx + P_AB(i,1) & - * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - - ! < XA | exp[-gama r_C^2] (x - x_C) | XB > - power_C = 1 - cx = cx + P_AB(i,1) * c_tmp1 & - * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - - ! < XA | exp[-gama r_C^2] | XB > - power_C = 0 - cx = cx + P_AB(i,1) * c_tmp2 & - * overlap_gaussian_x( AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - - enddo - - int_tmp += cx * cy0 * cz0 - - ! ----------------------------------------------------------------------------------------------- - - - ! ----------------------------------------------------------------------------------------------- - ! - ! y term: - ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] (y - y_C1) (y - y_C2) | XB > - ! - - c_tmp1 = 2.d0 * C_center(2) - C_center1(2) - C_center2(2) - c_tmp2 = ( C_center(2) - C_center1(2) ) * ( C_center(2) - C_center2(2) ) - - cy = 0.d0 - do i = 0, iorder(2) - - ! < XA | exp[-gama r_C^2] (y - y_C)^2 | XB > - power_C = 2 - cy = cy + P_AB(i,2) & - * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - - ! < XA | exp[-gama r_C^2] (y - y_C) | XB > - power_C = 1 - cy = cy + P_AB(i,2) * c_tmp1 & - * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - - ! 
< XA | exp[-gama r_C^2] | XB > - power_C = 0 - cy = cy + P_AB(i,2) * c_tmp2 & - * overlap_gaussian_x( AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - - enddo - - int_tmp += cx0 * cy * cz0 - - ! ----------------------------------------------------------------------------------------------- - - - ! ----------------------------------------------------------------------------------------------- - ! - ! z term: - ! < XA | exp[-gama1 r_C1^2 -gama2 r_C2^2] (z - z_C1) (z - z_C2) | XB > - ! - - c_tmp1 = 2.d0 * C_center(3) - C_center1(3) - C_center2(3) - c_tmp2 = ( C_center(3) - C_center1(3) ) * ( C_center(3) - C_center2(3) ) - - cz = 0.d0 - do i = 0, iorder(3) - - ! < XA | exp[-gama r_C^2] (z - z_C)^2 | XB > - power_C = 2 - cz = cz + P_AB(i,3) & - * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - - ! < XA | exp[-gama r_C^2] (z - z_C) | XB > - power_C = 1 - cz = cz + P_AB(i,3) * c_tmp1 & - * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - - ! < XA | exp[-gama r_C^2] | XB > - power_C = 0 - cz = cz + P_AB(i,3) * c_tmp2 & - * overlap_gaussian_x( AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - - enddo - - int_tmp += cx0 * cy0 * cz - - ! ----------------------------------------------------------------------------------------------- - - int_gauss_4G = fact_AB * fact_C * int_tmp - - return -end function int_gauss_4G -!_____________________________________________________________________________________________________________ -!_____________________________________________________________________________________________________________ - -BEGIN_PROVIDER [ double precision, j1b_gauss_hermI, (ao_num,ao_num)] - - BEGIN_DOC - ! - ! Hermitian part of 1-body Jastrow factow in the |AO| basis set. - ! - ! :math:`\langle \chi_A | -0.5 \Delta \tau_{1b} | \chi_B \rangle` - ! - END_DOC - - implicit none - - integer :: num_A, num_B - integer :: power_A(3), power_B(3) - integer :: i, j, k, l, m - double precision :: alpha, beta, gama - double precision :: A_center(3), B_center(3), C_center(3) - double precision :: c1, c2, c - - integer :: dim1 - double precision :: overlap_y, d_a_2, overlap_z, overlap - - double precision :: int_gauss_r0, int_gauss_r2 - - PROVIDE j1b_gauss_pen - - ! -------------------------------------------------------------------------------- - ! -- Dummy call to provide everything - dim1 = 100 - A_center(:) = 0.d0 - B_center(:) = 1.d0 - alpha = 1.d0 - beta = 0.1d0 - power_A(:) = 1 - power_B(:) = 0 - call overlap_gaussian_xyz( A_center, B_center, alpha, beta, power_A, power_B & - , overlap_y, d_a_2, overlap_z, overlap, dim1 ) - ! 
-------------------------------------------------------------------------------- - - j1b_gauss_hermI(1:ao_num,1:ao_num) = 0.d0 - - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, & - !$OMP A_center, B_center, C_center, power_A, power_B, & - !$OMP num_A, num_B, c1, c2, c) & - !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & - !$OMP ao_power, ao_nucl, nucl_coord, & - !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_gauss_pen, j1b_gauss_hermI) - - !$OMP DO SCHEDULE (dynamic) - - do j = 1, ao_num - - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k = 1, nucl_num - - gama = j1b_gauss_pen(k) - C_center(1:3) = nucl_coord(k,1:3) - - ! < XA | exp[-gama r_C^2] | XB > - c1 = int_gauss_r0( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - ! < XA | r_A^2 exp[-gama r_C^2] | XB > - c2 = int_gauss_r2( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - c = c + 3.d0 * gama * c1 - 2.d0 * gama * gama * c2 - enddo - - j1b_gauss_hermI(i,j) = j1b_gauss_hermI(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c - - enddo - enddo - enddo - enddo - - !$OMP END DO - !$OMP END PARALLEL - -END_PROVIDER - - -!_____________________________________________________________________________________________________________ -! -! < XA | exp[-gama r_C^2] | XB > -! -double precision function int_gauss_r0(A_center, B_center, C_center, power_A, power_B, alpha, beta, gama) - - ! for max_dim - include 'constants.include.F' - - implicit none - - integer , intent(in) :: power_A(3), power_B(3) - double precision, intent(in) :: A_center(3), B_center(3), C_center(3) - double precision, intent(in) :: alpha, beta, gama - - integer :: i, power_C, dim1 - integer :: iorder(3) - integer :: nmax - double precision :: AB_expo, fact_AB, AB_center(3), P_AB(0:max_dim,3) - double precision :: cx, cy, cz - - double precision :: overlap_gaussian_x - - dim1 = 100 - - ! P_AB(0:max_dim,3) polynomial - ! AB_center(3) new center - ! AB_expo new exponent - ! fact_AB constant factor - ! iorder(3) i_order(i) = order of the polynomials - call give_explicit_poly_and_gaussian( P_AB, AB_center, AB_expo, fact_AB & - , iorder, alpha, beta, power_A, power_B, A_center, B_center, dim1) - - if( fact_AB .lt. 
1d-20 ) then - int_gauss_r0 = 0.d0 - return - endif - - power_C = 0 - cx = 0.d0 - do i = 0, iorder(1) - cx = cx + P_AB(i,1) * overlap_gaussian_x(AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - enddo - cy = 0.d0 - do i = 0, iorder(2) - cy = cy + P_AB(i,2) * overlap_gaussian_x(AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - enddo - cz = 0.d0 - do i = 0, iorder(3) - cz = cz + P_AB(i,3) * overlap_gaussian_x(AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - enddo - - int_gauss_r0 = fact_AB * cx * cy * cz - - return -end function int_gauss_r0 -!_____________________________________________________________________________________________________________ -!_____________________________________________________________________________________________________________ - - - -!_____________________________________________________________________________________________________________ -! -! < XA | r_C^2 exp[-gama r_C^2] | XB > -! -double precision function int_gauss_r2(A_center, B_center, C_center, power_A, power_B, alpha, beta, gama) - - ! for max_dim - include 'constants.include.F' - - implicit none - - integer, intent(in) :: power_A(3), power_B(3) - double precision, intent(in) :: A_center(3), B_center(3), C_center(3) - double precision, intent(in) :: alpha, beta, gama - - integer :: i, power_C, dim1 - integer :: iorder(3) - double precision :: AB_expo, fact_AB, AB_center(3), P_AB(0:max_dim,3) - double precision :: cx0, cy0, cz0, cx, cy, cz - double precision :: int_tmp - - double precision :: overlap_gaussian_x - - dim1 = 100 - - ! P_AB(0:max_dim,3) polynomial centered on AB_center - ! AB_center(3) new center - ! AB_expo new exponent - ! fact_AB constant factor - ! iorder(3) i_order(i) = order of the polynomials - call give_explicit_poly_and_gaussian( P_AB, AB_center, AB_expo, fact_AB & - , iorder, alpha, beta, power_A, power_B, A_center, B_center, dim1) - - ! <<< - ! to avoid multi-evaluation - power_C = 0 - - cx0 = 0.d0 - do i = 0, iorder(1) - cx0 = cx0 + P_AB(i,1) * overlap_gaussian_x(AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - enddo - cy0 = 0.d0 - do i = 0, iorder(2) - cy0 = cy0 + P_AB(i,2) * overlap_gaussian_x(AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - enddo - cz0 = 0.d0 - do i = 0, iorder(3) - cz0 = cz0 + P_AB(i,3) * overlap_gaussian_x(AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - enddo - ! >>> - - int_tmp = 0.d0 - - power_C = 2 - - ! ( x - XC)^2 - cx = 0.d0 - do i = 0, iorder(1) - cx = cx + P_AB(i,1) * overlap_gaussian_x(AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) - enddo - int_tmp += cx * cy0 * cz0 - - ! ( y - YC)^2 - cy = 0.d0 - do i = 0, iorder(2) - cy = cy + P_AB(i,2) * overlap_gaussian_x(AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) - enddo - int_tmp += cx0 * cy * cz0 - - ! 
( z - ZC)^2 - cz = 0.d0 - do i = 0, iorder(3) - cz = cz + P_AB(i,3) * overlap_gaussian_x(AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) - enddo - int_tmp += cx0 * cy0 * cz - - int_gauss_r2 = fact_AB * int_tmp - - return -end function int_gauss_r2 -!_____________________________________________________________________________________________________________ -!_____________________________________________________________________________________________________________ diff --git a/src/ao_tc_eff_map/one_e_1bgauss_lap.irp.f b/src/ao_tc_eff_map/one_e_1bgauss_lap.irp.f new file mode 100644 index 00000000..0a0b7610 --- /dev/null +++ b/src/ao_tc_eff_map/one_e_1bgauss_lap.irp.f @@ -0,0 +1,303 @@ +! --- + +BEGIN_PROVIDER [ double precision, j1b_gauss_hermI, (ao_num,ao_num)] + + BEGIN_DOC + ! + ! :math:`\langle \chi_A | -0.5 \Delta \tau_{1b} | \chi_B \rangle` + ! + END_DOC + + implicit none + + integer :: num_A, num_B + integer :: power_A(3), power_B(3) + integer :: i, j, k, l, m + double precision :: alpha, beta, gama, coef + double precision :: A_center(3), B_center(3), C_center(3) + double precision :: c1, c2, c + + integer :: dim1 + double precision :: overlap_y, d_a_2, overlap_z, overlap + + double precision :: int_gauss_r0, int_gauss_r2 + + PROVIDE j1b_type j1b_pen j1b_coeff + + ! -------------------------------------------------------------------------------- + ! -- Dummy call to provide everything + dim1 = 100 + A_center(:) = 0.d0 + B_center(:) = 1.d0 + alpha = 1.d0 + beta = 0.1d0 + power_A(:) = 1 + power_B(:) = 0 + call overlap_gaussian_xyz( A_center, B_center, alpha, beta, power_A, power_B & + , overlap_y, d_a_2, overlap_z, overlap, dim1 ) + ! -------------------------------------------------------------------------------- + + j1b_gauss_hermI(1:ao_num,1:ao_num) = 0.d0 + + if(j1b_type .eq. 1) then + ! \tau_1b = \sum_iA -[1 - exp(-alpha_A r_iA^2)] + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, & + !$OMP A_center, B_center, C_center, power_A, power_B, & + !$OMP num_A, num_B, c1, c2, c) & + !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & + !$OMP ao_power, ao_nucl, nucl_coord, & + !$OMP ao_coef_normalized_ordered_transp, & + !$OMP nucl_num, j1b_pen, j1b_gauss_hermI) + !$OMP DO SCHEDULE (dynamic) + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k = 1, nucl_num + gama = j1b_pen(k) + C_center(1:3) = nucl_coord(k,1:3) + + ! < XA | exp[-gama r_C^2] | XB > + c1 = int_gauss_r0( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + ! < XA | r_A^2 exp[-gama r_C^2] | XB > + c2 = int_gauss_r2( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + c = c + 3.d0 * gama * c1 - 2.d0 * gama * gama * c2 + enddo + + j1b_gauss_hermI(i,j) = j1b_gauss_hermI(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + elseif(j1b_type .eq. 2) then + ! 
\tau_1b = \sum_iA [c_A exp(-alpha_A r_iA^2)] + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, coef, & + !$OMP A_center, B_center, C_center, power_A, power_B, & + !$OMP num_A, num_B, c1, c2, c) & + !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & + !$OMP ao_power, ao_nucl, nucl_coord, & + !$OMP ao_coef_normalized_ordered_transp, & + !$OMP nucl_num, j1b_pen, j1b_gauss_hermI, & + !$OMP j1b_coeff) + !$OMP DO SCHEDULE (dynamic) + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k = 1, nucl_num + gama = j1b_pen (k) + coef = j1b_coeff(k) + C_center(1:3) = nucl_coord(k,1:3) + + ! < XA | exp[-gama r_C^2] | XB > + c1 = int_gauss_r0( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + ! < XA | r_A^2 exp[-gama r_C^2] | XB > + c2 = int_gauss_r2( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + c = c + 3.d0 * gama * coef * c1 - 2.d0 * gama * gama * coef * c2 + enddo + + j1b_gauss_hermI(i,j) = j1b_gauss_hermI(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + endif + +END_PROVIDER + + +!_____________________________________________________________________________________________________________ +! +! < XA | exp[-gama r_C^2] | XB > +! +double precision function int_gauss_r0(A_center, B_center, C_center, power_A, power_B, alpha, beta, gama) + + ! for max_dim + include 'constants.include.F' + + implicit none + + integer , intent(in) :: power_A(3), power_B(3) + double precision, intent(in) :: A_center(3), B_center(3), C_center(3) + double precision, intent(in) :: alpha, beta, gama + + integer :: i, power_C, dim1 + integer :: iorder(3) + integer :: nmax + double precision :: AB_expo, fact_AB, AB_center(3), P_AB(0:max_dim,3) + double precision :: cx, cy, cz + + double precision :: overlap_gaussian_x + + dim1 = 100 + + ! P_AB(0:max_dim,3) polynomial + ! AB_center(3) new center + ! AB_expo new exponent + ! fact_AB constant factor + ! iorder(3) i_order(i) = order of the polynomials + call give_explicit_poly_and_gaussian( P_AB, AB_center, AB_expo, fact_AB & + , iorder, alpha, beta, power_A, power_B, A_center, B_center, dim1) + + if( fact_AB .lt. 1d-20 ) then + int_gauss_r0 = 0.d0 + return + endif + + power_C = 0 + cx = 0.d0 + do i = 0, iorder(1) + cx = cx + P_AB(i,1) * overlap_gaussian_x(AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + enddo + cy = 0.d0 + do i = 0, iorder(2) + cy = cy + P_AB(i,2) * overlap_gaussian_x(AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + enddo + cz = 0.d0 + do i = 0, iorder(3) + cz = cz + P_AB(i,3) * overlap_gaussian_x(AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + enddo + + int_gauss_r0 = fact_AB * cx * cy * cz + + return +end function int_gauss_r0 +!_____________________________________________________________________________________________________________ +!_____________________________________________________________________________________________________________ + + + +!_____________________________________________________________________________________________________________ +! +! 
< XA | r_C^2 exp[-gama r_C^2] | XB > +! +double precision function int_gauss_r2(A_center, B_center, C_center, power_A, power_B, alpha, beta, gama) + + ! for max_dim + include 'constants.include.F' + + implicit none + + integer, intent(in) :: power_A(3), power_B(3) + double precision, intent(in) :: A_center(3), B_center(3), C_center(3) + double precision, intent(in) :: alpha, beta, gama + + integer :: i, power_C, dim1 + integer :: iorder(3) + double precision :: AB_expo, fact_AB, AB_center(3), P_AB(0:max_dim,3) + double precision :: cx0, cy0, cz0, cx, cy, cz + double precision :: int_tmp + + double precision :: overlap_gaussian_x + + dim1 = 100 + + ! P_AB(0:max_dim,3) polynomial centered on AB_center + ! AB_center(3) new center + ! AB_expo new exponent + ! fact_AB constant factor + ! iorder(3) i_order(i) = order of the polynomials + call give_explicit_poly_and_gaussian( P_AB, AB_center, AB_expo, fact_AB & + , iorder, alpha, beta, power_A, power_B, A_center, B_center, dim1) + + ! <<< + ! to avoid multi-evaluation + power_C = 0 + + cx0 = 0.d0 + do i = 0, iorder(1) + cx0 = cx0 + P_AB(i,1) * overlap_gaussian_x(AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + enddo + cy0 = 0.d0 + do i = 0, iorder(2) + cy0 = cy0 + P_AB(i,2) * overlap_gaussian_x(AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + enddo + cz0 = 0.d0 + do i = 0, iorder(3) + cz0 = cz0 + P_AB(i,3) * overlap_gaussian_x(AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + enddo + ! >>> + + int_tmp = 0.d0 + + power_C = 2 + + ! ( x - XC)^2 + cx = 0.d0 + do i = 0, iorder(1) + cx = cx + P_AB(i,1) * overlap_gaussian_x(AB_center(1), C_center(1), AB_expo, gama, i, power_C, dim1) + enddo + int_tmp += cx * cy0 * cz0 + + ! ( y - YC)^2 + cy = 0.d0 + do i = 0, iorder(2) + cy = cy + P_AB(i,2) * overlap_gaussian_x(AB_center(2), C_center(2), AB_expo, gama, i, power_C, dim1) + enddo + int_tmp += cx0 * cy * cz0 + + ! ( z - ZC)^2 + cz = 0.d0 + do i = 0, iorder(3) + cz = cz + P_AB(i,3) * overlap_gaussian_x(AB_center(3), C_center(3), AB_expo, gama, i, power_C, dim1) + enddo + int_tmp += cx0 * cy0 * cz + + int_gauss_r2 = fact_AB * int_tmp + + return +end function int_gauss_r2 +!_____________________________________________________________________________________________________________ +!_____________________________________________________________________________________________________________ + + diff --git a/src/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f b/src/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f index 3ae3e019..bd881d32 100644 --- a/src/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f +++ b/src/ao_tc_eff_map/one_e_1bgauss_nonherm.irp.f @@ -1,11 +1,10 @@ +! --- + BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] BEGIN_DOC ! - ! Hermitian part of 1-body Jastrow factow in the |AO| basis set. - ! - ! \langle \chi_i | - grad \tau_{1b} \cdot grad | \chi_j \rangle = - ! 2 \sum_A aA \langle \chi_i | exp[-aA riA^2] (ri-rA) \cdot grad | \chi_j \rangle + ! j1b_gauss_nonherm(i,j) = \langle \chi_j | - grad \tau_{1b} \cdot grad | \chi_i \rangle ! 
END_DOC @@ -14,7 +13,7 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] integer :: num_A, num_B integer :: power_A(3), power_B(3) integer :: i, j, k, l, m - double precision :: alpha, beta, gama + double precision :: alpha, beta, gama, coef double precision :: A_center(3), B_center(3), C_center(3) double precision :: c1, c @@ -23,7 +22,7 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] double precision :: int_gauss_deriv - PROVIDE j1b_gauss_pen + PROVIDE j1b_type j1b_pen j1b_coeff ! -------------------------------------------------------------------------------- ! -- Dummy call to provide everything @@ -41,6 +40,9 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] j1b_gauss_nonherm(1:ao_num,1:ao_num) = 0.d0 + if(j1b_type .eq. 1) then + ! \tau_1b = \sum_iA -[1 - exp(-alpha_A r_iA^2)] + !$OMP PARALLEL & !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, & @@ -49,53 +51,101 @@ BEGIN_PROVIDER [ double precision, j1b_gauss_nonherm, (ao_num,ao_num)] !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & !$OMP ao_power, ao_nucl, nucl_coord, & !$OMP ao_coef_normalized_ordered_transp, & - !$OMP nucl_num, j1b_gauss_pen, j1b_gauss_nonherm) - + !$OMP nucl_num, j1b_pen, j1b_gauss_nonherm) !$OMP DO SCHEDULE (dynamic) - - do j = 1, ao_num - - num_A = ao_nucl(j) - power_A(1:3) = ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - - do i = 1, ao_num - - num_B = ao_nucl(i) - power_B(1:3) = ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - - do l = 1, ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - - do m = 1, ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - - c = 0.d0 - do k = 1, nucl_num - - gama = j1b_gauss_pen(k) - C_center(1:3) = nucl_coord(k,1:3) - - ! \langle \chi_A | exp[-gama r_C^2] r_C \cdot grad | \chi_B \rangle - c1 = int_gauss_deriv( A_center, B_center, C_center & - , power_A, power_B, alpha, beta, gama ) - - c = c + 2.d0 * gama * c1 + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k = 1, nucl_num + gama = j1b_pen(k) + C_center(1:3) = nucl_coord(k,1:3) + + ! \langle \chi_A | exp[-gama r_C^2] r_C \cdot grad | \chi_B \rangle + c1 = int_gauss_deriv( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + c = c + 2.d0 * gama * c1 + enddo + + j1b_gauss_nonherm(i,j) = j1b_gauss_nonherm(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c enddo - - j1b_gauss_nonherm(i,j) = j1b_gauss_nonherm(i,j) & - + ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) * c - enddo enddo enddo - enddo - !$OMP END DO !$OMP END PARALLEL + elseif(j1b_type .eq. 2) then + ! 
\tau_1b = \sum_iA [c_A exp(-alpha_A r_iA^2)] + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, k, l, m, alpha, beta, gama, coef, & + !$OMP A_center, B_center, C_center, power_A, power_B, & + !$OMP num_A, num_B, c1, c) & + !$OMP SHARED (ao_num, ao_prim_num, ao_expo_ordered_transp, & + !$OMP ao_power, ao_nucl, nucl_coord, & + !$OMP ao_coef_normalized_ordered_transp, & + !$OMP nucl_num, j1b_pen, j1b_gauss_nonherm, & + !$OMP j1b_coeff) + !$OMP DO SCHEDULE (dynamic) + do j = 1, ao_num + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + do i = 1, ao_num + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + c = 0.d0 + do k = 1, nucl_num + gama = j1b_pen (k) + coef = j1b_coeff(k) + C_center(1:3) = nucl_coord(k,1:3) + + ! \langle \chi_A | exp[-gama r_C^2] r_C \cdot grad | \chi_B \rangle + c1 = int_gauss_deriv( A_center, B_center, C_center & + , power_A, power_B, alpha, beta, gama ) + + c = c + 2.d0 * gama * coef * c1 + enddo + + j1b_gauss_nonherm(i,j) = j1b_gauss_nonherm(i,j) & + + ao_coef_normalized_ordered_transp(l,j) & + * ao_coef_normalized_ordered_transp(m,i) * c + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + endif + END_PROVIDER @@ -317,3 +367,5 @@ double precision function int_gauss_deriv(A_center, B_center, C_center, power_A, end function int_gauss_deriv !_____________________________________________________________________________________________________________ !_____________________________________________________________________________________________________________ + + diff --git a/src/ao_tc_eff_map/two_e_1bgauss_coul.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_coul.irp.f deleted file mode 100644 index 8d819711..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_coul.irp.f +++ /dev/null @@ -1,800 +0,0 @@ -double precision function j1b_gauss_coul(i, j, k, l) - - BEGIN_DOC - ! - ! integral in the AO basis: - ! i(r1) j(r1) f(r12) k(r2) l(r2) - ! - ! with: - ! f(r12) = - [ 0.5 / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] - ! = [ 1 / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! + (r2-RA)^2 exp(-aA r2A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! 
- END_DOC - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s, ii - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: p_inv, q_inv - double precision :: P_new_tmp(0:max_dim,3), P_center_tmp(3), fact_p_tmp, pp_tmp - double precision :: Q_new_tmp(0:max_dim,3), Q_center_tmp(3), fact_q_tmp, qq_tmp - double precision :: P_new(0:max_dim,3), P_center(3), fact_p, pp - double precision :: Q_new(0:max_dim,3), Q_center(3), fact_q, qq - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: expoii, factii, Centerii(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: general_primitive_integral_coul_shifted - - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_coul = 0.d0 - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! [ 1 / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! - ! ------------------------------------------------------------------------------------------------------------------- - - shift_Q = (/ 0, 0, 0 /) - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - - ! pol centerd on P_center_tmp ==> centerd on P_center - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! 
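! Sketch of the per-direction pattern used below: after the Gaussian product
! with exp(-aA r1A^2), the bra polynomial is recentred on P_center
! (pol_modif_center), and the leftover factor is expanded around that centre,
!   (x1 - XA)^2 = (x1 - Px)^2 + 2*ff*(x1 - Px) + ff*ff ,   ff = Px - XA ,
! with XA the x coordinate of RA (Centerii). Each Cartesian direction is thus
! the sum of three shifted integrals with shift_P = 2, 1, 0 and weights
! 1, 2*ff, ff*ff, all multiplied by expoii = a_A; the shift arguments of
! general_primitive_integral_coul_shifted add the corresponding extra powers
! of (x1 - Px) (or (x2 - Qx) for shift_Q) to the usual Coulomb kernel.
! The same three-term pattern is applied on the ket side (shift_Q) in the
! (r2-RA)^2 section further down.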
x term: - - ff = P_center(1) - Centerii(1) - - shift_P = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - ff = P_center(2) - Centerii(2) - - shift_P = (/ 0, 2, 0 /) - cy = cy + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 1, 0 /) - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cy = cy + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! z term: - - ff = P_center(3) - Centerii(3) - - shift_P = (/ 0, 0, 2 /) - cz = cz + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 1 /) - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cz = cz + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul = j1b_gauss_coul + coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! [ 1 / r12 ] \sum_A a_A [ (r2-RA)^2 exp(-aA r2A^2) - ! - ! 
------------------------------------------------------------------------------------------------------------------- - - shift_P = (/ 0, 0, 0 /) - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p_inv = 1.d0 / pp - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) - - fact_q = fact_q_tmp * factii - q_inv = 1.d0 / qq - - ! pol centerd on Q_center_tmp ==> centerd on Q_center - call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) - - ! ---------------------------------------------------------------------------------------------------- - ! x term: - - ff = Q_center(1) - Centerii(1) - - shift_Q = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - ff = Q_center(2) - Centerii(2) - - shift_Q = (/ 0, 2, 0 /) - cy = cy + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! 
z term: - - ff = Q_center(3) - Centerii(3) - - shift_Q = (/ 0, 0, 2 /) - cz = cz + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul = j1b_gauss_coul + coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! - [ 1 / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) ] - ! - ! ------------------------------------------------------------------------------------------------------------------- - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - - ! pol centerd on P_center_tmp ==> centerd on P_center - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! 
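! Sketch of the cross-term pattern used below: with ff = Px - XA on the bra
! side and gg = Qx - XA on the ket side, each Cartesian direction of
! (r1-RA) . (r2-RA) is expanded as
!   (x1 - XA)*(x2 - XA) = (x1 - Px)*(x2 - Qx) + gg*(x1 - Px)
!                       + ff*(x2 - Qx) + ff*gg ,
! so every direction combines four shifted integrals with
! (shift_P, shift_Q) = (1,1), (1,0), (0,1), (0,0) and weights
! 1, gg, ff, ff*gg, again multiplied by expoii = a_A.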
x term: - - ff = P_center(1) - Centerii(1) - gg = Q_center(1) - Centerii(1) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - ff = P_center(2) - Centerii(2) - gg = Q_center(2) - Centerii(2) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! 
z term: - - ff = P_center(3) - Centerii(3) - gg = Q_center(3) - Centerii(3) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul = j1b_gauss_coul - coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! - [ 1 / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! - ! ------------------------------------------------------------------------------------------------------------------- - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p_inv = 1.d0 / pp - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) - - fact_q = fact_q_tmp * factii - q_inv = 1.d0 / qq - - ! pol centerd on Q_center_tmp ==> centerd on Q_center - call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) - - ! ---------------------------------------------------------------------------------------------------- - ! 
x term: - - ff = P_center(1) - Centerii(1) - gg = Q_center(1) - Centerii(1) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - ff = P_center(2) - Centerii(2) - gg = Q_center(2) - Centerii(2) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! 
z term: - - ff = P_center(3) - Centerii(3) - gg = Q_center(3) - Centerii(3) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul = j1b_gauss_coul - coef4 * ( cx + cy + cz ) - - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - return -end function j1b_gauss_coul - - - - -!______________________________________________________________________________________________________________________ -!______________________________________________________________________________________________________________________ - -double precision function general_primitive_integral_coul_shifted( dim & - , P_new, P_center, fact_p, p, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, q, q_inv, iorder_q, shift_Q ) - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: dim - integer, intent(in) :: iorder_p(3), shift_P(3) - integer, intent(in) :: iorder_q(3), shift_Q(3) - double precision, intent(in) :: P_new(0:max_dim,3), P_center(3), fact_p, p, p_inv - double precision, intent(in) :: Q_new(0:max_dim,3), Q_center(3), fact_q, q, q_inv - - integer :: n_Ix, n_Iy, n_Iz, nx, ny, nz - integer :: ix, iy, iz, jx, jy, jz, i - integer :: n_pt_tmp, n_pt_out, iorder - integer :: ii, jj - double precision :: rho, dist - double precision :: dx(0:max_dim), Ix_pol(0:max_dim) - double precision :: dy(0:max_dim), Iy_pol(0:max_dim) - double precision :: dz(0:max_dim), Iz_pol(0:max_dim) - double precision :: a, b, c, d, e, f, accu, pq, const - double precision :: pq_inv, p10_1, p10_2, p01_1, p01_2, pq_inv_2 - double precision :: d1(0:max_dim), d_poly(0:max_dim) - double precision :: p_plus_q - - double precision :: rint_sum - - general_primitive_integral_coul_shifted = 0.d0 - - !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: dx, Ix_pol, dy, Iy_pol, dz, Iz_pol - !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: d1, d_poly - - ! Gaussian Product - ! ---------------- - p_plus_q = (p+q) - pq = p_inv * 0.5d0 * q_inv - pq_inv = 0.5d0 / p_plus_q - p10_1 = q * pq ! 1/(2p) - p01_1 = p * pq ! 1/(2q) - pq_inv_2 = pq_inv + pq_inv - p10_2 = pq_inv_2 * p10_1 * q ! 0.5d0 * q / (pq + p*p) - p01_2 = pq_inv_2 * p01_1 * p ! 
0.5d0 * p / (q*q + pq) - - accu = 0.d0 - - iorder = iorder_p(1) + iorder_q(1) + iorder_p(1) + iorder_q(1) - iorder = iorder + shift_P(1) + shift_Q(1) - iorder = iorder + shift_P(1) + shift_Q(1) - !DIR$ VECTOR ALIGNED - do ix = 0, iorder - Ix_pol(ix) = 0.d0 - enddo - n_Ix = 0 - do ix = 0, iorder_p(1) - - ii = ix + shift_P(1) - a = P_new(ix,1) - if(abs(a) < thresh) cycle - - do jx = 0, iorder_q(1) - - jj = jx + shift_Q(1) - d = a * Q_new(jx,1) - if(abs(d) < thresh) cycle - - !DEC$ FORCEINLINE - call give_polynom_mult_center_x( P_center(1), Q_center(1), ii, jj & - , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dx, nx ) - !DEC$ FORCEINLINE - call add_poly_multiply(dx, nx, d, Ix_pol, n_Ix) - enddo - enddo - if(n_Ix == -1) then - return - endif - - iorder = iorder_p(2) + iorder_q(2) + iorder_p(2) + iorder_q(2) - iorder = iorder + shift_P(2) + shift_Q(2) - iorder = iorder + shift_P(2) + shift_Q(2) - !DIR$ VECTOR ALIGNED - do ix = 0, iorder - Iy_pol(ix) = 0.d0 - enddo - n_Iy = 0 - do iy = 0, iorder_p(2) - - if(abs(P_new(iy,2)) > thresh) then - - ii = iy + shift_P(2) - b = P_new(iy,2) - - do jy = 0, iorder_q(2) - - jj = jy + shift_Q(2) - e = b * Q_new(jy,2) - if(abs(e) < thresh) cycle - - !DEC$ FORCEINLINE - call give_polynom_mult_center_x( P_center(2), Q_center(2), ii, jj & - , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dy, ny ) - !DEC$ FORCEINLINE - call add_poly_multiply(dy, ny, e, Iy_pol, n_Iy) - enddo - endif - enddo - if(n_Iy == -1) then - return - endif - - iorder = iorder_p(3) + iorder_q(3) + iorder_p(3) + iorder_q(3) - iorder = iorder + shift_P(3) + shift_Q(3) - iorder = iorder + shift_P(3) + shift_Q(3) - do ix = 0, iorder - Iz_pol(ix) = 0.d0 - enddo - n_Iz = 0 - do iz = 0, iorder_p(3) - - if( abs(P_new(iz,3)) > thresh ) then - - ii = iz + shift_P(3) - c = P_new(iz,3) - - do jz = 0, iorder_q(3) - - jj = jz + shift_Q(3) - f = c * Q_new(jz,3) - if(abs(f) < thresh) cycle - - !DEC$ FORCEINLINE - call give_polynom_mult_center_x( P_center(3), Q_center(3), ii, jj & - , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dz, nz ) - !DEC$ FORCEINLINE - call add_poly_multiply(dz, nz, f, Iz_pol, n_Iz) - enddo - endif - enddo - if(n_Iz == -1) then - return - endif - - rho = p * q * pq_inv_2 - dist = (P_center(1) - Q_center(1)) * (P_center(1) - Q_center(1)) & - + (P_center(2) - Q_center(2)) * (P_center(2) - Q_center(2)) & - + (P_center(3) - Q_center(3)) * (P_center(3) - Q_center(3)) - const = dist*rho - - n_pt_tmp = n_Ix + n_Iy - do i = 0, n_pt_tmp - d_poly(i) = 0.d0 - enddo - - !DEC$ FORCEINLINE - call multiply_poly(Ix_pol, n_Ix, Iy_pol, n_Iy, d_poly, n_pt_tmp) - if(n_pt_tmp == -1) then - return - endif - n_pt_out = n_pt_tmp + n_Iz - do i = 0, n_pt_out - d1(i) = 0.d0 - enddo - - !DEC$ FORCEINLINE - call multiply_poly(d_poly, n_pt_tmp, Iz_pol, n_Iz, d1, n_pt_out) - accu = accu + rint_sum(n_pt_out, const, d1) - - general_primitive_integral_coul_shifted = fact_p * fact_q * accu * pi_5_2 * p_inv * q_inv / dsqrt(p_plus_q) - - return -end function general_primitive_integral_coul_shifted -!______________________________________________________________________________________________________________________ -!______________________________________________________________________________________________________________________ diff --git a/src/ao_tc_eff_map/two_e_1bgauss_coul_acc.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_coul_acc.irp.f deleted file mode 100644 index cee9183c..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_coul_acc.irp.f +++ /dev/null @@ -1,433 +0,0 @@ 
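! The _acc variant defined below evaluates the same four contributions as
! j1b_gauss_coul, but in a single pass over the primitive quadruplets: inside
! one loop over nuclei it builds both the bare Gaussian products (P1, Q1) and
! the products augmented by exp(-aA r^2) (P2, Q2), accumulates the four terms
! of f(r12) into cx, cy, cz, and only then contracts with the product of
! contraction coefficients (coef4). The commented TODO hints at a planned
! Schwartz-screened path for large primitive counts.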
-double precision function j1b_gauss_coul_acc(i, j, k, l) - - BEGIN_DOC - ! - ! integral in the AO basis: - ! i(r1) j(r1) f(r12) k(r2) l(r2) - ! - ! with: - ! f(r12) = - [ 0.5 / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] - ! = [ 1 / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! + (r2-RA)^2 exp(-aA r2A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! - END_DOC - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s, ii - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: p1_inv, q1_inv, p2_inv, q2_inv - double precision :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1 - double precision :: P2_new(0:max_dim,3), P2_center(3), fact_p2, pp2 - double precision :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1 - double precision :: Q2_new(0:max_dim,3), Q2_center(3), fact_q2, qq2 - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: expoii, factii, Centerii(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: general_primitive_integral_coul_shifted - !double precision :: j1b_gauss_coul_schwartz_accel - - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - ! TODO - !if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then - ! j1b_gauss_coul_schwartz_accel = j1b_gauss_coul_schwartz_accel(i, j, k, l) - ! return - !endif - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_coul_acc = 0.d0 - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P1_new, P1_center, pp1, fact_p1, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p1_inv = 1.d0 / pp1 - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q1_inv = 1.d0 / qq1 - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp1, P1_center, expoii, Centerii, factii, pp2, P2_center) - fact_p2 = fact_p1 * factii - p2_inv = 1.d0 / pp2 - call pol_modif_center( P1_center, P2_center, iorder_p, P1_new, P2_new) - - call gaussian_product(qq1, Q1_center, expoii, Centerii, factii, qq2, Q2_center) - fact_q2 = fact_q1 * factii - q2_inv = 1.d0 / qq2 - call pol_modif_center( Q1_center, Q2_center, iorder_q, Q1_new, Q2_new) - - - ! 
---------------------------------------------------------------------------------------------------- - ! [ 1 / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! ---------------------------------------------------------------------------------------------------- - - shift_Q = (/ 0, 0, 0 /) - - ! x term: - ff = P2_center(1) - Centerii(1) - - shift_P = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! y term: - ff = P2_center(2) - Centerii(2) - - shift_P = (/ 0, 2, 0 /) - cy = cy + expoii * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 1, 0 /) - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cy = cy + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! z term: - ff = P2_center(3) - Centerii(3) - - shift_P = (/ 0, 0, 2 /) - cz = cz + expoii * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 1 /) - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cz = cz + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - - - ! ---------------------------------------------------------------------------------------------------- - ! [ 1 / r12 ] \sum_A a_A [ (r2-RA)^2 exp(-aA r2A^2) - ! ---------------------------------------------------------------------------------------------------- - - shift_P = (/ 0, 0, 0 /) - - ! 
x term: - ff = Q2_center(1) - Centerii(1) - - shift_Q = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! y term: - ff = Q2_center(2) - Centerii(2) - - shift_Q = (/ 0, 2, 0 /) - cy = cy + expoii * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! z term: - ff = Q2_center(3) - Centerii(3) - - shift_Q = (/ 0, 0, 2 /) - cz = cz + expoii * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - - - ! ---------------------------------------------------------------------------------------------------- - ! - [ 1 / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) ] - ! ---------------------------------------------------------------------------------------------------- - - ! 
x term: - ff = P2_center(1) - Centerii(1) - gg = Q1_center(1) - Centerii(1) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! y term: - ff = P2_center(2) - Centerii(2) - gg = Q1_center(2) - Centerii(2) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! z term: - ff = P2_center(3) - Centerii(3) - gg = Q1_center(3) - Centerii(3) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - - - ! 
---------------------------------------------------------------------------------------------------- - ! - [ 1 / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! ---------------------------------------------------------------------------------------------------- - - ! x term: - ff = P1_center(1) - Centerii(1) - gg = Q2_center(1) - Centerii(1) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! y term: - ff = P1_center(2) - Centerii(2) - gg = Q2_center(2) - Centerii(2) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! 
z term: - ff = P1_center(3) - Centerii(3) - gg = Q2_center(3) - Centerii(3) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul_acc = j1b_gauss_coul_acc + coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - return -end function j1b_gauss_coul_acc diff --git a/src/ao_tc_eff_map/two_e_1bgauss_coul_debug.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_coul_debug.irp.f deleted file mode 100644 index 8ced59e4..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_coul_debug.irp.f +++ /dev/null @@ -1,397 +0,0 @@ -double precision function j1b_gauss_coul_debug(i, j, k, l) - - BEGIN_DOC - ! - ! integral in the AO basis: - ! i(r1) j(r1) f(r12) k(r2) l(r2) - ! - ! with: - ! f(r12) = - [ 0.5 / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] - ! = [ 1 / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! + (r2-RA)^2 exp(-aA r2A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! - END_DOC - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s, ii - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: p_inv, q_inv - double precision :: P_new_tmp(0:max_dim,3), P_center_tmp(3), fact_p_tmp, pp_tmp - double precision :: Q_new_tmp(0:max_dim,3), Q_center_tmp(3), fact_q_tmp, qq_tmp - double precision :: P_new(0:max_dim,3), P_center(3), fact_p, pp - double precision :: Q_new(0:max_dim,3), Q_center(3), fact_q, qq - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: expoii, factii, Centerii(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: general_primitive_integral_coul_shifted - - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_coul_debug = 0.d0 - - - ! 
------------------------------------------------------------------------------------------------------------------- - ! - ! [ 1 / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! - ! ------------------------------------------------------------------------------------------------------------------- - - shift_Q = (/ 0, 0, 0 /) - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - - ! pol centerd on P_center_tmp ==> centerd on P_center - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! x term: - - ff = P_center(1) - Centerii(1) - - shift_P = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul_debug = j1b_gauss_coul_debug + coef4 * cx - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - -! ! ------------------------------------------------------------------------------------------------------------------- -! ! -! ! [ 1 / r12 ] \sum_A a_A [ (r2-RA)^2 exp(-aA r2A^2) -! ! -! ! ------------------------------------------------------------------------------------------------------------------- -! -! shift_P = (/ 0, 0, 0 /) -! -! do p = 1, ao_prim_num(i) -! coef1 = ao_coef_normalized_ordered_transp(p, i) -! expo1 = ao_expo_ordered_transp(p, i) -! -! do q = 1, ao_prim_num(j) -! coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) -! expo2 = ao_expo_ordered_transp(q, j) -! -! call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & -! 
, I_power, J_power, I_center, J_center, dim1 ) -! p_inv = 1.d0 / pp -! -! do r = 1, ao_prim_num(k) -! coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) -! expo3 = ao_expo_ordered_transp(r, k) -! -! do s = 1, ao_prim_num(l) -! coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) -! expo4 = ao_expo_ordered_transp(s, l) -! -! call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & -! , K_power, L_power, K_center, L_center, dim1 ) -! -! cx = 0.d0 -! do ii = 1, nucl_num -! expoii = j1b_gauss_pen(ii) -! Centerii(1:3) = nucl_coord(ii, 1:3) -! -! call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) -! -! fact_q = fact_q_tmp * factii -! q_inv = 1.d0 / qq -! -! ! pol centerd on Q_center_tmp ==> centerd on Q_center -! call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) -! -! ! ---------------------------------------------------------------------------------------------------- -! ! x term: -! -! ff = Q_center(1) - Centerii(1) -! -! shift_Q = (/ 2, 0, 0 /) -! cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! shift_Q = (/ 1, 0, 0 /) -! cx = cx + expoii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! shift_Q = (/ 0, 0, 0 /) -! cx = cx + expoii * ff * ff * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! ! ---------------------------------------------------------------------------------------------------- -! -! enddo -! -! j1b_gauss_coul_debug = j1b_gauss_coul_debug + coef4 * cx -! enddo ! s -! enddo ! r -! enddo ! q -! enddo ! p -! -! ! ------------------------------------------------------------------------------------------------------------------- -! ! ------------------------------------------------------------------------------------------------------------------- - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! - [ 1 / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) ] - ! - ! 
------------------------------------------------------------------------------------------------------------------- - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - - ! pol centerd on P_center_tmp ==> centerd on P_center - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! x term: - - ff = P_center(1) - Centerii(1) - gg = Q_center(1) - Centerii(1) - - shift_P = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * ff * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul_debug = j1b_gauss_coul_debug - coef4 * cx - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - -! ! ------------------------------------------------------------------------------------------------------------------- -! ! -! ! - [ 1 / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] -! ! -! ! ------------------------------------------------------------------------------------------------------------------- -! -! do p = 1, ao_prim_num(i) -! coef1 = ao_coef_normalized_ordered_transp(p, i) -! expo1 = ao_expo_ordered_transp(p, i) -! -! do q = 1, ao_prim_num(j) -! coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) -! expo2 = ao_expo_ordered_transp(q, j) -! -! 
call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & -! , I_power, J_power, I_center, J_center, dim1 ) -! p_inv = 1.d0 / pp -! -! do r = 1, ao_prim_num(k) -! coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) -! expo3 = ao_expo_ordered_transp(r, k) -! -! do s = 1, ao_prim_num(l) -! coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) -! expo4 = ao_expo_ordered_transp(s, l) -! -! call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & -! , K_power, L_power, K_center, L_center, dim1 ) -! -! cx = 0.d0 -! do ii = 1, nucl_num -! expoii = j1b_gauss_pen(ii) -! Centerii(1:3) = nucl_coord(ii, 1:3) -! -! call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) -! -! fact_q = fact_q_tmp * factii -! q_inv = 1.d0 / qq -! -! ! pol centerd on Q_center_tmp ==> centerd on Q_center -! call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) -! -! ! ---------------------------------------------------------------------------------------------------- -! ! x term: -! -! ff = P_center(1) - Centerii(1) -! gg = Q_center(1) - Centerii(1) -! -! shift_P = (/ 1, 0, 0 /) -! shift_Q = (/ 1, 0, 0 /) -! cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! shift_P = (/ 1, 0, 0 /) -! shift_Q = (/ 0, 0, 0 /) -! cx = cx + expoii * gg * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! shift_P = (/ 0, 0, 0 /) -! shift_Q = (/ 1, 0, 0 /) -! cx = cx + expoii * ff * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! shift_P = (/ 0, 0, 0 /) -! shift_Q = (/ 0, 0, 0 /) -! cx = cx + expoii * ff * gg * general_primitive_integral_coul_shifted( dim1 & -! , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & -! , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) -! -! ! ---------------------------------------------------------------------------------------------------- -! -! enddo -! -! j1b_gauss_coul_debug = j1b_gauss_coul_debug - coef4 * cx -! -! enddo ! s -! enddo ! r -! enddo ! q -! enddo ! p -! -! ! ------------------------------------------------------------------------------------------------------------------- -! ! 
------------------------------------------------------------------------------------------------------------------- - - return -end function j1b_gauss_coul_debug - diff --git a/src/ao_tc_eff_map/two_e_1bgauss_coul_modifdebug.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_coul_modifdebug.irp.f deleted file mode 100644 index 753fff8f..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_coul_modifdebug.irp.f +++ /dev/null @@ -1,324 +0,0 @@ -double precision function j1b_gauss_coul_modifdebug(i, j, k, l) - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s, ii - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: p_inv, q_inv - double precision :: P_new_tmp(0:max_dim,3), P_center_tmp(3), fact_p_tmp, pp_tmp - double precision :: Q_new_tmp(0:max_dim,3), Q_center_tmp(3), fact_q_tmp, qq_tmp - double precision :: P_new(0:max_dim,3), P_center(3), fact_p, pp - double precision :: Q_new(0:max_dim,3), Q_center(3), fact_q, qq - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: expoii, factii, Centerii(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: general_primitive_integral_coul - double precision :: general_primitive_integral_coul_shifted - double precision :: ao_two_e_integral - - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_coul_modifdebug = 0.d0 - -! do ii = 1, nucl_num -! expoii = j1b_gauss_pen(ii) -! j1b_gauss_coul_modifdebug += expoii * ao_two_e_integral(i, j, k, l) -! enddo - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! [ 1 / r12 ] \sum_A a_A exp(-aA r1A^2) - ! - ! 
------------------------------------------------------------------------------------------------------------------- - - shift_P = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - P_new(:,:) = 0.d0 - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! x term: - - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul_modifdebug = j1b_gauss_coul_modifdebug + coef4 * cx - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! [ 1 / r12 ] \sum_A a_A exp(-aA r2A^2) - ! - ! 
------------------------------------------------------------------------------------------------------------------- - - shift_P = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p_inv = 1.d0 / pp - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - - cx = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) - fact_q = fact_q_tmp * factii - Q_inv = 1.d0 / qq - call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) - - ! ---------------------------------------------------------------------------------------------------- - ! x term: - - cx = cx + expoii * general_primitive_integral_coul_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_coul_modifdebug = j1b_gauss_coul_modifdebug + coef4 * cx - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - return -end function j1b_gauss_coul_modifdebug - - - - -double precision function general_primitive_integral_coul(dim, & - P_new,P_center,fact_p,p,p_inv,iorder_p, & - Q_new,Q_center,fact_q,q,q_inv,iorder_q) - implicit none - BEGIN_DOC - ! Computes the integral where p,q,r,s are Gaussian primitives - END_DOC - integer,intent(in) :: dim - include 'utils/constants.include.F' - double precision, intent(in) :: P_new(0:max_dim,3),P_center(3),fact_p,p,p_inv - double precision, intent(in) :: Q_new(0:max_dim,3),Q_center(3),fact_q,q,q_inv - integer, intent(in) :: iorder_p(3) - integer, intent(in) :: iorder_q(3) - - double precision :: r_cut,gama_r_cut,rho,dist - double precision :: dx(0:max_dim),Ix_pol(0:max_dim),dy(0:max_dim),Iy_pol(0:max_dim),dz(0:max_dim),Iz_pol(0:max_dim) - integer :: n_Ix,n_Iy,n_Iz,nx,ny,nz - double precision :: bla - integer :: ix,iy,iz,jx,jy,jz,i - double precision :: a,b,c,d,e,f,accu,pq,const - double precision :: pq_inv, p10_1, p10_2, p01_1, p01_2,pq_inv_2 - integer :: n_pt_tmp,n_pt_out, iorder - double precision :: d1(0:max_dim),d_poly(0:max_dim),rint,d1_screened(0:max_dim) - - general_primitive_integral_coul = 0.d0 - - !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: dx,Ix_pol,dy,Iy_pol,dz,Iz_pol - !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: d1, d_poly - - ! Gaussian Product - ! ---------------- - - pq = p_inv*0.5d0*q_inv - pq_inv = 0.5d0/(p+q) - p10_1 = q*pq ! 1/(2p) - p01_1 = p*pq ! 
1/(2q) - pq_inv_2 = pq_inv+pq_inv - p10_2 = pq_inv_2 * p10_1*q !0.5d0*q/(pq + p*p) - p01_2 = pq_inv_2 * p01_1*p !0.5d0*p/(q*q + pq) - - - accu = 0.d0 - iorder = iorder_p(1)+iorder_q(1)+iorder_p(1)+iorder_q(1) - do ix=0,iorder - Ix_pol(ix) = 0.d0 - enddo - n_Ix = 0 - do ix = 0, iorder_p(1) - if (abs(P_new(ix,1)) < thresh) cycle - a = P_new(ix,1) - do jx = 0, iorder_q(1) - d = a*Q_new(jx,1) - if (abs(d) < thresh) cycle - !DIR$ FORCEINLINE - call give_polynom_mult_center_x(P_center(1),Q_center(1),ix,jx,p,q,iorder,pq_inv,pq_inv_2,p10_1,p01_1,p10_2,p01_2,dx,nx) - !DIR$ FORCEINLINE - call add_poly_multiply(dx,nx,d,Ix_pol,n_Ix) - enddo - enddo - if (n_Ix == -1) then - return - endif - iorder = iorder_p(2)+iorder_q(2)+iorder_p(2)+iorder_q(2) - do ix=0, iorder - Iy_pol(ix) = 0.d0 - enddo - n_Iy = 0 - do iy = 0, iorder_p(2) - if (abs(P_new(iy,2)) > thresh) then - b = P_new(iy,2) - do jy = 0, iorder_q(2) - e = b*Q_new(jy,2) - if (abs(e) < thresh) cycle - !DIR$ FORCEINLINE - call give_polynom_mult_center_x(P_center(2),Q_center(2),iy,jy,p,q,iorder,pq_inv,pq_inv_2,p10_1,p01_1,p10_2,p01_2,dy,ny) - !DIR$ FORCEINLINE - call add_poly_multiply(dy,ny,e,Iy_pol,n_Iy) - enddo - endif - enddo - if (n_Iy == -1) then - return - endif - - iorder = iorder_p(3)+iorder_q(3)+iorder_p(3)+iorder_q(3) - do ix=0,iorder - Iz_pol(ix) = 0.d0 - enddo - n_Iz = 0 - do iz = 0, iorder_p(3) - if (abs(P_new(iz,3)) > thresh) then - c = P_new(iz,3) - do jz = 0, iorder_q(3) - f = c*Q_new(jz,3) - if (abs(f) < thresh) cycle - !DIR$ FORCEINLINE - call give_polynom_mult_center_x(P_center(3),Q_center(3),iz,jz,p,q,iorder,pq_inv,pq_inv_2,p10_1,p01_1,p10_2,p01_2,dz,nz) - !DIR$ FORCEINLINE - call add_poly_multiply(dz,nz,f,Iz_pol,n_Iz) - enddo - endif - enddo - if (n_Iz == -1) then - return - endif - - rho = p*q *pq_inv_2 - dist = (P_center(1) - Q_center(1))*(P_center(1) - Q_center(1)) + & - (P_center(2) - Q_center(2))*(P_center(2) - Q_center(2)) + & - (P_center(3) - Q_center(3))*(P_center(3) - Q_center(3)) - const = dist*rho - - n_pt_tmp = n_Ix+n_Iy - do i=0,n_pt_tmp - d_poly(i)=0.d0 - enddo - - !DIR$ FORCEINLINE - call multiply_poly(Ix_pol,n_Ix,Iy_pol,n_Iy,d_poly,n_pt_tmp) - if (n_pt_tmp == -1) then - return - endif - n_pt_out = n_pt_tmp+n_Iz - do i=0,n_pt_out - d1(i)=0.d0 - enddo - - !DIR$ FORCEINLINE - call multiply_poly(d_poly ,n_pt_tmp ,Iz_pol,n_Iz,d1,n_pt_out) - double precision :: rint_sum - accu = accu + rint_sum(n_pt_out,const,d1) - - general_primitive_integral_coul = fact_p * fact_q * accu *pi_5_2*p_inv*q_inv/dsqrt(p+q) -end function general_primitive_integral_coul diff --git a/src/ao_tc_eff_map/two_e_1bgauss_coulerf.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_coulerf.irp.f deleted file mode 100644 index 92512bd7..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_coulerf.irp.f +++ /dev/null @@ -1,102 +0,0 @@ -double precision function j1b_gauss_coulerf(i, j, k, l) - - BEGIN_DOC - ! - ! integral in the AO basis: - ! i(r1) j(r1) f(r12) k(r2) l(r2) - ! - ! with: - ! f(r12) = - [ (0.5 - 0.5 erf(mu r12)) / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] - ! = [ (1 - erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! + (r2-RA)^2 exp(-aA r2A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! 
- END_DOC - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1, p1_inv - double precision :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1, q1_inv - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: j1b_gauss_coulerf_schwartz - - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then - j1b_gauss_coulerf = j1b_gauss_coulerf_schwartz(i, j, k, l) - return - endif - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_coulerf = 0.d0 - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P1_new, P1_center, pp1, fact_p1, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p1_inv = 1.d0 / pp1 - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q1_inv = 1.d0 / qq1 - - call get_cxcycz( dim1, cx, cy, cz & - , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & - , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) - - j1b_gauss_coulerf = j1b_gauss_coulerf + coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - return -end function j1b_gauss_coulerf - diff --git a/src/ao_tc_eff_map/two_e_1bgauss_erf.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_erf.irp.f deleted file mode 100644 index f5ff5499..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_erf.irp.f +++ /dev/null @@ -1,854 +0,0 @@ -double precision function j1b_gauss_erf(i, j, k, l) - - BEGIN_DOC - ! - ! integral in the AO basis: - ! i(r1) j(r1) f(r12) k(r2) l(r2) - ! - ! with: - ! f(r12) = - [ -0.5 erf(mu r12) / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] - ! = - [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! + (r2-RA)^2 exp(-aA r2A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! 
- END_DOC - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s, ii - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: p_inv, q_inv - double precision :: P_new_tmp(0:max_dim,3), P_center_tmp(3), fact_p_tmp, pp_tmp - double precision :: Q_new_tmp(0:max_dim,3), Q_center_tmp(3), fact_q_tmp, qq_tmp - double precision :: P_new(0:max_dim,3), P_center(3), fact_p, pp - double precision :: Q_new(0:max_dim,3), Q_center(3), fact_q, qq - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: expoii, factii, Centerii(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: general_primitive_integral_erf_shifted - - PROVIDE mu_erf - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_erf = 0.d0 - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! - [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! - ! ------------------------------------------------------------------------------------------------------------------- - - shift_Q(1) = 0 - shift_Q(2) = 0 - shift_Q(3) = 0 - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - - ! pol centerd on P_center_tmp ==> centerd on P_center - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! 
x term: - - shift_P(2) = 0 - shift_P(3) = 0 - - ff = P_center(1) - Centerii(1) - - shift_P(1) = 2 - cx = cx + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 1 - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 0 - cx = cx + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - shift_P(1) = 0 - shift_P(3) = 0 - - ff = P_center(2) - Centerii(2) - - shift_P(2) = 2 - cy = cy + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 1 - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 0 - cy = cy + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! z term: - - shift_P(1) = 0 - shift_P(2) = 0 - - ff = P_center(3) - Centerii(3) - - shift_P(3) = 2 - cz = cz + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 1 - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 0 - cz = cz + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_erf = j1b_gauss_erf - coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! - [ erf(mu r12) / r12 ] \sum_A a_A [ (r2-RA)^2 exp(-aA r2A^2) - ! - ! 
------------------------------------------------------------------------------------------------------------------- - - shift_P(1) = 0 - shift_P(2) = 0 - shift_P(3) = 0 - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p_inv = 1.d0 / pp - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) - - fact_q = fact_q_tmp * factii - q_inv = 1.d0 / qq - - ! pol centerd on Q_center_tmp ==> centerd on Q_center - call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) - - ! ---------------------------------------------------------------------------------------------------- - ! x term: - - shift_Q(2) = 0 - shift_Q(3) = 0 - - ff = Q_center(1) - Centerii(1) - - shift_Q(1) = 2 - cx = cx + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q(1) = 1 - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q(1) = 0 - cx = cx + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - shift_Q(1) = 0 - shift_Q(3) = 0 - - ff = Q_center(2) - Centerii(2) - - shift_Q(2) = 2 - cy = cy + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q(2) = 1 - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q(2) = 0 - cy = cy + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! 
z term: - - shift_Q(1) = 0 - shift_Q(2) = 0 - - ff = Q_center(3) - Centerii(3) - - shift_Q(3) = 2 - cz = cz + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q(3) = 1 - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_Q(3) = 0 - cz = cz + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_erf = j1b_gauss_erf - coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) ] - ! - ! ------------------------------------------------------------------------------------------------------------------- - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new_tmp, P_center_tmp, pp_tmp, fact_p_tmp, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new, Q_center, qq, fact_q, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q_inv = 1.d0 / qq - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp_tmp, P_center_tmp, expoii, Centerii, factii, pp, P_center) - - fact_p = fact_p_tmp * factii - p_inv = 1.d0 / pp - - ! pol centerd on P_center_tmp ==> centerd on P_center - call pol_modif_center( P_center_tmp, P_center, iorder_p, P_new_tmp, P_new) - - ! ---------------------------------------------------------------------------------------------------- - ! 
x term: - - shift_P(2) = 0 - shift_P(3) = 0 - shift_Q(2) = 0 - shift_Q(3) = 0 - - ff = P_center(1) - Centerii(1) - gg = Q_center(1) - Centerii(1) - - shift_P(1) = 1 - shift_Q(1) = 1 - cx = cx + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 1 - shift_Q(1) = 0 - cx = cx + expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 0 - shift_Q(1) = 1 - cx = cx + expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 0 - shift_Q(1) = 0 - cx = cx + expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - shift_P(1) = 0 - shift_P(3) = 0 - shift_Q(1) = 0 - shift_Q(3) = 0 - - ff = P_center(2) - Centerii(2) - gg = Q_center(2) - Centerii(2) - - shift_P(2) = 1 - shift_Q(2) = 1 - cy = cy + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 1 - shift_Q(2) = 0 - cy = cy + expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 0 - shift_Q(2) = 1 - cy = cy + expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 0 - shift_Q(2) = 0 - cy = cy + expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! 
z term: - - shift_P(1) = 0 - shift_P(2) = 0 - shift_Q(1) = 0 - shift_Q(2) = 0 - - ff = P_center(3) - Centerii(3) - gg = Q_center(3) - Centerii(3) - - shift_P(3) = 1 - shift_Q(3) = 1 - cz = cz + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 1 - shift_Q(3) = 0 - cz = cz + expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 0 - shift_Q(3) = 1 - cz = cz + expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 0 - shift_Q(3) = 0 - cz = cz + expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_erf = j1b_gauss_erf + coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - - ! ------------------------------------------------------------------------------------------------------------------- - ! - ! [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! - ! ------------------------------------------------------------------------------------------------------------------- - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P_new, P_center, pp, fact_p, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p_inv = 1.d0 / pp - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q_new_tmp, Q_center_tmp, qq_tmp, fact_q_tmp, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(qq_tmp, Q_center_tmp, expoii, Centerii, factii, qq, Q_center) - - fact_q = fact_q_tmp * factii - q_inv = 1.d0 / qq - - ! pol centerd on Q_center_tmp ==> centerd on Q_center - call pol_modif_center( Q_center_tmp, Q_center, iorder_q, Q_new_tmp, Q_new) - - ! ---------------------------------------------------------------------------------------------------- - ! 
x term: - - shift_P(2) = 0 - shift_P(3) = 0 - shift_Q(2) = 0 - shift_Q(3) = 0 - - ff = P_center(1) - Centerii(1) - gg = Q_center(1) - Centerii(1) - - shift_P(1) = 1 - shift_Q(1) = 1 - cx = cx + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 1 - shift_Q(1) = 0 - cx = cx + expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 0 - shift_Q(1) = 1 - cx = cx + expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(1) = 0 - shift_Q(1) = 0 - cx = cx + expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! y term: - - shift_P(1) = 0 - shift_P(3) = 0 - shift_Q(1) = 0 - shift_Q(3) = 0 - - ff = P_center(2) - Centerii(2) - gg = Q_center(2) - Centerii(2) - - shift_P(2) = 1 - shift_Q(2) = 1 - cy = cy + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 1 - shift_Q(2) = 0 - cy = cy + expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 0 - shift_Q(2) = 1 - cy = cy + expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(2) = 0 - shift_Q(2) = 0 - cy = cy + expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - ! ---------------------------------------------------------------------------------------------------- - ! 
z term: - - shift_P(1) = 0 - shift_P(2) = 0 - shift_Q(1) = 0 - shift_Q(2) = 0 - - ff = P_center(3) - Centerii(3) - gg = Q_center(3) - Centerii(3) - - shift_P(3) = 1 - shift_Q(3) = 1 - cz = cz + expoii * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 1 - shift_Q(3) = 0 - cz = cz + expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 0 - shift_Q(3) = 1 - cz = cz + expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - shift_P(3) = 0 - shift_Q(3) = 0 - cz = cz + expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P_new, P_center, fact_p, pp, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, qq, q_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_erf = j1b_gauss_erf + coef4 * ( cx + cy + cz ) - - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - ! ------------------------------------------------------------------------------------------------------------------- - ! ------------------------------------------------------------------------------------------------------------------- - - - return -end function j1b_gauss_erf - - - - -!______________________________________________________________________________________________________________________ -!______________________________________________________________________________________________________________________ - -double precision function general_primitive_integral_erf_shifted( dim & - , P_new, P_center, fact_p, p, p_inv, iorder_p, shift_P & - , Q_new, Q_center, fact_q, q, q_inv, iorder_q, shift_Q ) - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: dim - integer, intent(in) :: iorder_p(3), shift_P(3) - integer, intent(in) :: iorder_q(3), shift_Q(3) - double precision, intent(in) :: P_new(0:max_dim,3), P_center(3), fact_p, p, p_inv - double precision, intent(in) :: Q_new(0:max_dim,3), Q_center(3), fact_q, q, q_inv - - integer :: n_Ix, n_Iy, n_Iz, nx, ny, nz - integer :: ix, iy, iz, jx, jy, jz, i - integer :: n_pt_tmp, n_pt_out, iorder - integer :: ii, jj - double precision :: rho, dist - double precision :: dx(0:max_dim), Ix_pol(0:max_dim) - double precision :: dy(0:max_dim), Iy_pol(0:max_dim) - double precision :: dz(0:max_dim), Iz_pol(0:max_dim) - double precision :: a, b, c, d, e, f, accu, pq, const - double precision :: pq_inv, p10_1, p10_2, p01_1, p01_2, pq_inv_2 - double precision :: d1(0:max_dim), d_poly(0:max_dim) - double precision :: p_plus_q - - double precision :: rint_sum - - general_primitive_integral_erf_shifted = 0.d0 - - !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: dx, Ix_pol, dy, Iy_pol, dz, Iz_pol - !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: d1, d_poly - - ! Gaussian Product - ! ---------------- - p_plus_q = (p+q) * ( (p*q)/(p+q) + mu_erf*mu_erf ) / (mu_erf*mu_erf) - pq = p_inv * 0.5d0 * q_inv - pq_inv = 0.5d0 / p_plus_q - p10_1 = q * pq ! 1/(2p) - p01_1 = p * pq ! 1/(2q) - pq_inv_2 = pq_inv + pq_inv - p10_2 = pq_inv_2 * p10_1 * q ! 0.5d0 * q / (pq + p*p) - p01_2 = pq_inv_2 * p01_1 * p ! 
0.5d0 * p / (q*q + pq) - - accu = 0.d0 - - iorder = iorder_p(1) + iorder_q(1) + iorder_p(1) + iorder_q(1) - iorder = iorder + shift_P(1) + shift_Q(1) - iorder = iorder + shift_P(1) + shift_Q(1) - !DIR$ VECTOR ALIGNED - do ix = 0, iorder - Ix_pol(ix) = 0.d0 - enddo - n_Ix = 0 - do ix = 0, iorder_p(1) - - ii = ix + shift_P(1) - a = P_new(ix,1) - if(abs(a) < thresh) cycle - - do jx = 0, iorder_q(1) - - jj = jx + shift_Q(1) - d = a * Q_new(jx,1) - if(abs(d) < thresh) cycle - - !DEC$ FORCEINLINE - call give_polynom_mult_center_x( P_center(1), Q_center(1), ii, jj & - , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dx, nx ) - !DEC$ FORCEINLINE - call add_poly_multiply(dx, nx, d, Ix_pol, n_Ix) - enddo - enddo - if(n_Ix == -1) then - return - endif - - iorder = iorder_p(2) + iorder_q(2) + iorder_p(2) + iorder_q(2) - iorder = iorder + shift_P(2) + shift_Q(2) - iorder = iorder + shift_P(2) + shift_Q(2) - !DIR$ VECTOR ALIGNED - do ix = 0, iorder - Iy_pol(ix) = 0.d0 - enddo - n_Iy = 0 - do iy = 0, iorder_p(2) - - if(abs(P_new(iy,2)) > thresh) then - - ii = iy + shift_P(2) - b = P_new(iy,2) - - do jy = 0, iorder_q(2) - - jj = jy + shift_Q(2) - e = b * Q_new(jy,2) - if(abs(e) < thresh) cycle - - !DEC$ FORCEINLINE - call give_polynom_mult_center_x( P_center(2), Q_center(2), ii, jj & - , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dy, ny ) - !DEC$ FORCEINLINE - call add_poly_multiply(dy, ny, e, Iy_pol, n_Iy) - enddo - endif - enddo - if(n_Iy == -1) then - return - endif - - iorder = iorder_p(3) + iorder_q(3) + iorder_p(3) + iorder_q(3) - iorder = iorder + shift_P(3) + shift_Q(3) - iorder = iorder + shift_P(3) + shift_Q(3) - do ix = 0, iorder - Iz_pol(ix) = 0.d0 - enddo - n_Iz = 0 - do iz = 0, iorder_p(3) - - if( abs(P_new(iz,3)) > thresh ) then - - ii = iz + shift_P(3) - c = P_new(iz,3) - - do jz = 0, iorder_q(3) - - jj = jz + shift_Q(3) - f = c * Q_new(jz,3) - if(abs(f) < thresh) cycle - - !DEC$ FORCEINLINE - call give_polynom_mult_center_x( P_center(3), Q_center(3), ii, jj & - , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dz, nz ) - !DEC$ FORCEINLINE - call add_poly_multiply(dz, nz, f, Iz_pol, n_Iz) - enddo - endif - enddo - if(n_Iz == -1) then - return - endif - - rho = p * q * pq_inv_2 - dist = (P_center(1) - Q_center(1)) * (P_center(1) - Q_center(1)) & - + (P_center(2) - Q_center(2)) * (P_center(2) - Q_center(2)) & - + (P_center(3) - Q_center(3)) * (P_center(3) - Q_center(3)) - const = dist*rho - - n_pt_tmp = n_Ix + n_Iy - do i = 0, n_pt_tmp - d_poly(i) = 0.d0 - enddo - - !DEC$ FORCEINLINE - call multiply_poly(Ix_pol, n_Ix, Iy_pol, n_Iy, d_poly, n_pt_tmp) - if(n_pt_tmp == -1) then - return - endif - n_pt_out = n_pt_tmp + n_Iz - do i = 0, n_pt_out - d1(i) = 0.d0 - enddo - - !DEC$ FORCEINLINE - call multiply_poly(d_poly, n_pt_tmp, Iz_pol, n_Iz, d1, n_pt_out) - accu = accu + rint_sum(n_pt_out, const, d1) - - general_primitive_integral_erf_shifted = fact_p * fact_q * accu * pi_5_2 * p_inv * q_inv / dsqrt(p_plus_q) - - return -end function general_primitive_integral_erf_shifted -!______________________________________________________________________________________________________________________ -!______________________________________________________________________________________________________________________ diff --git a/src/ao_tc_eff_map/two_e_1bgauss_erf_acc.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_erf_acc.irp.f deleted file mode 100644 index 54210c44..00000000 --- a/src/ao_tc_eff_map/two_e_1bgauss_erf_acc.irp.f +++ /dev/null @@ -1,433 +0,0 @@ -double 
precision function j1b_gauss_erf_acc(i, j, k, l) - - BEGIN_DOC - ! - ! integral in the AO basis: - ! i(r1) j(r1) f(r12) k(r2) l(r2) - ! - ! with: - ! f(r12) = - [ -0.5 erf(mu r12) / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] - ! = - [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! + (r2-RA)^2 exp(-aA r2A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) - ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! - END_DOC - - include 'utils/constants.include.F' - - implicit none - - integer, intent(in) :: i, j, k, l - - integer :: p, q, r, s, ii - integer :: num_i, num_j, num_k, num_l, num_ii - integer :: I_power(3), J_power(3), K_power(3), L_power(3) - integer :: iorder_p(3), iorder_q(3) - integer :: shift_P(3), shift_Q(3) - integer :: dim1 - - double precision :: coef1, coef2, coef3, coef4 - double precision :: expo1, expo2, expo3, expo4 - double precision :: p1_inv, q1_inv, p2_inv, q2_inv - double precision :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1 - double precision :: P2_new(0:max_dim,3), P2_center(3), fact_p2, pp2 - double precision :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1 - double precision :: Q2_new(0:max_dim,3), Q2_center(3), fact_q2, qq2 - double precision :: I_center(3), J_center(3), K_center(3), L_center(3) - double precision :: expoii, factii, Centerii(3) - double precision :: ff, gg, cx, cy, cz - - double precision :: general_primitive_integral_erf_shifted - !double precision :: j1b_gauss_erf_schwartz_accel - - PROVIDE j1b_gauss_pen - - dim1 = n_pt_max_integrals - - ! TODO - !if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then - ! j1b_gauss_erf_schwartz_accel = j1b_gauss_erf_schwartz_accel(i, j, k, l) - ! return - !endif - - num_i = ao_nucl(i) - num_j = ao_nucl(j) - num_k = ao_nucl(k) - num_l = ao_nucl(l) - - do p = 1, 3 - I_power(p) = ao_power(i,p) - J_power(p) = ao_power(j,p) - K_power(p) = ao_power(k,p) - L_power(p) = ao_power(l,p) - I_center(p) = nucl_coord(num_i,p) - J_center(p) = nucl_coord(num_j,p) - K_center(p) = nucl_coord(num_k,p) - L_center(p) = nucl_coord(num_l,p) - enddo - - j1b_gauss_erf_acc = 0.d0 - - do p = 1, ao_prim_num(i) - coef1 = ao_coef_normalized_ordered_transp(p, i) - expo1 = ao_expo_ordered_transp(p, i) - - do q = 1, ao_prim_num(j) - coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) - expo2 = ao_expo_ordered_transp(q, j) - - call give_explicit_poly_and_gaussian( P1_new, P1_center, pp1, fact_p1, iorder_p, expo1, expo2 & - , I_power, J_power, I_center, J_center, dim1 ) - p1_inv = 1.d0 / pp1 - - do r = 1, ao_prim_num(k) - coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) - expo3 = ao_expo_ordered_transp(r, k) - - do s = 1, ao_prim_num(l) - coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) - expo4 = ao_expo_ordered_transp(s, l) - - call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & - , K_power, L_power, K_center, L_center, dim1 ) - q1_inv = 1.d0 / qq1 - - cx = 0.d0 - cy = 0.d0 - cz = 0.d0 - do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) - Centerii(1:3) = nucl_coord(ii, 1:3) - - call gaussian_product(pp1, P1_center, expoii, Centerii, factii, pp2, P2_center) - fact_p2 = fact_p1 * factii - p2_inv = 1.d0 / pp2 - call pol_modif_center( P1_center, P2_center, iorder_p, P1_new, P2_new) - - call gaussian_product(qq1, Q1_center, expoii, Centerii, factii, qq2, Q2_center) - fact_q2 = fact_q1 * factii - q2_inv = 1.d0 / qq2 - call pol_modif_center( Q1_center, Q2_center, iorder_q, Q1_new, Q2_new) - - - ! 
---------------------------------------------------------------------------------------------------- - ! [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) - ! ---------------------------------------------------------------------------------------------------- - - shift_Q = (/ 0, 0, 0 /) - - ! x term: - ff = P2_center(1) - Centerii(1) - - shift_P = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! y term: - ff = P2_center(2) - Centerii(2) - - shift_P = (/ 0, 2, 0 /) - cy = cy + expoii * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 1, 0 /) - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cy = cy + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! z term: - ff = P2_center(3) - Centerii(3) - - shift_P = (/ 0, 0, 2 /) - cz = cz + expoii * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 1 /) - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_P = (/ 0, 0, 0 /) - cz = cz + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - - - ! ---------------------------------------------------------------------------------------------------- - ! [ erf(mu r12) / r12 ] \sum_A a_A [ (r2-RA)^2 exp(-aA r2A^2) - ! ---------------------------------------------------------------------------------------------------- - - shift_P = (/ 0, 0, 0 /) - - ! 
x term: - ff = Q2_center(1) - Centerii(1) - - shift_Q = (/ 2, 0, 0 /) - cx = cx + expoii * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 1, 0, 0 /) - cx = cx + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cx = cx + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! y term: - ff = Q2_center(2) - Centerii(2) - - shift_Q = (/ 0, 2, 0 /) - cy = cy + expoii * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 1, 0 /) - cy = cy + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cy = cy + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! z term: - ff = Q2_center(3) - Centerii(3) - - shift_Q = (/ 0, 0, 2 /) - cz = cz + expoii * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 1 /) - cz = cz + expoii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_Q = (/ 0, 0, 0 /) - cz = cz + expoii * ff * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - - - ! ---------------------------------------------------------------------------------------------------- - ! - [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) ] - ! ---------------------------------------------------------------------------------------------------- - - ! 
x term: - ff = P2_center(1) - Centerii(1) - gg = Q1_center(1) - Centerii(1) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! y term: - ff = P2_center(2) - Centerii(2) - gg = Q1_center(2) - Centerii(2) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! z term: - ff = P2_center(3) - Centerii(3) - gg = Q1_center(3) - Centerii(3) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & - , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) - - ! ---------------------------------------------------------------------------------------------------- - - - - ! ---------------------------------------------------------------------------------------------------- - ! 
- [ erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] - ! ---------------------------------------------------------------------------------------------------- - - ! x term: - ff = P1_center(1) - Centerii(1) - gg = Q2_center(1) - Centerii(1) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 1, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 1, 0, 0 /) - cx = cx - expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cx = cx - expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! y term: - ff = P1_center(2) - Centerii(2) - gg = Q2_center(2) - Centerii(2) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 1, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 1, 0 /) - cy = cy - expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cy = cy - expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! z term: - ff = P1_center(3) - Centerii(3) - gg = Q2_center(3) - Centerii(3) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 1 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * gg * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 1 /) - cz = cz - expoii * ff * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - shift_p = (/ 0, 0, 0 /) - shift_Q = (/ 0, 0, 0 /) - cz = cz - expoii * ff * gg * general_primitive_integral_erf_shifted( dim1 & - , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & - , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) - - ! 
---------------------------------------------------------------------------------------------------- - - enddo - - j1b_gauss_erf_acc = j1b_gauss_erf_acc - coef4 * ( cx + cy + cz ) - enddo ! s - enddo ! r - enddo ! q - enddo ! p - - return -end function j1b_gauss_erf_acc diff --git a/src/ao_tc_eff_map/two_e_1bgauss_coulerf_schwartz.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_j1.irp.f similarity index 86% rename from src/ao_tc_eff_map/two_e_1bgauss_coulerf_schwartz.irp.f rename to src/ao_tc_eff_map/two_e_1bgauss_j1.irp.f index f2ba8276..c36ee9b4 100644 --- a/src/ao_tc_eff_map/two_e_1bgauss_coulerf_schwartz.irp.f +++ b/src/ao_tc_eff_map/two_e_1bgauss_j1.irp.f @@ -1,4 +1,106 @@ -double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) +! --- + +double precision function j1b_gauss_2e_j1(i, j, k, l) + + BEGIN_DOC + ! + ! integral in the AO basis: + ! i(r1) j(r1) f(r12) k(r2) l(r2) + ! + ! with: + ! f(r12) = - [ (0.5 - 0.5 erf(mu r12)) / r12 ] (r1-r2) \cdot \sum_A (-2 a_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] + ! = [ (1 - erf(mu r12) / r12 ] \sum_A a_A [ (r1-RA)^2 exp(-aA r1A^2) + ! + (r2-RA)^2 exp(-aA r2A^2) + ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) + ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] + ! + END_DOC + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: i, j, k, l + + integer :: p, q, r, s + integer :: num_i, num_j, num_k, num_l, num_ii + integer :: I_power(3), J_power(3), K_power(3), L_power(3) + integer :: iorder_p(3), iorder_q(3) + integer :: shift_P(3), shift_Q(3) + integer :: dim1 + + double precision :: coef1, coef2, coef3, coef4 + double precision :: expo1, expo2, expo3, expo4 + double precision :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1, p1_inv + double precision :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1, q1_inv + double precision :: I_center(3), J_center(3), K_center(3), L_center(3) + double precision :: ff, gg, cx, cy, cz + + double precision :: j1b_gauss_2e_j1_schwartz + + if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then + j1b_gauss_2e_j1 = j1b_gauss_2e_j1_schwartz(i, j, k, l) + return + endif + + num_i = ao_nucl(i) + num_j = ao_nucl(j) + num_k = ao_nucl(k) + num_l = ao_nucl(l) + + do p = 1, 3 + I_power(p) = ao_power(i,p) + J_power(p) = ao_power(j,p) + K_power(p) = ao_power(k,p) + L_power(p) = ao_power(l,p) + I_center(p) = nucl_coord(num_i,p) + J_center(p) = nucl_coord(num_j,p) + K_center(p) = nucl_coord(num_k,p) + L_center(p) = nucl_coord(num_l,p) + enddo + + j1b_gauss_2e_j1 = 0.d0 + + do p = 1, ao_prim_num(i) + coef1 = ao_coef_normalized_ordered_transp(p, i) + expo1 = ao_expo_ordered_transp(p, i) + + do q = 1, ao_prim_num(j) + coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) + expo2 = ao_expo_ordered_transp(q, j) + + call give_explicit_poly_and_gaussian( P1_new, P1_center, pp1, fact_p1, iorder_p, expo1, expo2 & + , I_power, J_power, I_center, J_center, dim1 ) + p1_inv = 1.d0 / pp1 + + do r = 1, ao_prim_num(k) + coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) + expo3 = ao_expo_ordered_transp(r, k) + + do s = 1, ao_prim_num(l) + coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) + expo4 = ao_expo_ordered_transp(s, l) + + call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & + , K_power, L_power, K_center, L_center, dim1 ) + q1_inv = 1.d0 / qq1 + + call get_cxcycz_j1( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + + j1b_gauss_2e_j1 = 
j1b_gauss_2e_j1 + coef4 * ( cx + cy + cz ) + enddo ! s + enddo ! r + enddo ! q + enddo ! p + + return +end function j1b_gauss_2e_j1 + +! --- + +double precision function j1b_gauss_2e_j1_schwartz(i, j, k, l) BEGIN_DOC ! @@ -35,7 +137,7 @@ double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) double precision :: schwartz_ij, thr double precision, allocatable :: schwartz_kl(:,:) - PROVIDE j1b_gauss_pen + PROVIDE j1b_pen dim1 = n_pt_max_integrals thr = ao_integrals_threshold * ao_integrals_threshold @@ -73,9 +175,9 @@ double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) , K_power, L_power, K_center, L_center, dim1 ) q1_inv = 1.d0 / qq1 - call get_cxcycz( dim1, cx, cy, cz & - , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q & - , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + call get_cxcycz_j1( dim1, cx, cy, cz & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) schwartz_kl(s,r) = coef4 * dabs( cx + cy + cz ) schwartz_kl(0,r) = max( schwartz_kl(0,r) , schwartz_kl(s,r) ) @@ -85,7 +187,7 @@ double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) enddo - j1b_gauss_coulerf_schwartz = 0.d0 + j1b_gauss_2e_j1_schwartz = 0.d0 do p = 1, ao_prim_num(i) expo1 = ao_expo_ordered_transp(p, i) @@ -99,9 +201,9 @@ double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) , I_power, J_power, I_center, J_center, dim1 ) p1_inv = 1.d0 / pp1 - call get_cxcycz( dim1, cx, cy, cz & - , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & - , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p ) + call get_cxcycz_j1( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p ) schwartz_ij = coef2 * coef2 * dabs( cx + cy + cz ) if( schwartz_kl(0,0) * schwartz_ij < thr ) cycle @@ -120,11 +222,11 @@ double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) , K_power, L_power, K_center, L_center, dim1 ) q1_inv = 1.d0 / qq1 - call get_cxcycz( dim1, cx, cy, cz & - , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & - , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + call get_cxcycz_j1( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) - j1b_gauss_coulerf_schwartz = j1b_gauss_coulerf_schwartz + coef4 * ( cx + cy + cz ) + j1b_gauss_2e_j1_schwartz = j1b_gauss_2e_j1_schwartz + coef4 * ( cx + cy + cz ) enddo ! s enddo ! r enddo ! q @@ -133,15 +235,13 @@ double precision function j1b_gauss_coulerf_schwartz(i, j, k, l) deallocate( schwartz_kl ) return -end function j1b_gauss_coulerf_schwartz +end function j1b_gauss_2e_j1_schwartz +! 
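---

A short note on the screening used just above, as a sketch of the usual Schwartz argument: j1b_gauss_2e_j1 delegates to the _schwartz variant whenever ao_prim_num(i)*ao_prim_num(j)*ao_prim_num(k)*ao_prim_num(l) exceeds 1024. The variant precomputes the diagonal estimates schwartz_ij ~ |(ij|f|ij)| and schwartz_kl(s,r) ~ |(kl|f|kl)| for each primitive pair, then skips a quadruplet whenever their product falls below thr = ao_integrals_threshold^2. For a positive kernel this is the Cauchy-Schwarz bound

  $ |(ij|f|kl)|^2 \le (ij|f|ij)\,(kl|f|kl) $

so a product of diagonals below ao_integrals_threshold^2 guarantees a contribution below ao_integrals_threshold; since the effective f(r12) used here is not manifestly positive, the diagonals serve as estimates rather than strict bounds.

+!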
--- - - - -subroutine get_cxcycz( dim1, cx, cy, cz & - , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & - , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) +subroutine get_cxcycz_j1( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) include 'utils/constants.include.F' @@ -163,12 +263,14 @@ subroutine get_cxcycz( dim1, cx, cy, cz & double precision :: general_primitive_integral_erf_shifted double precision :: general_primitive_integral_coul_shifted + PROVIDE j1b_pen + cx = 0.d0 cy = 0.d0 cz = 0.d0 do ii = 1, nucl_num - expoii = j1b_gauss_pen(ii) + expoii = j1b_pen(ii) Centerii(1:3) = nucl_coord(ii, 1:3) call gaussian_product(pp1, P1_center, expoii, Centerii, factii, pp2, P2_center) @@ -620,5 +722,7 @@ subroutine get_cxcycz( dim1, cx, cy, cz & enddo return -end subroutine get_cxcycz +end subroutine get_cxcycz_j1 + +! --- diff --git a/src/ao_tc_eff_map/two_e_1bgauss_j2.irp.f b/src/ao_tc_eff_map/two_e_1bgauss_j2.irp.f new file mode 100644 index 00000000..a61b5336 --- /dev/null +++ b/src/ao_tc_eff_map/two_e_1bgauss_j2.irp.f @@ -0,0 +1,729 @@ +! --- + +double precision function j1b_gauss_2e_j2(i, j, k, l) + + BEGIN_DOC + ! + ! integral in the AO basis: + ! i(r1) j(r1) f(r12) k(r2) l(r2) + ! + ! with: + ! f(r12) = - [ (0.5 - 0.5 erf(mu r12)) / r12 ] (r1-r2) \cdot \sum_A (-2 a_A c_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] + ! = [ (1 - erf(mu r12) / r12 ] \sum_A a_A c_A [ (r1-RA)^2 exp(-aA r1A^2) + ! + (r2-RA)^2 exp(-aA r2A^2) + ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) + ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] + ! + END_DOC + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: i, j, k, l + + integer :: p, q, r, s + integer :: num_i, num_j, num_k, num_l, num_ii + integer :: I_power(3), J_power(3), K_power(3), L_power(3) + integer :: iorder_p(3), iorder_q(3) + integer :: shift_P(3), shift_Q(3) + integer :: dim1 + + double precision :: coef1, coef2, coef3, coef4 + double precision :: expo1, expo2, expo3, expo4 + double precision :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1, p1_inv + double precision :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1, q1_inv + double precision :: I_center(3), J_center(3), K_center(3), L_center(3) + double precision :: ff, gg, cx, cy, cz + + double precision :: j1b_gauss_2e_j2_schwartz + + dim1 = n_pt_max_integrals + + if( ao_prim_num(i) * ao_prim_num(j) * ao_prim_num(k) * ao_prim_num(l) > 1024 ) then + j1b_gauss_2e_j2 = j1b_gauss_2e_j2_schwartz(i, j, k, l) + return + endif + + num_i = ao_nucl(i) + num_j = ao_nucl(j) + num_k = ao_nucl(k) + num_l = ao_nucl(l) + + do p = 1, 3 + I_power(p) = ao_power(i,p) + J_power(p) = ao_power(j,p) + K_power(p) = ao_power(k,p) + L_power(p) = ao_power(l,p) + I_center(p) = nucl_coord(num_i,p) + J_center(p) = nucl_coord(num_j,p) + K_center(p) = nucl_coord(num_k,p) + L_center(p) = nucl_coord(num_l,p) + enddo + + j1b_gauss_2e_j2 = 0.d0 + + do p = 1, ao_prim_num(i) + coef1 = ao_coef_normalized_ordered_transp(p, i) + expo1 = ao_expo_ordered_transp(p, i) + + do q = 1, ao_prim_num(j) + coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) + expo2 = ao_expo_ordered_transp(q, j) + + call give_explicit_poly_and_gaussian( P1_new, P1_center, pp1, fact_p1, iorder_p, expo1, expo2 & + , I_power, J_power, I_center, J_center, dim1 ) + p1_inv = 1.d0 / pp1 + + do r = 1, ao_prim_num(k) + coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) + expo3 = ao_expo_ordered_transp(r, k) + + do s = 1, ao_prim_num(l) + coef4 = coef3 
* ao_coef_normalized_ordered_transp(s, l) + expo4 = ao_expo_ordered_transp(s, l) + + call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & + , K_power, L_power, K_center, L_center, dim1 ) + q1_inv = 1.d0 / qq1 + + call get_cxcycz_j2( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + + j1b_gauss_2e_j2 = j1b_gauss_2e_j2 + coef4 * ( cx + cy + cz ) + enddo ! s + enddo ! r + enddo ! q + enddo ! p + + return +end function j1b_gauss_2e_j2 + +! --- + +double precision function j1b_gauss_2e_j2_schwartz(i, j, k, l) + + BEGIN_DOC + ! + ! integral in the AO basis: + ! i(r1) j(r1) f(r12) k(r2) l(r2) + ! + ! with: + ! f(r12) = - [ (0.5 - 0.5 erf(mu r12)) / r12 ] (r1-r2) \cdot \sum_A (-2 a_A c_A) [ r1A exp(-aA r1A^2) - r2A exp(-aA r2A^2) ] + ! = [ (1 - erf(mu r12) / r12 ] \sum_A a_A c_A [ (r1-RA)^2 exp(-aA r1A^2) + ! + (r2-RA)^2 exp(-aA r2A^2) + ! - (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) + ! - (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] + ! + END_DOC + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: i, j, k, l + + integer :: p, q, r, s + integer :: num_i, num_j, num_k, num_l, num_ii + integer :: I_power(3), J_power(3), K_power(3), L_power(3) + integer :: iorder_p(3), iorder_q(3) + integer :: dim1 + + double precision :: coef1, coef2, coef3, coef4 + double precision :: expo1, expo2, expo3, expo4 + double precision :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1, p1_inv + double precision :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1, q1_inv + double precision :: I_center(3), J_center(3), K_center(3), L_center(3) + double precision :: cx, cy, cz + double precision :: schwartz_ij, thr + double precision, allocatable :: schwartz_kl(:,:) + + dim1 = n_pt_max_integrals + thr = ao_integrals_threshold * ao_integrals_threshold + + num_i = ao_nucl(i) + num_j = ao_nucl(j) + num_k = ao_nucl(k) + num_l = ao_nucl(l) + + do p = 1, 3 + I_power(p) = ao_power(i,p) + J_power(p) = ao_power(j,p) + K_power(p) = ao_power(k,p) + L_power(p) = ao_power(l,p) + I_center(p) = nucl_coord(num_i,p) + J_center(p) = nucl_coord(num_j,p) + K_center(p) = nucl_coord(num_k,p) + L_center(p) = nucl_coord(num_l,p) + enddo + + + allocate( schwartz_kl(0:ao_prim_num(l) , 0:ao_prim_num(k)) ) + + schwartz_kl(0,0) = 0.d0 + do r = 1, ao_prim_num(k) + expo3 = ao_expo_ordered_transp(r,k) + coef3 = ao_coef_normalized_ordered_transp(r,k) * ao_coef_normalized_ordered_transp(r,k) + + schwartz_kl(0,r) = 0.d0 + do s = 1, ao_prim_num(l) + expo4 = ao_expo_ordered_transp(s,l) + coef4 = coef3 * ao_coef_normalized_ordered_transp(s,l) * ao_coef_normalized_ordered_transp(s,l) + + call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & + , K_power, L_power, K_center, L_center, dim1 ) + q1_inv = 1.d0 / qq1 + + call get_cxcycz_j2( dim1, cx, cy, cz & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + + schwartz_kl(s,r) = coef4 * dabs( cx + cy + cz ) + schwartz_kl(0,r) = max( schwartz_kl(0,r) , schwartz_kl(s,r) ) + enddo + + schwartz_kl(0,0) = max( schwartz_kl(0,r) , schwartz_kl(0,0) ) + enddo + + + j1b_gauss_2e_j2_schwartz = 0.d0 + + do p = 1, ao_prim_num(i) + expo1 = ao_expo_ordered_transp(p, i) + coef1 = ao_coef_normalized_ordered_transp(p, i) + + do q = 1, ao_prim_num(j) + expo2 = ao_expo_ordered_transp(q, j) + coef2 = coef1 * ao_coef_normalized_ordered_transp(q, j) + + call give_explicit_poly_and_gaussian( 
P1_new, P1_center, pp1, fact_p1, iorder_p, expo1, expo2 & + , I_power, J_power, I_center, J_center, dim1 ) + p1_inv = 1.d0 / pp1 + + call get_cxcycz_j2( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p ) + + schwartz_ij = coef2 * coef2 * dabs( cx + cy + cz ) + if( schwartz_kl(0,0) * schwartz_ij < thr ) cycle + + do r = 1, ao_prim_num(k) + if( schwartz_kl(0,r) * schwartz_ij < thr ) cycle + coef3 = coef2 * ao_coef_normalized_ordered_transp(r, k) + expo3 = ao_expo_ordered_transp(r, k) + + do s = 1, ao_prim_num(l) + if( schwartz_kl(s,r) * schwartz_ij < thr ) cycle + coef4 = coef3 * ao_coef_normalized_ordered_transp(s, l) + expo4 = ao_expo_ordered_transp(s, l) + + call give_explicit_poly_and_gaussian( Q1_new, Q1_center, qq1, fact_q1, iorder_q, expo3, expo4 & + , K_power, L_power, K_center, L_center, dim1 ) + q1_inv = 1.d0 / qq1 + + call get_cxcycz_j2( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + + j1b_gauss_2e_j2_schwartz = j1b_gauss_2e_j2_schwartz + coef4 * ( cx + cy + cz ) + enddo ! s + enddo ! r + enddo ! q + enddo ! p + + deallocate( schwartz_kl ) + + return +end function j1b_gauss_2e_j2_schwartz + +! --- + +subroutine get_cxcycz_j2( dim1, cx, cy, cz & + , P1_center, P1_new, pp1, fact_p1, p1_inv, iorder_p & + , Q1_center, Q1_new, qq1, fact_q1, q1_inv, iorder_q ) + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: dim1 + integer, intent(in) :: iorder_p(3), iorder_q(3) + double precision, intent(in) :: P1_new(0:max_dim,3), P1_center(3), fact_p1, pp1, p1_inv + double precision, intent(in) :: Q1_new(0:max_dim,3), Q1_center(3), fact_q1, qq1, q1_inv + double precision, intent(out) :: cx, cy, cz + + integer :: ii + integer :: shift_P(3), shift_Q(3) + double precision :: coefii, expoii, factii, Centerii(3) + double precision :: P2_new(0:max_dim,3), P2_center(3), fact_p2, pp2, p2_inv + double precision :: Q2_new(0:max_dim,3), Q2_center(3), fact_q2, qq2, q2_inv + double precision :: ff, gg + + double precision :: general_primitive_integral_erf_shifted + double precision :: general_primitive_integral_coul_shifted + + PROVIDE j1b_pen j1b_coeff + + cx = 0.d0 + cy = 0.d0 + cz = 0.d0 + do ii = 1, nucl_num + + expoii = j1b_pen (ii) + coefii = j1b_coeff(ii) + Centerii(1:3) = nucl_coord(ii, 1:3) + + call gaussian_product(pp1, P1_center, expoii, Centerii, factii, pp2, P2_center) + fact_p2 = fact_p1 * factii + p2_inv = 1.d0 / pp2 + call pol_modif_center( P1_center, P2_center, iorder_p, P1_new, P2_new ) + + call gaussian_product(qq1, Q1_center, expoii, Centerii, factii, qq2, Q2_center) + fact_q2 = fact_q1 * factii + q2_inv = 1.d0 / qq2 + call pol_modif_center( Q1_center, Q2_center, iorder_q, Q1_new, Q2_new ) + + + ! ---------------------------------------------------------------------------------------------------- + ! [ (1-erf(mu r12)) / r12 ] \sum_A a_A c_A [ (r1-RA)^2 exp(-aA r1A^2) + ! ---------------------------------------------------------------------------------------------------- + + shift_Q = (/ 0, 0, 0 /) + + ! 
x term: + ff = P2_center(1) - Centerii(1) + + shift_P = (/ 2, 0, 0 /) + cx = cx + expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx - expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_P = (/ 1, 0, 0 /) + cx = cx + expoii * coefii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx - expoii * coefii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_P = (/ 0, 0, 0 /) + cx = cx + expoii * coefii * ff * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx - expoii * coefii * ff * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + ! y term: + ff = P2_center(2) - Centerii(2) + + shift_P = (/ 0, 2, 0 /) + cy = cy + expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy - expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_P = (/ 0, 1, 0 /) + cy = cy + expoii * coefii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy - expoii * coefii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_P = (/ 0, 0, 0 /) + cy = cy + expoii * coefii * ff * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy - expoii * coefii * ff * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + ! 
z term: + ff = P2_center(3) - Centerii(3) + + shift_P = (/ 0, 0, 2 /) + cz = cz + expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz - expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_P = (/ 0, 0, 1 /) + cz = cz + expoii * coefii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz - expoii * coefii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_P = (/ 0, 0, 0 /) + cz = cz + expoii * coefii * ff * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz - expoii * coefii * ff * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + ! ---------------------------------------------------------------------------------------------------- + + + + ! ---------------------------------------------------------------------------------------------------- + ! [ (1-erf(mu r12)) / r12 ] \sum_A a_A c_A [ (r2-RA)^2 exp(-aA r2A^2) + ! ---------------------------------------------------------------------------------------------------- + + shift_P = (/ 0, 0, 0 /) + + ! x term: + ff = Q2_center(1) - Centerii(1) + + shift_Q = (/ 2, 0, 0 /) + cx = cx + expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx - expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_Q = (/ 1, 0, 0 /) + cx = cx + expoii * coefii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx - expoii * coefii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_Q = (/ 0, 0, 0 /) + cx = cx + expoii * coefii * ff * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx - expoii * coefii * ff * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + ! 
y term: + ff = Q2_center(2) - Centerii(2) + + shift_Q = (/ 0, 2, 0 /) + cy = cy + expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy - expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_Q = (/ 0, 1, 0 /) + cy = cy + expoii * coefii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy - expoii * coefii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_Q = (/ 0, 0, 0 /) + cy = cy + expoii * coefii * ff * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy - expoii * coefii * ff * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + ! z term: + ff = Q2_center(3) - Centerii(3) + + shift_Q = (/ 0, 0, 2 /) + cz = cz + expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz - expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_Q = (/ 0, 0, 1 /) + cz = cz + expoii * coefii * 2.d0 * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz - expoii * coefii * 2.d0 * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_Q = (/ 0, 0, 0 /) + cz = cz + expoii * coefii * ff * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz - expoii * coefii * ff * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + ! ---------------------------------------------------------------------------------------------------- + + + + ! ---------------------------------------------------------------------------------------------------- + ! - [ (1-erf(mu r12)) / r12 ] \sum_A a_A c_A [ (r1-RA) \cdot (r2-RA) exp(-aA r1A^2) ] + ! ---------------------------------------------------------------------------------------------------- + + ! 
x term: + ff = P2_center(1) - Centerii(1) + gg = Q1_center(1) - Centerii(1) + + shift_p = (/ 1, 0, 0 /) + shift_Q = (/ 1, 0, 0 /) + cx = cx - expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 1, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cx = cx - expoii * coefii * gg * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * gg * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 1, 0, 0 /) + cx = cx - expoii * coefii * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cx = cx - expoii * coefii * ff * gg * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * ff * gg * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + ! 
y term: + ff = P2_center(2) - Centerii(2) + gg = Q1_center(2) - Centerii(2) + + shift_p = (/ 0, 1, 0 /) + shift_Q = (/ 0, 1, 0 /) + cy = cy - expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 1, 0 /) + shift_Q = (/ 0, 0, 0 /) + cy = cy - expoii * coefii * gg * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * gg * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 1, 0 /) + cy = cy - expoii * coefii * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cy = cy - expoii * coefii * ff * gg * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * ff * gg * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + ! 
z term: + ff = P2_center(3) - Centerii(3) + gg = Q1_center(3) - Centerii(3) + + shift_p = (/ 0, 0, 1 /) + shift_Q = (/ 0, 0, 1 /) + cz = cz - expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 1 /) + shift_Q = (/ 0, 0, 0 /) + cz = cz - expoii * coefii * gg * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * gg * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 1 /) + cz = cz - expoii * coefii * ff * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * ff * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cz = cz - expoii * coefii * ff * gg * general_primitive_integral_coul_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * ff * gg * general_primitive_integral_erf_shifted( dim1 & + , P2_new, P2_center, fact_p2, pp2, p2_inv, iorder_p, shift_P & + , Q1_new, Q1_center, fact_q1, qq1, q1_inv, iorder_q, shift_Q ) + + ! ---------------------------------------------------------------------------------------------------- + + + + ! ---------------------------------------------------------------------------------------------------- + ! - [ (1-erf(mu r12)) / r12 ] \sum_A a_A c_A [ (r1-RA) \cdot (r2-RA) exp(-aA r2A^2) ] + ! ---------------------------------------------------------------------------------------------------- + + ! 
x term: + ff = P1_center(1) - Centerii(1) + gg = Q2_center(1) - Centerii(1) + + shift_p = (/ 1, 0, 0 /) + shift_Q = (/ 1, 0, 0 /) + cx = cx - expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 1, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cx = cx - expoii * coefii * gg * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * gg * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 1, 0, 0 /) + cx = cx - expoii * coefii * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cx = cx - expoii * coefii * ff * gg * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cx = cx + expoii * coefii * ff * gg * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + ! 
y term: + ff = P1_center(2) - Centerii(2) + gg = Q2_center(2) - Centerii(2) + + shift_p = (/ 0, 1, 0 /) + shift_Q = (/ 0, 1, 0 /) + cy = cy - expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 1, 0 /) + shift_Q = (/ 0, 0, 0 /) + cy = cy - expoii * coefii * gg * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * gg * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 1, 0 /) + cy = cy - expoii * coefii * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cy = cy - expoii * coefii * ff * gg * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cy = cy + expoii * coefii * ff * gg * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + ! 
z term: + ff = P1_center(3) - Centerii(3) + gg = Q2_center(3) - Centerii(3) + + shift_p = (/ 0, 0, 1 /) + shift_Q = (/ 0, 0, 1 /) + cz = cz - expoii * coefii * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 1 /) + shift_Q = (/ 0, 0, 0 /) + cz = cz - expoii * coefii * gg * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * gg * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 1 /) + cz = cz - expoii * coefii * ff * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * ff * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + shift_p = (/ 0, 0, 0 /) + shift_Q = (/ 0, 0, 0 /) + cz = cz - expoii * coefii * ff * gg * general_primitive_integral_coul_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + cz = cz + expoii * coefii * ff * gg * general_primitive_integral_erf_shifted( dim1 & + , P1_new, P1_center, fact_p1, pp1, p1_inv, iorder_p, shift_P & + , Q2_new, Q2_center, fact_q2, qq2, q2_inv, iorder_q, shift_Q ) + + ! ---------------------------------------------------------------------------------------------------- + + enddo + + return +end subroutine get_cxcycz_j2 + +! --- + diff --git a/src/ao_tc_eff_map/useful_sub.irp.f b/src/ao_tc_eff_map/useful_sub.irp.f new file mode 100644 index 00000000..4cfdcad2 --- /dev/null +++ b/src/ao_tc_eff_map/useful_sub.irp.f @@ -0,0 +1,364 @@ +! 
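---

The repeated shift_P / shift_Q blocks of get_cxcycz_j1 and get_cxcycz_j2 above follow from two pieces of elementary algebra, sketched here for the x component (y and z are analogous). First, the nuclear Gaussian exp(-aA |r1-RA|^2) is absorbed into the bra through the usual Gaussian product rule (the role of gaussian_product and pol_modif_center), producing the combined exponent pp2, the shifted center P2_center and the prefactor factii. Second, the remaining polynomial factors are re-expanded around that new center, with ff = P2_center(1) - Centerii(1) and gg = Q1_center(1) - Centerii(1):

  $ (x_1 - X_A)^2 = (x_1 - X_{P_2})^2 + 2\,ff\,(x_1 - X_{P_2}) + ff^2 $
  $ (x_1 - X_A)(x_2 - X_A) = (x_1 - X_{P_2})(x_2 - X_{Q_1}) + gg\,(x_1 - X_{P_2}) + ff\,(x_2 - X_{Q_1}) + ff\,gg $

These identities are exactly the shift_P = 2/1/0 triplet with weights 1, 2*ff, ff^2 and the four shift_P/shift_Q combinations with weights 1, gg, ff, ff*gg. Every piece then appears twice, once with the Coulomb kernel and once with the erf kernel, because (1 - erf(mu r12))/r12 is evaluated as the difference of the two shifted integrals; the only difference between the j1 and j2 routines is the extra per-nucleus weight coefii = j1b_coeff(ii) in the j2 case.

+!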
--- + +!______________________________________________________________________________________________________________________ +!______________________________________________________________________________________________________________________ + +double precision function general_primitive_integral_coul_shifted( dim & + , P_new, P_center, fact_p, p, p_inv, iorder_p, shift_P & + , Q_new, Q_center, fact_q, q, q_inv, iorder_q, shift_Q ) + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: dim + integer, intent(in) :: iorder_p(3), shift_P(3) + integer, intent(in) :: iorder_q(3), shift_Q(3) + double precision, intent(in) :: P_new(0:max_dim,3), P_center(3), fact_p, p, p_inv + double precision, intent(in) :: Q_new(0:max_dim,3), Q_center(3), fact_q, q, q_inv + + integer :: n_Ix, n_Iy, n_Iz, nx, ny, nz + integer :: ix, iy, iz, jx, jy, jz, i + integer :: n_pt_tmp, n_pt_out, iorder + integer :: ii, jj + double precision :: rho, dist + double precision :: dx(0:max_dim), Ix_pol(0:max_dim) + double precision :: dy(0:max_dim), Iy_pol(0:max_dim) + double precision :: dz(0:max_dim), Iz_pol(0:max_dim) + double precision :: a, b, c, d, e, f, accu, pq, const + double precision :: pq_inv, p10_1, p10_2, p01_1, p01_2, pq_inv_2 + double precision :: d1(0:max_dim), d_poly(0:max_dim) + double precision :: p_plus_q + + double precision :: rint_sum + + general_primitive_integral_coul_shifted = 0.d0 + + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: dx, Ix_pol, dy, Iy_pol, dz, Iz_pol + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: d1, d_poly + + ! Gaussian Product + ! ---------------- + p_plus_q = (p+q) + pq = p_inv * 0.5d0 * q_inv + pq_inv = 0.5d0 / p_plus_q + p10_1 = q * pq ! 1/(2p) + p01_1 = p * pq ! 1/(2q) + pq_inv_2 = pq_inv + pq_inv + p10_2 = pq_inv_2 * p10_1 * q ! 0.5d0 * q / (pq + p*p) + p01_2 = pq_inv_2 * p01_1 * p ! 
0.5d0 * p / (q*q + pq) + + accu = 0.d0 + + iorder = iorder_p(1) + iorder_q(1) + iorder_p(1) + iorder_q(1) + iorder = iorder + shift_P(1) + shift_Q(1) + iorder = iorder + shift_P(1) + shift_Q(1) + !DIR$ VECTOR ALIGNED + do ix = 0, iorder + Ix_pol(ix) = 0.d0 + enddo + n_Ix = 0 + do ix = 0, iorder_p(1) + + ii = ix + shift_P(1) + a = P_new(ix,1) + if(abs(a) < thresh) cycle + + do jx = 0, iorder_q(1) + + jj = jx + shift_Q(1) + d = a * Q_new(jx,1) + if(abs(d) < thresh) cycle + + !DEC$ FORCEINLINE + call give_polynom_mult_center_x( P_center(1), Q_center(1), ii, jj & + , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dx, nx ) + !DEC$ FORCEINLINE + call add_poly_multiply(dx, nx, d, Ix_pol, n_Ix) + enddo + enddo + if(n_Ix == -1) then + return + endif + + iorder = iorder_p(2) + iorder_q(2) + iorder_p(2) + iorder_q(2) + iorder = iorder + shift_P(2) + shift_Q(2) + iorder = iorder + shift_P(2) + shift_Q(2) + !DIR$ VECTOR ALIGNED + do ix = 0, iorder + Iy_pol(ix) = 0.d0 + enddo + n_Iy = 0 + do iy = 0, iorder_p(2) + + if(abs(P_new(iy,2)) > thresh) then + + ii = iy + shift_P(2) + b = P_new(iy,2) + + do jy = 0, iorder_q(2) + + jj = jy + shift_Q(2) + e = b * Q_new(jy,2) + if(abs(e) < thresh) cycle + + !DEC$ FORCEINLINE + call give_polynom_mult_center_x( P_center(2), Q_center(2), ii, jj & + , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dy, ny ) + !DEC$ FORCEINLINE + call add_poly_multiply(dy, ny, e, Iy_pol, n_Iy) + enddo + endif + enddo + if(n_Iy == -1) then + return + endif + + iorder = iorder_p(3) + iorder_q(3) + iorder_p(3) + iorder_q(3) + iorder = iorder + shift_P(3) + shift_Q(3) + iorder = iorder + shift_P(3) + shift_Q(3) + do ix = 0, iorder + Iz_pol(ix) = 0.d0 + enddo + n_Iz = 0 + do iz = 0, iorder_p(3) + + if( abs(P_new(iz,3)) > thresh ) then + + ii = iz + shift_P(3) + c = P_new(iz,3) + + do jz = 0, iorder_q(3) + + jj = jz + shift_Q(3) + f = c * Q_new(jz,3) + if(abs(f) < thresh) cycle + + !DEC$ FORCEINLINE + call give_polynom_mult_center_x( P_center(3), Q_center(3), ii, jj & + , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dz, nz ) + !DEC$ FORCEINLINE + call add_poly_multiply(dz, nz, f, Iz_pol, n_Iz) + enddo + endif + enddo + if(n_Iz == -1) then + return + endif + + rho = p * q * pq_inv_2 + dist = (P_center(1) - Q_center(1)) * (P_center(1) - Q_center(1)) & + + (P_center(2) - Q_center(2)) * (P_center(2) - Q_center(2)) & + + (P_center(3) - Q_center(3)) * (P_center(3) - Q_center(3)) + const = dist*rho + + n_pt_tmp = n_Ix + n_Iy + do i = 0, n_pt_tmp + d_poly(i) = 0.d0 + enddo + + !DEC$ FORCEINLINE + call multiply_poly(Ix_pol, n_Ix, Iy_pol, n_Iy, d_poly, n_pt_tmp) + if(n_pt_tmp == -1) then + return + endif + n_pt_out = n_pt_tmp + n_Iz + do i = 0, n_pt_out + d1(i) = 0.d0 + enddo + + !DEC$ FORCEINLINE + call multiply_poly(d_poly, n_pt_tmp, Iz_pol, n_Iz, d1, n_pt_out) + accu = accu + rint_sum(n_pt_out, const, d1) + + general_primitive_integral_coul_shifted = fact_p * fact_q * accu * pi_5_2 * p_inv * q_inv / dsqrt(p_plus_q) + + return +end function general_primitive_integral_coul_shifted +!______________________________________________________________________________________________________________________ +!______________________________________________________________________________________________________________________ + + + +!______________________________________________________________________________________________________________________ +!______________________________________________________________________________________________________________________ 
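The Coulomb helper above and the erf helper that follows differ only in the definition of p_plus_q, and reading the two definitions side by side recovers the standard long-range substitution. With $ \rho = pq/(p+q) $ and $ R_{PQ}^2 = |P_{center}-Q_{center}|^2 $, the Coulomb routine sums Boys-type integrals of argument const = $ \rho\,R_{PQ}^2 $ and divides by $ \sqrt{p+q} $. The erf(mu r12)/r12 routine keeps the same polynomial machinery and only replaces

  $ p+q \;\rightarrow\; p+q+\frac{pq}{\mu^2} = (p+q)\,\frac{\rho+\mu^2}{\mu^2} , \qquad \rho \;\rightarrow\; \tilde\rho = \frac{\rho\,\mu^2}{\rho+\mu^2} $

so that both the Boys argument and the $ 1/\sqrt{p+q} $ prefactor are attenuated by the range-separation parameter mu_erf; in the limit $ \mu \rightarrow \infty $ both routines give the same value, as expected since erf(mu r12)/r12 tends to 1/r12.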
+ +double precision function general_primitive_integral_erf_shifted( dim & + , P_new, P_center, fact_p, p, p_inv, iorder_p, shift_P & + , Q_new, Q_center, fact_q, q, q_inv, iorder_q, shift_Q ) + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: dim + integer, intent(in) :: iorder_p(3), shift_P(3) + integer, intent(in) :: iorder_q(3), shift_Q(3) + double precision, intent(in) :: P_new(0:max_dim,3), P_center(3), fact_p, p, p_inv + double precision, intent(in) :: Q_new(0:max_dim,3), Q_center(3), fact_q, q, q_inv + + integer :: n_Ix, n_Iy, n_Iz, nx, ny, nz + integer :: ix, iy, iz, jx, jy, jz, i + integer :: n_pt_tmp, n_pt_out, iorder + integer :: ii, jj + double precision :: rho, dist + double precision :: dx(0:max_dim), Ix_pol(0:max_dim) + double precision :: dy(0:max_dim), Iy_pol(0:max_dim) + double precision :: dz(0:max_dim), Iz_pol(0:max_dim) + double precision :: a, b, c, d, e, f, accu, pq, const + double precision :: pq_inv, p10_1, p10_2, p01_1, p01_2, pq_inv_2 + double precision :: d1(0:max_dim), d_poly(0:max_dim) + double precision :: p_plus_q + + double precision :: rint_sum + + general_primitive_integral_erf_shifted = 0.d0 + + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: dx, Ix_pol, dy, Iy_pol, dz, Iz_pol + !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: d1, d_poly + + ! Gaussian Product + ! ---------------- + p_plus_q = (p+q) * ( (p*q)/(p+q) + mu_erf*mu_erf ) / (mu_erf*mu_erf) + pq = p_inv * 0.5d0 * q_inv + pq_inv = 0.5d0 / p_plus_q + p10_1 = q * pq ! 1/(2p) + p01_1 = p * pq ! 1/(2q) + pq_inv_2 = pq_inv + pq_inv + p10_2 = pq_inv_2 * p10_1 * q ! 0.5d0 * q / (pq + p*p) + p01_2 = pq_inv_2 * p01_1 * p ! 0.5d0 * p / (q*q + pq) + + accu = 0.d0 + + iorder = iorder_p(1) + iorder_q(1) + iorder_p(1) + iorder_q(1) + iorder = iorder + shift_P(1) + shift_Q(1) + iorder = iorder + shift_P(1) + shift_Q(1) + !DIR$ VECTOR ALIGNED + do ix = 0, iorder + Ix_pol(ix) = 0.d0 + enddo + n_Ix = 0 + do ix = 0, iorder_p(1) + + ii = ix + shift_P(1) + a = P_new(ix,1) + if(abs(a) < thresh) cycle + + do jx = 0, iorder_q(1) + + jj = jx + shift_Q(1) + d = a * Q_new(jx,1) + if(abs(d) < thresh) cycle + + !DEC$ FORCEINLINE + call give_polynom_mult_center_x( P_center(1), Q_center(1), ii, jj & + , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dx, nx ) + !DEC$ FORCEINLINE + call add_poly_multiply(dx, nx, d, Ix_pol, n_Ix) + enddo + enddo + if(n_Ix == -1) then + return + endif + + iorder = iorder_p(2) + iorder_q(2) + iorder_p(2) + iorder_q(2) + iorder = iorder + shift_P(2) + shift_Q(2) + iorder = iorder + shift_P(2) + shift_Q(2) + !DIR$ VECTOR ALIGNED + do ix = 0, iorder + Iy_pol(ix) = 0.d0 + enddo + n_Iy = 0 + do iy = 0, iorder_p(2) + + if(abs(P_new(iy,2)) > thresh) then + + ii = iy + shift_P(2) + b = P_new(iy,2) + + do jy = 0, iorder_q(2) + + jj = jy + shift_Q(2) + e = b * Q_new(jy,2) + if(abs(e) < thresh) cycle + + !DEC$ FORCEINLINE + call give_polynom_mult_center_x( P_center(2), Q_center(2), ii, jj & + , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dy, ny ) + !DEC$ FORCEINLINE + call add_poly_multiply(dy, ny, e, Iy_pol, n_Iy) + enddo + endif + enddo + if(n_Iy == -1) then + return + endif + + iorder = iorder_p(3) + iorder_q(3) + iorder_p(3) + iorder_q(3) + iorder = iorder + shift_P(3) + shift_Q(3) + iorder = iorder + shift_P(3) + shift_Q(3) + do ix = 0, iorder + Iz_pol(ix) = 0.d0 + enddo + n_Iz = 0 + do iz = 0, iorder_p(3) + + if( abs(P_new(iz,3)) > thresh ) then + + ii = iz + shift_P(3) + c = P_new(iz,3) + + do jz = 0, iorder_q(3) + + jj = jz + shift_Q(3) + f = c * 
Q_new(jz,3) + if(abs(f) < thresh) cycle + + !DEC$ FORCEINLINE + call give_polynom_mult_center_x( P_center(3), Q_center(3), ii, jj & + , p, q, iorder, pq_inv, pq_inv_2, p10_1, p01_1, p10_2, p01_2, dz, nz ) + !DEC$ FORCEINLINE + call add_poly_multiply(dz, nz, f, Iz_pol, n_Iz) + enddo + endif + enddo + if(n_Iz == -1) then + return + endif + + rho = p * q * pq_inv_2 + dist = (P_center(1) - Q_center(1)) * (P_center(1) - Q_center(1)) & + + (P_center(2) - Q_center(2)) * (P_center(2) - Q_center(2)) & + + (P_center(3) - Q_center(3)) * (P_center(3) - Q_center(3)) + const = dist*rho + + n_pt_tmp = n_Ix + n_Iy + do i = 0, n_pt_tmp + d_poly(i) = 0.d0 + enddo + + !DEC$ FORCEINLINE + call multiply_poly(Ix_pol, n_Ix, Iy_pol, n_Iy, d_poly, n_pt_tmp) + if(n_pt_tmp == -1) then + return + endif + n_pt_out = n_pt_tmp + n_Iz + do i = 0, n_pt_out + d1(i) = 0.d0 + enddo + + !DEC$ FORCEINLINE + call multiply_poly(d_poly, n_pt_tmp, Iz_pol, n_Iz, d1, n_pt_out) + accu = accu + rint_sum(n_pt_out, const, d1) + + general_primitive_integral_erf_shifted = fact_p * fact_q * accu * pi_5_2 * p_inv * q_inv / dsqrt(p_plus_q) + + return +end function general_primitive_integral_erf_shifted +!______________________________________________________________________________________________________________________ +!______________________________________________________________________________________________________________________ + + + + + diff --git a/src/bi_ort_ints/one_e_bi_ort.irp.f b/src/bi_ort_ints/one_e_bi_ort.irp.f index b7b87463..a995a364 100644 --- a/src/bi_ort_ints/one_e_bi_ort.irp.f +++ b/src/bi_ort_ints/one_e_bi_ort.irp.f @@ -8,9 +8,9 @@ BEGIN_PROVIDER [double precision, ao_one_e_integrals_tc_tot, (ao_num,ao_num)] ao_one_e_integrals_tc_tot = ao_one_e_integrals - provide j1b_gauss + provide j1b_type - if(j1b_gauss .eq. 1) then + if(j1b_type .ne. 0) then do i = 1, ao_num do j = 1, ao_num From 7a4f732e15d9131c7545645dffc3ba78ac088040 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Wed, 12 Oct 2022 11:24:09 +0200 Subject: [PATCH 02/10] 1st version of grad + lapl of Jmu_modif --- src/ao_many_one_e_ints/ao_erf_gauss.irp.f | 343 ++++++++++----- src/ao_many_one_e_ints/ao_gaus_gauss.irp.f | 142 +++++-- src/ao_many_one_e_ints/grad_J1b_ints.irp.f | 398 ++++++++++++++++++ .../grad_related_ints.irp.f | 161 ++++--- src/ao_one_e_ints/pot_ao_erf_ints.irp.f | 373 +++++++++++----- src/non_h_ints_mu/fit_j.irp.f | 61 ++- src/non_h_ints_mu/new_grad_tc.irp.f | 100 ++++- 7 files changed, 1230 insertions(+), 348 deletions(-) create mode 100644 src/ao_many_one_e_ints/grad_J1b_ints.irp.f diff --git a/src/ao_many_one_e_ints/ao_erf_gauss.irp.f b/src/ao_many_one_e_ints/ao_erf_gauss.irp.f index 39be352f..d9c35a8c 100644 --- a/src/ao_many_one_e_ints/ao_erf_gauss.irp.f +++ b/src/ao_many_one_e_ints/ao_erf_gauss.irp.f @@ -1,4 +1,6 @@ +! --- + subroutine phi_j_erf_mu_r_xyz_phi(i,j,mu_in, C_center, xyz_ints) implicit none BEGIN_DOC @@ -49,45 +51,58 @@ subroutine phi_j_erf_mu_r_xyz_phi(i,j,mu_in, C_center, xyz_ints) enddo end +! --- -double precision function phi_j_erf_mu_r_phi(i,j,mu_in, C_center) - implicit none - BEGIN_DOC -! 
phi_j_erf_mu_r_phi = int dr phi_j(r) [erf(mu |r - C|)/|r-C|] phi_i(r) - END_DOC - integer, intent(in) :: i,j - double precision, intent(in) :: mu_in, C_center(3) - integer :: num_A,power_A(3), num_b, power_B(3) - double precision :: alpha, beta, A_center(3), B_center(3),contrib,NAI_pol_mult_erf - integer :: n_pt_in,l,m - phi_j_erf_mu_r_phi = 0.d0 - if(ao_overlap_abs(j,i).lt.1.d-12)then - return - endif - n_pt_in = n_pt_max_integrals - ! j - num_A = ao_nucl(j) - power_A(1:3)= ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - ! i - num_B = ao_nucl(i) - power_B(1:3)= ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) +double precision function phi_j_erf_mu_r_phi(i, j, mu_in, C_center) - do l=1,ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - do m=1,ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - contrib = NAI_pol_mult_erf(A_center,B_center,power_A,power_B,alpha,beta,C_center,n_pt_in,mu_in) - phi_j_erf_mu_r_phi += contrib * ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) + BEGIN_DOC + ! phi_j_erf_mu_r_phi = int dr phi_j(r) [erf(mu |r - C|)/|r-C|] phi_i(r) + END_DOC + + implicit none + integer, intent(in) :: i,j + double precision, intent(in) :: mu_in, C_center(3) + + integer :: num_A, power_A(3), num_b, power_B(3) + integer :: n_pt_in, l, m + double precision :: alpha, beta, A_center(3), B_center(3), contrib + + double precision :: NAI_pol_mult_erf + + phi_j_erf_mu_r_phi = 0.d0 + + if(ao_overlap_abs(j,i).lt.1.d-12) then + return + endif + + n_pt_in = n_pt_max_integrals + + ! j + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + ! i + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + + contrib = NAI_pol_mult_erf(A_center, B_center, power_A, power_B, alpha, beta, C_center, n_pt_in, mu_in) + + phi_j_erf_mu_r_phi += contrib * ao_coef_normalized_ordered_transp(l,j) * ao_coef_normalized_ordered_transp(m,i) + enddo enddo - enddo -end +end function phi_j_erf_mu_r_phi +! --- -subroutine erfc_mu_gauss_xyz_ij_ao(i,j,mu, C_center, delta,gauss_ints) +subroutine erfc_mu_gauss_xyz_ij_ao(i, j, mu, C_center, delta, gauss_ints) implicit none BEGIN_DOC ! gauss_ints(m) = \int dr exp(-delta (r - C)^2 ) x/y/z * ( 1 - erf(mu |r-r'|))/ |r-r'| * AO_i(r') * AO_j(r') @@ -132,95 +147,204 @@ subroutine erfc_mu_gauss_xyz_ij_ao(i,j,mu, C_center, delta,gauss_ints) enddo end -subroutine erf_mu_gauss_ij_ao(i,j,mu, C_center, delta,gauss_ints) - implicit none - BEGIN_DOC - ! gauss_ints(m) = \int dr exp(-delta (r - C)^2 ) * erf(mu |r-r'|)/ |r-r'| * AO_i(r') * AO_j(r') - ! - END_DOC - integer, intent(in) :: i,j - double precision, intent(in) :: mu, C_center(3),delta - double precision, intent(out):: gauss_ints +! --- - integer :: num_A,power_A(3), num_b, power_B(3) - double precision :: alpha, beta, A_center(3), B_center(3),contrib,NAI_pol_mult_erf - double precision :: integral , erf_mu_gauss - integer :: n_pt_in,l,m,mm - gauss_ints = 0.d0 - if(ao_overlap_abs(j,i).lt.1.d-12)then - return - endif - n_pt_in = n_pt_max_integrals - ! j - num_A = ao_nucl(j) - power_A(1:3)= ao_power(j,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - ! 
i - num_B = ao_nucl(i) - power_B(1:3)= ao_power(i,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) +subroutine erf_mu_gauss_ij_ao(i, j, mu, C_center, delta, gauss_ints) - do l=1,ao_prim_num(j) - alpha = ao_expo_ordered_transp(l,j) - do m=1,ao_prim_num(i) - beta = ao_expo_ordered_transp(m,i) - if(dabs(ao_coef_normalized_ordered_transp(l,j) * ao_coef_normalized_ordered_transp(m,i)).lt.1.d-12)cycle - integral = erf_mu_gauss(C_center,delta,mu,A_center,B_center,power_A,power_B,alpha,beta,n_pt_in) - gauss_ints += integral * ao_coef_normalized_ordered_transp(l,j) & - * ao_coef_normalized_ordered_transp(m,i) - enddo - enddo -end - - -subroutine NAI_pol_x_mult_erf_ao(i_ao,j_ao,mu_in,C_center,ints) - implicit none BEGIN_DOC + ! + ! gauss_ints = \int dr exp(-delta (r - C)^2) * erf(mu |r-C|) / |r-C| * AO_i(r) * AO_j(r) + ! + END_DOC + + implicit none + integer, intent(in) :: i, j + double precision, intent(in) :: mu, C_center(3), delta + double precision, intent(out) :: gauss_ints + + integer :: n_pt_in, l, m + integer :: num_A, power_A(3), num_b, power_B(3) + double precision :: alpha, beta, A_center(3), B_center(3), coef + double precision :: integral + + double precision :: erf_mu_gauss + + gauss_ints = 0.d0 + + if(ao_overlap_abs(j,i).lt.1.d-12) then + return + endif + + n_pt_in = n_pt_max_integrals + + ! j + num_A = ao_nucl(j) + power_A(1:3) = ao_power(j,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + + ! i + num_B = ao_nucl(i) + power_B(1:3) = ao_power(i,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + do l = 1, ao_prim_num(j) + alpha = ao_expo_ordered_transp(l,j) + do m = 1, ao_prim_num(i) + beta = ao_expo_ordered_transp(m,i) + coef = ao_coef_normalized_ordered_transp(l,j) * ao_coef_normalized_ordered_transp(m,i) + + if(dabs(coef) .lt. 1.d-12) cycle + + integral = erf_mu_gauss(C_center, delta, mu, A_center, B_center, power_A, power_B, alpha, beta, n_pt_in) + + gauss_ints += integral * coef + enddo + enddo + +end subroutine erf_mu_gauss_ij_ao + +! --- + +subroutine NAI_pol_x_mult_erf_ao(i_ao, j_ao, mu_in, C_center, ints) + + BEGIN_DOC + ! ! Computes the following integral : + ! ! $\int_{-\infty}^{infty} dr x * \chi_i(r) \chi_j(r) \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. ! ! $\int_{-\infty}^{infty} dr y * \chi_i(r) \chi_j(r) \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. ! ! $\int_{-\infty}^{infty} dr z * \chi_i(r) \chi_j(r) \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. + ! END_DOC - include 'utils/constants.include.F' - integer, intent(in) :: i_ao,j_ao - double precision, intent(in) :: mu_in, C_center(3) - double precision, intent(out):: ints(3) - double precision :: A_center(3), B_center(3),integral, alpha,beta - double precision :: NAI_pol_mult_erf - integer :: i,j,num_A,num_B, power_A(3), power_B(3), n_pt_in, power_xA(3),m - ints = 0.d0 - if(ao_overlap_abs(j_ao,i_ao).lt.1.d-12)then - return - endif - num_A = ao_nucl(i_ao) - power_A(1:3)= ao_power(i_ao,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - num_B = ao_nucl(j_ao) - power_B(1:3)= ao_power(j_ao,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - n_pt_in = n_pt_max_integrals + include 'utils/constants.include.F' - do i = 1, ao_prim_num(i_ao) - alpha = ao_expo_ordered_transp(i,i_ao) - do m = 1, 3 - power_xA = power_A - ! x * phi_i(r) = x * (x-Ax)**ax = (x-Ax)**(ax+1) + Ax * (x-Ax)**ax - power_xA(m) += 1 - do j = 1, ao_prim_num(j_ao) - beta = ao_expo_ordered_transp(j,j_ao) - ! 
First term = (x-Ax)**(ax+1) - integral = NAI_pol_mult_erf(A_center,B_center,power_xA,power_B,alpha,beta,C_center,n_pt_in,mu_in) - ints(m) += integral * ao_coef_normalized_ordered_transp(j,j_ao)*ao_coef_normalized_ordered_transp(i,i_ao) - ! Second term = Ax * (x-Ax)**(ax) - integral = NAI_pol_mult_erf(A_center,B_center,power_A,power_B,alpha,beta,C_center,n_pt_in,mu_in) - ints(m) += A_center(m) * integral * ao_coef_normalized_ordered_transp(j,j_ao)*ao_coef_normalized_ordered_transp(i,i_ao) + implicit none + + integer, intent(in) :: i_ao, j_ao + double precision, intent(in) :: mu_in, C_center(3) + double precision, intent(out) :: ints(3) + + integer :: i, j, num_A, num_B, power_A(3), power_B(3), n_pt_in, power_xA(3), m + double precision :: A_center(3), B_center(3), integral, alpha, beta, coef + + double precision :: NAI_pol_mult_erf + + ints = 0.d0 + if(ao_overlap_abs(j_ao,i_ao).lt.1.d-12) then + return + endif + + num_A = ao_nucl(i_ao) + power_A(1:3) = ao_power(i_ao,1:3) + A_center(1:3) = nucl_coord(num_A,1:3) + num_B = ao_nucl(j_ao) + power_B(1:3) = ao_power(j_ao,1:3) + B_center(1:3) = nucl_coord(num_B,1:3) + + n_pt_in = n_pt_max_integrals + + do i = 1, ao_prim_num(i_ao) + alpha = ao_expo_ordered_transp(i,i_ao) + + do m = 1, 3 + + power_xA = power_A + ! x * phi_i(r) = x * (x-Ax)**ax = (x-Ax)**(ax+1) + Ax * (x-Ax)**ax + power_xA(m) += 1 + + do j = 1, ao_prim_num(j_ao) + beta = ao_expo_ordered_transp(j,j_ao) + coef = ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) + + ! First term = (x-Ax)**(ax+1) + integral = NAI_pol_mult_erf(A_center, B_center, power_xA, power_B, alpha, beta, C_center, n_pt_in, mu_in) + ints(m) += integral * coef + + ! Second term = Ax * (x-Ax)**(ax) + integral = NAI_pol_mult_erf(A_center, B_center, power_A, power_B, alpha, beta, C_center, n_pt_in, mu_in) + ints(m) += A_center(m) * integral * coef + + enddo enddo enddo - enddo -end + +end subroutine NAI_pol_x_mult_erf_ao + +! --- + +subroutine NAI_pol_x_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_center, ints) + + BEGIN_DOC + ! + ! Computes the following integral : + ! + ! $\int_{-\infty}^{infty} dr x * \chi_i(r) \chi_j(r) e^{-\beta (r - B_center)^2} \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. + ! + ! $\int_{-\infty}^{infty} dr y * \chi_i(r) \chi_j(r) e^{-\beta (r - B_center)^2} \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. + ! + ! $\int_{-\infty}^{infty} dr z * \chi_i(r) \chi_j(r) e^{-\beta (r - B_center)^2} \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. + ! + END_DOC + + include 'utils/constants.include.F' + + implicit none + + integer, intent(in) :: i_ao, j_ao + double precision, intent(in) :: beta, B_center(3), mu_in, C_center(3) + double precision, intent(out) :: ints(3) + + integer :: i, j, power_Ai(3), power_Aj(3), n_pt_in, power_xA(3), m + double precision :: Ai_center(3), Aj_center(3), integral, alphai, alphaj, coef + + double precision, external :: NAI_pol_mult_erf_with1s + + ints = 0.d0 + if(ao_overlap_abs(j_ao,i_ao) .lt. 1.d-12) then + return + endif + + power_Ai(1:3) = ao_power(i_ao,1:3) + power_Aj(1:3) = ao_power(j_ao,1:3) + + Ai_center(1:3) = nucl_coord(ao_nucl(i_ao),1:3) + Aj_center(1:3) = nucl_coord(ao_nucl(j_ao),1:3) + + n_pt_in = n_pt_max_integrals + + do i = 1, ao_prim_num(i_ao) + alphai = ao_expo_ordered_transp(i,i_ao) + + do m = 1, 3 + + power_xA = power_Ai + ! 
x * phi_i(r) = x * (x-Ax)**ax = (x-Ax)**(ax+1) + Ax * (x-Ax)**ax + power_xA(m) += 1 + + do j = 1, ao_prim_num(j_ao) + alphaj = ao_expo_ordered_transp(j,j_ao) + coef = ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) + + ! First term = (x-Ax)**(ax+1) + integral = NAI_pol_mult_erf_with1s( Ai_center, Aj_center, power_xA, power_Aj, alphai, alphaj & + , beta, b_center, c_center, n_pt_in, mu_in ) + ints(m) += integral * coef + + ! Second term = Ax * (x-Ax)**(ax) + integral = NAI_pol_mult_erf_with1s( Ai_center, Aj_center, power_Ai, power_Aj, alphai, alphaj & + , beta, b_center, c_center, n_pt_in, mu_in ) + ints(m) += Ai_center(m) * integral * coef + + enddo + enddo + enddo + +end subroutine NAI_pol_x_mult_erf_ao_with1s + +! --- subroutine NAI_pol_x_specify_mult_erf_ao(i_ao,j_ao,mu_in,C_center,m,ints) implicit none @@ -249,7 +373,6 @@ subroutine NAI_pol_x_specify_mult_erf_ao(i_ao,j_ao,mu_in,C_center,m,ints) B_center(1:3) = nucl_coord(num_B,1:3) n_pt_in = n_pt_max_integrals - do i = 1, ao_prim_num(i_ao) alpha = ao_expo_ordered_transp(i,i_ao) power_xA = power_A @@ -267,3 +390,5 @@ subroutine NAI_pol_x_specify_mult_erf_ao(i_ao,j_ao,mu_in,C_center,m,ints) enddo end +! --- + diff --git a/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f b/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f index 681d1e6f..fadec343 100644 --- a/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f +++ b/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f @@ -102,36 +102,118 @@ subroutine overlap_gauss_r12_all_ao(D_center,delta,aos_ints) enddo end -double precision function overlap_gauss_r12_ao(D_center,delta,i,j) - implicit none - BEGIN_DOC -! \int dr AO_i(r) AO_j(r) e^{-delta |r-D_center|^2} - END_DOC - integer, intent(in) :: i,j - double precision, intent(in) :: D_center(3), delta +! --- - integer :: num_a,num_b,power_A(3), power_B(3),l,k - double precision :: A_center(3), B_center(3),overlap_gauss_r12,alpha,beta,analytical_j - overlap_gauss_r12_ao = 0.d0 - if(ao_overlap_abs(j,i).lt.1.d-12)then - return - endif - ! TODO :: PUT CYCLES IN LOOPS - num_A = ao_nucl(i) - power_A(1:3)= ao_power(i,1:3) - A_center(1:3) = nucl_coord(num_A,1:3) - num_B = ao_nucl(j) - power_B(1:3)= ao_power(j,1:3) - B_center(1:3) = nucl_coord(num_B,1:3) - do l=1,ao_prim_num(i) - alpha = ao_expo_ordered_transp(l,i) - do k=1,ao_prim_num(j) - beta = ao_expo_ordered_transp(k,j) - analytical_j = overlap_gauss_r12(D_center,delta,A_center,B_center,power_A,power_B,alpha,beta) - overlap_gauss_r12_ao += analytical_j * ao_coef_normalized_ordered_transp(l,i) & - * ao_coef_normalized_ordered_transp(k,j) - enddo - enddo -end +! TODO :: PUT CYCLES IN LOOPS +double precision function overlap_gauss_r12_ao(D_center, delta, i, j) + BEGIN_DOC + ! \int dr AO_i(r) AO_j(r) e^{-delta |r-D_center|^2} + END_DOC + + implicit none + integer, intent(in) :: i, j + double precision, intent(in) :: D_center(3), delta + + integer :: power_A(3), power_B(3), l, k + double precision :: A_center(3), B_center(3), alpha, beta, coef, analytical_j + + double precision, external :: overlap_gauss_r12 + + overlap_gauss_r12_ao = 0.d0 + + if(ao_overlap_abs(j,i).lt.1.d-12) then + return + endif + + power_A(1:3) = ao_power(i,1:3) + power_B(1:3) = ao_power(j,1:3) + + A_center(1:3) = nucl_coord(ao_nucl(i),1:3) + B_center(1:3) = nucl_coord(ao_nucl(j),1:3) + + do l = 1, ao_prim_num(i) + alpha = ao_expo_ordered_transp(l,i) + do k = 1, ao_prim_num(j) + beta = ao_expo_ordered_transp(k,j) + coef = ao_coef_normalized_ordered_transp(l,i) * ao_coef_normalized_ordered_transp(k,j) + + if(dabs(coef) .lt. 
1d-12) cycle + + analytical_j = overlap_gauss_r12(D_center, delta, A_center, B_center, power_A, power_B, alpha, beta) + + overlap_gauss_r12_ao += coef * analytical_j + enddo + enddo + +end function overlap_gauss_r12_ao + +! --- + +double precision function overlap_gauss_r12_ao_with1s(B_center, beta, D_center, delta, i, j) + + BEGIN_DOC + ! \int dr AO_i(r) AO_j(r) e^{-beta |r-B_center^2|} e^{-delta |r-D_center|^2} + END_DOC + + implicit none + integer, intent(in) :: i, j + double precision, intent(in) :: B_center(3), beta, D_center(3), delta + + integer :: power_A1(3), power_A2(3), l, k + double precision :: A1_center(3), A2_center(3), alpha1, alpha2, coef12, analytical_j + double precision :: G_center(3), gama, fact_g, gama_inv + + double precision, external :: overlap_gauss_r12, overlap_gauss_r12_ao + + ASSERT(beta .gt. 0.d0) + if(beta .lt. 1d-10) then + overlap_gauss_r12_ao_with1s = overlap_gauss_r12_ao(D_center, delta, i, j) + return + endif + + overlap_gauss_r12_ao_with1s = 0.d0 + + if(ao_overlap_abs(j,i) .lt. 1.d-12) then + return + endif + + ! e^{-beta |r-B_center^2|} e^{-delta |r-D_center|^2} = fact_g e^{-gama |r - G|^2} + + gama = beta + delta + gama_inv = 1.d0 / gama + G_center(1) = (beta * B_center(1) + delta * D_center(1)) * gama_inv + G_center(2) = (beta * B_center(2) + delta * D_center(2)) * gama_inv + G_center(3) = (beta * B_center(3) + delta * D_center(3)) * gama_inv + fact_g = beta * delta * gama_inv * ( (B_center(1) - D_center(1)) * (B_center(1) - D_center(1)) & + + (B_center(2) - D_center(2)) * (B_center(2) - D_center(2)) & + + (B_center(3) - D_center(3)) * (B_center(3) - D_center(3)) ) + fact_g = dexp(-fact_g) + if(fact_g .lt. 1.d-12) return + + ! --- + + power_A1(1:3) = ao_power(i,1:3) + power_A2(1:3) = ao_power(j,1:3) + + A1_center(1:3) = nucl_coord(ao_nucl(i),1:3) + A2_center(1:3) = nucl_coord(ao_nucl(j),1:3) + + do l = 1, ao_prim_num(i) + alpha1 = ao_expo_ordered_transp(l,i) + do k = 1, ao_prim_num(j) + alpha2 = ao_expo_ordered_transp(k,j) + coef12 = fact_g * ao_coef_normalized_ordered_transp(l,i) * ao_coef_normalized_ordered_transp(k,j) + + if(dabs(coef12) .lt. 1d-12) cycle + + analytical_j = overlap_gauss_r12(G_center, gama, A1_center, A2_center, power_A1, power_A2, alpha1, alpha2) + + overlap_gauss_r12_ao_with1s += coef12 * analytical_j + enddo + enddo + +end function overlap_gauss_r12_ao_with1s + +! --- diff --git a/src/ao_many_one_e_ints/grad_J1b_ints.irp.f b/src/ao_many_one_e_ints/grad_J1b_ints.irp.f new file mode 100644 index 00000000..30e0acc8 --- /dev/null +++ b/src/ao_many_one_e_ints/grad_J1b_ints.irp.f @@ -0,0 +1,398 @@ + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_size] + + implicit none + + List_all_comb_size = 2**nucl_num + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb, (nucl_num, List_all_comb_size)] + + implicit none + integer :: i, j + + if(nucl_num .gt. 32) then + print *, ' nucl_num = ', nucl_num, '> 32' + stop + endif + + List_all_comb = 0 + + do i = 0, List_all_comb_size-1 + do j = 0, nucl_num-1 + if (btest(i,j)) then + List_all_comb(j+1,i+1) = 1 + endif + enddo + enddo + +END_PROVIDER + +! 
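
A sketch, in LaTeX, of the expansion assumed by the 2**nucl_num enumeration above and by the List_all_j1b1s_* providers that follow; the symbols s, beta_s, C_s and K_s are illustrative names (not code identifiers), and the one-body envelope is taken to be prod_A (1 - exp(-j1b_pen(A) |r - R_A|^2)):

  \prod_{A=1}^{N_{\rm nucl}} \Big(1 - e^{-\alpha_A (\mathbf r - \mathbf R_A)^2}\Big)
    = \sum_{s \in \{0,1\}^{N_{\rm nucl}}} (-1)^{\sum_A s_A}\,
      \exp\Big(-\sum_A s_A \alpha_A (\mathbf r - \mathbf R_A)^2\Big)

each term recombining into a single Gaussian,

  \exp\Big(-\sum_A s_A \alpha_A (\mathbf r - \mathbf R_A)^2\Big)
    = K_s\, e^{-\beta_s (\mathbf r - \mathbf C_s)^2}, \qquad
  \beta_s = \sum_A s_A \alpha_A, \quad
  \mathbf C_s = \frac{1}{\beta_s}\sum_A s_A \alpha_A \mathbf R_A, \quad
  K_s = \exp\Big(-\frac{1}{\beta_s}\sum_{A<B} s_A s_B\, \alpha_A \alpha_B\, |\mathbf R_A - \mathbf R_B|^2\Big)

so that List_all_j1b1s_expo, List_all_j1b1s_cent and List_all_j1b1s_coef store beta_s, C_s and (-1)^{\sum_A s_A} K_s, respectively.
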
--- + + BEGIN_PROVIDER [ double precision, List_all_j1b1s_coef, ( List_all_comb_size)] +&BEGIN_PROVIDER [ double precision, List_all_j1b1s_expo, ( List_all_comb_size)] +&BEGIN_PROVIDER [ double precision, List_all_j1b1s_cent, (3, List_all_comb_size)] + + implicit none + integer :: i, j, k, phase + double precision :: tmp_alphaj, tmp_alphak + + provide j1b_pen + + List_all_j1b1s_coef = 0.d0 + List_all_j1b1s_expo = 0.d0 + List_all_j1b1s_cent = 0.d0 + + do i = 1, List_all_comb_size + + do j = 1, nucl_num + tmp_alphaj = dble(List_all_comb(j,i)) * j1b_pen(j) + + List_all_j1b1s_expo(i) += tmp_alphaj + List_all_j1b1s_cent(1,i) += tmp_alphaj * nucl_coord(j,1) + List_all_j1b1s_cent(2,i) += tmp_alphaj * nucl_coord(j,2) + List_all_j1b1s_cent(3,i) += tmp_alphaj * nucl_coord(j,3) + + enddo + + ASSERT(List_all_j1b1s_expo(i) .gt. 0d0) + if(List_all_j1b1s_expo(i) .lt. 1d-10) cycle + + List_all_j1b1s_cent(1,i) = List_all_j1b1s_cent(1,i) / List_all_j1b1s_expo(i) + List_all_j1b1s_cent(2,i) = List_all_j1b1s_cent(2,i) / List_all_j1b1s_expo(i) + List_all_j1b1s_cent(3,i) = List_all_j1b1s_cent(3,i) / List_all_j1b1s_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_size + + do j = 2, nucl_num, 1 + tmp_alphaj = dble(List_all_comb(j,i)) * j1b_pen(j) + do k = 1, j-1, 1 + tmp_alphak = dble(List_all_comb(k,i)) * j1b_pen(k) + + List_all_j1b1s_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + enddo + enddo + + if(List_all_j1b1s_expo(i) .lt. 1d-10) cycle + + List_all_j1b1s_coef(i) = List_all_j1b1s_coef(i) / List_all_j1b1s_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_size + + phase = 0 + do j = 1, nucl_num + phase += List_all_comb(j,i) + enddo + + List_all_j1b1s_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_j1b1s_coef(i)) + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int dr phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R| - 1) / |r - R| + ! 
+ END_DOC + + implicit none + integer :: i, j, ipoint, i_1s + double precision :: r(3), int_mu, int_coulomb + double precision :: coef, beta, B_center(3) + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:) + + double precision, external :: NAI_pol_mult_erf_ao_with1s + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + v_ij_erf_rk_cst_mu_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, int_mu, int_coulomb, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, final_grid_points, & + !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, List_all_j1b1s_cent, & + !$OMP v_ij_erf_rk_cst_mu_j1b, mu_erf) + + allocate( tmp(ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_size + + coef = List_all_j1b1s_coef (i_1s) + beta = List_all_j1b1s_expo (i_1s) + B_center(1) = List_all_j1b1s_cent(1,i_1s) + B_center(2) = List_all_j1b1s_cent(2,i_1s) + B_center(3) = List_all_j1b1s_cent(3,i_1s) + + int_mu = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) + int_coulomb = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) + + tmp(j,i,ipoint) += coef * (int_mu - int_coulomb) + enddo + + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) += tmp(j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) = v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for v_ij_erf_rk_cst_mu_j1b', wall1 - wall0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid, 3)] + + BEGIN_DOC + ! int dr x * phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,1) = x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,2) = x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_j1b(j,i,ipoint,3) = x_v_ij_erf_rk_cst_mu_tmp_j1b(3,j,i,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for x_v_ij_erf_rk_cst_mu_j1b', wall1 - wall0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! 
int dr x * phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint, i_1s + double precision :: coef, beta, B_center(3), r(3), ints(3), ints_coulomb(3) + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:,:) + + call wall_time(wall0) + + x_v_ij_erf_rk_cst_mu_tmp_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, final_grid_points, & + !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, List_all_j1b1s_cent, & + !$OMP x_v_ij_erf_rk_cst_mu_tmp_j1b, mu_erf) + + allocate( tmp(3,ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_size + + coef = List_all_j1b1s_coef (i_1s) + beta = List_all_j1b1s_expo (i_1s) + B_center(1) = List_all_j1b1s_cent(1,i_1s) + B_center(2) = List_all_j1b1s_cent(2,i_1s) + B_center(3) = List_all_j1b1s_cent(3,i_1s) + + call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, ints ) + call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, ints_coulomb) + + tmp(1,j,i,ipoint) += coef * (ints(1) - ints_coulomb(1)) + tmp(2,j,i,ipoint) += coef * (ints(2) - ints_coulomb(2)) + tmp(3,j,i,ipoint) += coef * (ints(3) - ints_coulomb(3)) + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) += tmp(1,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) += tmp(2,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(3,j,i,ipoint) += tmp(3,j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(1,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(2,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(3,i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2) u(mu, r12) + ! 
+ END_DOC + + implicit none + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit, expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:) + + double precision, external :: overlap_gauss_r12_ao_with1s + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + v_ij_u_cst_mu_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, & + !$OMP final_grid_points, n_max_fit_slat, & + !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & + !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, & + !$OMP List_all_j1b1s_cent, v_ij_u_cst_mu_j1b) + + allocate( tmp(ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_size + + coef = List_all_j1b1s_coef (i_1s) + beta = List_all_j1b1s_expo (i_1s) + B_center(1) = List_all_j1b1s_cent(1,i_1s) + B_center(2) = List_all_j1b1s_cent(2,i_1s) + B_center(3) = List_all_j1b1s_cent(3,i_1s) + + do i_fit = 1, n_max_fit_slat + + expo_fit = expo_gauss_j_mu_x(i_fit) + coef_fit = coef_gauss_j_mu_x(i_fit) + int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) + + tmp(j,i,ipoint) += coef * coef_fit * int_fit + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + v_ij_u_cst_mu_j1b(j,i,ipoint) += tmp(j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + v_ij_u_cst_mu_j1b(j,i,ipoint) = v_ij_u_cst_mu_j1b(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for v_ij_u_cst_mu_j1b', wall1 - wall0 + +END_PROVIDER + +! --- diff --git a/src/ao_many_one_e_ints/grad_related_ints.irp.f b/src/ao_many_one_e_ints/grad_related_ints.irp.f index c3c886f8..13fb1fc8 100644 --- a/src/ao_many_one_e_ints/grad_related_ints.irp.f +++ b/src/ao_many_one_e_ints/grad_related_ints.irp.f @@ -1,47 +1,64 @@ -BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu, ( ao_num, ao_num,n_points_final_grid)] - implicit none - BEGIN_DOC -! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R| - 1)/|r - R| - END_DOC - integer :: i,j,ipoint - double precision :: mu,r(3),NAI_pol_mult_erf_ao - double precision :: int_mu, int_coulomb - provide mu_erf final_grid_points - double precision :: wall0, wall1 - call wall_time(wall0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,ipoint,mu,r,int_mu,int_coulomb) & - !$OMP SHARED (ao_num,n_points_final_grid,v_ij_erf_rk_cst_mu,final_grid_points,mu_erf) + +! --- + +BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R| - 1) / |r - R| + ! 
+ END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: r(3) + double precision :: int_mu, int_coulomb + double precision :: wall0, wall1 + + double precision :: NAI_pol_mult_erf_ao + + provide mu_erf final_grid_points + call wall_time(wall0) + + v_ij_erf_rk_cst_mu = 0.d0 + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i, j, ipoint, r, int_mu, int_coulomb) & + !$OMP SHARED (ao_num, n_points_final_grid, v_ij_erf_rk_cst_mu, final_grid_points, mu_erf) !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - mu = mu_erf - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - int_mu = NAI_pol_mult_erf_ao(i,j,mu,r) - int_coulomb = NAI_pol_mult_erf_ao(i,j,1.d+9,r) - v_ij_erf_rk_cst_mu(j,i,ipoint)= (int_mu - int_coulomb ) - enddo + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + int_mu = NAI_pol_mult_erf_ao(i, j, mu_erf, r) + int_coulomb = NAI_pol_mult_erf_ao(i, j, 1.d+9, r) + + v_ij_erf_rk_cst_mu(j,i,ipoint) = int_mu - int_coulomb + enddo + enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = 1, i-1 - v_ij_erf_rk_cst_mu(j,i,ipoint)= v_ij_erf_rk_cst_mu(i,j,ipoint) + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + v_ij_erf_rk_cst_mu(j,i,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) + enddo enddo - enddo enddo + + call wall_time(wall1) + print*, 'wall time for v_ij_erf_rk_cst_mu ', wall1 - wall0 - call wall_time(wall1) - print*,'wall time for v_ij_erf_rk_cst_mu ',wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_transp, (n_points_final_grid, ao_num, ao_num)] implicit none BEGIN_DOC @@ -86,54 +103,62 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_transp, (n_points_final_gr print*,'wall time for v_ij_erf_rk_cst_mu_transp ',wall1 - wall0 END_PROVIDER +! --- + +BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint, m + double precision :: r(3), ints(3), ints_coulomb(3) + double precision :: wall0, wall1 + + call wall_time(wall0) -BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3,ao_num, ao_num,n_points_final_grid)] - implicit none - BEGIN_DOC -! 
int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints(3),ints_coulomb(3) - double precision :: wall0, wall1 - call wall_time(wall0) !$OMP PARALLEL & !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,ipoint,mu,r,ints,m,ints_coulomb) & + !$OMP PRIVATE (i,j,ipoint,r,ints,m,ints_coulomb) & !$OMP SHARED (ao_num,n_points_final_grid,x_v_ij_erf_rk_cst_mu_tmp,final_grid_points,mu_erf) !$OMP DO SCHEDULE (dynamic) - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - mu = mu_erf - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - call NAI_pol_x_mult_erf_ao(i,j,mu,r,ints) - call NAI_pol_x_mult_erf_ao(i,j,1.d+9,r,ints_coulomb) - do m = 1, 3 - x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = ( ints(m) - ints_coulomb(m)) + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + call NAI_pol_x_mult_erf_ao(i, j, mu_erf, r, ints ) + call NAI_pol_x_mult_erf_ao(i, j, 1.d+9 , r, ints_coulomb) + + do m = 1, 3 + x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = (ints(m) - ints_coulomb(m)) + enddo + enddo enddo - enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = 1, i-1 - do m = 1, 3 - x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint)= x_v_ij_erf_rk_cst_mu_tmp(m,i,j,ipoint) + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + do m = 1, 3 + x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(m,i,j,ipoint) + enddo + enddo enddo - enddo enddo - enddo - call wall_time(wall1) - print*,'wall time for x_v_ij_erf_rk_cst_mu_tmp',wall1 - wall0 + call wall_time(wall1) + print*,'wall time for x_v_ij_erf_rk_cst_mu_tmp',wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu, (ao_num, ao_num,n_points_final_grid,3)] implicit none BEGIN_DOC diff --git a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f index 42505194..d4ef4b28 100644 --- a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f +++ b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f @@ -1,3 +1,6 @@ + +! --- + subroutine give_all_erf_kl_ao(integrals_ao,mu_in,C_center) implicit none BEGIN_DOC @@ -15,142 +18,328 @@ subroutine give_all_erf_kl_ao(integrals_ao,mu_in,C_center) enddo end +! --- + +double precision function NAI_pol_mult_erf_ao(i_ao, j_ao, mu_in, C_center) -double precision function NAI_pol_mult_erf_ao(i_ao,j_ao,mu_in,C_center) - implicit none BEGIN_DOC + ! ! Computes the following integral : - ! $\int_{-\infty}^{infty} dr \chi_i(r) \chi_j(r) \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. + ! $\int_{-\infty}^{infty} dr \chi_i(r) \chi_j(r) \frac{\erf(\mu |r - R_C|)}{|r - R_C|}$. + ! 
END_DOC - integer, intent(in) :: i_ao,j_ao + + implicit none + integer, intent(in) :: i_ao, j_ao double precision, intent(in) :: mu_in, C_center(3) - integer :: i,j,num_A,num_B, power_A(3), power_B(3), n_pt_in - double precision :: A_center(3), B_center(3),integral, alpha,beta + + integer :: i, j, num_A, num_B, power_A(3), power_B(3), n_pt_in + double precision :: A_center(3), B_center(3), integral, alpha, beta + double precision :: NAI_pol_mult_erf - num_A = ao_nucl(i_ao) - power_A(1:3)= ao_power(i_ao,1:3) + + num_A = ao_nucl(i_ao) + power_A(1:3) = ao_power(i_ao,1:3) A_center(1:3) = nucl_coord(num_A,1:3) - num_B = ao_nucl(j_ao) - power_B(1:3)= ao_power(j_ao,1:3) + num_B = ao_nucl(j_ao) + power_B(1:3) = ao_power(j_ao,1:3) B_center(1:3) = nucl_coord(num_B,1:3) + n_pt_in = n_pt_max_integrals + NAI_pol_mult_erf_ao = 0.d0 do i = 1, ao_prim_num(i_ao) alpha = ao_expo_ordered_transp(i,i_ao) do j = 1, ao_prim_num(j_ao) beta = ao_expo_ordered_transp(j,j_ao) - integral = NAI_pol_mult_erf(A_center,B_center,power_A,power_B,alpha,beta,C_center,n_pt_in,mu_in) - NAI_pol_mult_erf_ao += integral * ao_coef_normalized_ordered_transp(j,j_ao)*ao_coef_normalized_ordered_transp(i,i_ao) + + integral = NAI_pol_mult_erf(A_center, B_center, power_A, power_B, alpha, beta, C_center, n_pt_in,mu_in) + + NAI_pol_mult_erf_ao += integral * ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) enddo enddo -end +end function NAI_pol_mult_erf_ao +! --- +double precision function NAI_pol_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_center) -double precision function NAI_pol_mult_erf(A_center,B_center,power_A,power_B,alpha,beta,C_center,n_pt_in,mu_in) BEGIN_DOC + ! + ! Computes the following integral : + ! $\int_{-\infty}^{infty} dr \chi_i(r) \chi_j(r) e^{-\beta (r - B_center)^2} \frac{\erf(\mu |r - R_C|)}{|r - R_C|}$. + ! + END_DOC + + implicit none + integer, intent(in) :: i_ao, j_ao + double precision, intent(in) :: beta, B_center(3) + double precision, intent(in) :: mu_in, C_center(3) + + integer :: i, j, power_A1(3), power_A2(3), n_pt_in + double precision :: A1_center(3), A2_center(3), alpha1, alpha2, coef12, integral + + double precision, external :: NAI_pol_mult_erf_with1s, NAI_pol_mult_erf_ao + + ASSERT(beta .lt. 0.d0) + if(beta .lt. 1d-10) then + NAI_pol_mult_erf_ao_with1s = NAI_pol_mult_erf_ao(i_ao, j_ao, mu_in, C_center) + return + endif + + power_A1(1:3) = ao_power(i_ao,1:3) + power_A2(1:3) = ao_power(j_ao,1:3) + + A1_center(1:3) = nucl_coord(ao_nucl(i_ao),1:3) + A2_center(1:3) = nucl_coord(ao_nucl(j_ao),1:3) + + n_pt_in = n_pt_max_integrals + + NAI_pol_mult_erf_ao_with1s = 0.d0 + do i = 1, ao_prim_num(i_ao) + alpha1 = ao_expo_ordered_transp(i,i_ao) + do j = 1, ao_prim_num(j_ao) + alpha2 = ao_expo_ordered_transp(j,j_ao) + + coef12 = ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) + if(coef12 .lt. 1d-14) cycle + + integral = NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A1, power_A2, alpha1, alpha2 & + , beta, B_center, C_center, n_pt_in, mu_in ) + + NAI_pol_mult_erf_ao_with1s += integral * coef12 + enddo + enddo + +end function NAI_pol_mult_erf_ao_with1s + +! --- + +double precision function NAI_pol_mult_erf(A_center, B_center, power_A, power_B, alpha, beta, C_center, n_pt_in, mu_in) + + BEGIN_DOC + ! ! Computes the following integral : ! ! .. math:: ! ! \int dr (x-A_x)^a (x-B_x)^b \exp(-\alpha (x-A_x)^2 - \beta (x-B_x)^2 ) - ! \frac{\erf(\mu | r - R_C | )}{ | r - R_C | }$. + ! \frac{\erf(\mu |r - R_C |)}{| r - R_C |}$. ! 
END_DOC - implicit none - integer, intent(in) :: n_pt_in - double precision,intent(in) :: C_center(3),A_center(3),B_center(3),alpha,beta,mu_in - integer, intent(in) :: power_A(3),power_B(3) - integer :: i,j,k,l,n_pt - double precision :: P_center(3) - - double precision :: d(0:n_pt_in),pouet,coeff,dist,const,pouet_2,factor - double precision :: I_n_special_exact,integrate_bourrin,I_n_bibi - double precision :: V_e_n,const_factor,dist_integral,tmp - double precision :: accu,rint,p_inv,p,rho,p_inv_2 - integer :: n_pt_out,lmax include 'utils/constants.include.F' - p = alpha + beta - p_inv = 1.d0/p - p_inv_2 = 0.5d0 * p_inv - rho = alpha * beta * p_inv - dist = 0.d0 + implicit none + integer, intent(in) :: n_pt_in + integer, intent(in) :: power_A(3), power_B(3) + double precision, intent(in) :: C_center(3), A_center(3), B_center(3), alpha, beta, mu_in + + integer :: i, n_pt, n_pt_out + double precision :: P_center(3) + double precision :: d(0:n_pt_in), coeff, dist, const, factor + double precision :: const_factor, dist_integral + double precision :: accu, p_inv, p, rho, p_inv_2 + double precision :: p_new + + double precision :: rint + + p = alpha + beta + p_inv = 1.d0 / p + p_inv_2 = 0.5d0 * p_inv + rho = alpha * beta * p_inv + + dist = 0.d0 dist_integral = 0.d0 do i = 1, 3 - P_center(i) = (alpha * A_center(i) + beta * B_center(i)) * p_inv - dist += (A_center(i) - B_center(i))*(A_center(i) - B_center(i)) - dist_integral += (P_center(i) - C_center(i))*(P_center(i) - C_center(i)) + P_center(i) = (alpha * A_center(i) + beta * B_center(i)) * p_inv + dist += (A_center(i) - B_center(i)) * (A_center(i) - B_center(i)) + dist_integral += (P_center(i) - C_center(i)) * (P_center(i) - C_center(i)) enddo - const_factor = dist*rho - if(const_factor > 80.d0)then + const_factor = dist * rho + if(const_factor > 80.d0) then NAI_pol_mult_erf = 0.d0 return endif - double precision :: p_new - p_new = mu_in/dsqrt(p+ mu_in * mu_in) - factor = dexp(-const_factor) - coeff = dtwo_pi * factor * p_inv * p_new - lmax = 20 - ! print*, "b" + p_new = mu_in / dsqrt(p + mu_in * mu_in) + factor = dexp(-const_factor) + coeff = dtwo_pi * factor * p_inv * p_new + + n_pt = 2 * ( (power_A(1) + power_B(1)) + (power_A(2) + power_B(2)) + (power_A(3) + power_B(3)) ) + const = p * dist_integral * p_new * p_new + if(n_pt == 0) then + NAI_pol_mult_erf = coeff * rint(0, const) + return + endif + do i = 0, n_pt_in d(i) = 0.d0 enddo - n_pt = 2 * ( (power_A(1) + power_B(1)) +(power_A(2) + power_B(2)) +(power_A(3) + power_B(3)) ) - const = p * dist_integral * p_new * p_new - if (n_pt == 0) then - pouet = rint(0,const) - NAI_pol_mult_erf = coeff * pouet - return - endif - ! call give_polynomial_mult_center_one_e_erf(A_center,B_center,alpha,beta,power_A,power_B,C_center,n_pt_in,d,n_pt_out,mu_in) p_new = p_new * p_new - call give_polynomial_mult_center_one_e_erf_opt(A_center,B_center,alpha,beta,power_A,power_B,C_center,n_pt_in,d,n_pt_out,mu_in,p,p_inv,p_inv_2,p_new,P_center) + call give_polynomial_mult_center_one_e_erf_opt( A_center, B_center, power_A, power_B, C_center & + , n_pt_in, d, n_pt_out, p_inv_2, p_new, P_center) - - if(n_pt_out<0)then + if(n_pt_out < 0) then NAI_pol_mult_erf = 0.d0 return endif - accu = 0.d0 ! sum of integrals of type : int {t,[0,1]} exp-(rho.(P-Q)^2 * t^2) * t^i - do i =0 ,n_pt_out,2 - accu += d(i) * rint(i/2,const) + accu = 0.d0 + do i = 0, n_pt_out, 2 + accu += d(i) * rint(i/2, const) enddo NAI_pol_mult_erf = accu * coeff -end +end function NAI_pol_mult_erf + +! 
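
For reference, a sketch of the reduction performed by NAI_pol_mult_erf above, assuming rint(n,x) = \int_0^1 dt\, t^{2n} e^{-x t^2} (that routine is not part of this patch) and writing d_k for the even polynomial built by give_polynomial_mult_center_one_e_erf_opt:

  \int d\mathbf r\; (x-A_x)^{a_x}(x-B_x)^{b_x}\cdots\,
       e^{-\alpha(\mathbf r-\mathbf A)^2 - \beta(\mathbf r-\mathbf B)^2}\;
       \frac{{\rm erf}(\mu |\mathbf r-\mathbf C|)}{|\mathbf r-\mathbf C|}
  \;=\; \frac{2\pi}{p}\,\frac{\mu}{\sqrt{p+\mu^2}}\;
        e^{-\frac{\alpha\beta}{p}|\mathbf A-\mathbf B|^2}
        \sum_{k\ {\rm even}} d_k\, {\rm rint}\!\Big(\tfrac{k}{2},\;
        \frac{p\,\mu^2}{p+\mu^2}\,|\mathbf P-\mathbf C|^2\Big)

with p = alpha + beta and P the Gaussian-product center, matching coeff, const and the final accumulation loop in the code.
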
--- + +double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A1, power_A2, alpha1, alpha2 & + , beta, B_center, C_center, n_pt_in, mu_in ) + + BEGIN_DOC + ! + ! Computes the following integral : + ! + ! .. math:: + ! + ! \int dx (x - A1_x)^a_1 (x - B1_x)^a_2 \exp(-\alpha_1 (x - A1_x)^2 - \alpha_2 (x - A2_x)^2) + ! \int dy (y - A1_y)^b_1 (y - B1_y)^b_2 \exp(-\alpha_1 (y - A1_y)^2 - \alpha_2 (y - A2_y)^2) + ! \int dz (x - A1_z)^c_1 (z - B1_z)^c_2 \exp(-\alpha_1 (z - A1_z)^2 - \alpha_2 (z - A2_z)^2) + ! \exp(-\beta (r - B)^2) + ! \frac{\erf(\mu |r - R_C|)}{|r - R_C|}$. + ! + END_DOC + + include 'utils/constants.include.F' + + implicit none + integer, intent(in) :: n_pt_in + integer, intent(in) :: power_A1(3), power_A2(3) + double precision, intent(in) :: C_center(3), A1_center(3), A2_center(3), B_center(3) + double precision, intent(in) :: alpha1, alpha2, beta, mu_in + + integer :: i, n_pt, n_pt_out + double precision :: alpha12, alpha12_inv, alpha12_inv_2, rho12, A12_center(3), dist12, const_factor12 + double precision :: p, p_inv, p_inv_2, rho, P_center(3), dist, const_factor + double precision :: dist_integral + double precision :: d(0:n_pt_in), coeff, const, factor + double precision :: accu + double precision :: p_new + + double precision :: rint -subroutine give_polynomial_mult_center_one_e_erf_opt(A_center,B_center,alpha,beta,& - power_A,power_B,C_center,n_pt_in,d,n_pt_out,mu_in,p,p_inv,p_inv_2,p_new,P_center) + ! e^{-alpha1 (r - A1)^2} e^{-alpha2 (r - A2)^2} = e^{K12} e^{-alpha12 (r - A12)^2} + alpha12 = alpha1 + alpha2 + alpha12_inv = 1.d0 / alpha12 + alpha12_inv_2 = 0.5d0 * alpha12_inv + rho12 = alpha1 * alpha2 * alpha12_inv + + dist12 = 0.d0 + do i = 1, 3 + A12_center(i) = (alpha1 * A1_center(i) + alpha2 * A2_center(i)) * alpha12_inv + dist12 += (A1_center(i) - A2_center(i)) * (A1_center(i) - A2_center(i)) + enddo + + const_factor12 = dist12 * rho12 + if(const_factor12 > 80.d0) then + NAI_pol_mult_erf_with1s = 0.d0 + return + endif + + ! --- + + ! e^{K12} e^{-alpha12 (r - A12)^2} e^{-beta (r - B)^2} = e^{K} e^{-p (r - P)^2} + p = alpha12 + beta + p_inv = 1.d0 / p + p_inv_2 = 0.5d0 * p_inv + rho = alpha12 * beta * p_inv + + dist = 0.d0 + dist_integral = 0.d0 + do i = 1, 3 + P_center(i) = (alpha12 * A12_center(i) + beta * B_center(i)) * p_inv + dist += (A12_center(i) - B_center(i)) * (A12_center(i) - B_center(i)) + dist_integral += (P_center(i) - C_center(i)) * (P_center(i) - C_center(i)) + enddo + + const_factor = const_factor12 + dist * rho + if(const_factor > 80.d0) then + NAI_pol_mult_erf_with1s = 0.d0 + return + endif + + ! --- + + p_new = mu_in / dsqrt(p + mu_in * mu_in) + factor = dexp(-const_factor) + coeff = dtwo_pi * factor * p_inv * p_new + + n_pt = 2 * ( (power_A1(1) + power_A2(1)) + (power_A1(2) + power_A2(2)) + (power_A1(3) + power_A2(3)) ) + const = p * dist_integral * p_new * p_new + if(n_pt == 0) then + NAI_pol_mult_erf_with1s = coeff * rint(0, const) + return + endif + + do i = 0, n_pt_in + d(i) = 0.d0 + enddo + p_new = p_new * p_new + + call give_polynomial_mult_center_one_e_erf_opt( A1_center, A2_center, power_A1, power_A2, C_center & + , n_pt_in, d, n_pt_out, p_inv_2, p_new, P_center) + + if(n_pt_out < 0) then + NAI_pol_mult_erf_with1s = 0.d0 + return + endif + + ! sum of integrals of type : int {t,[0,1]} exp-(rho.(P-Q)^2 * t^2) * t^i + accu = 0.d0 + do i = 0, n_pt_out, 2 + accu += d(i) * rint(i/2, const) + enddo + NAI_pol_mult_erf_with1s = accu * coeff + +end function NAI_pol_mult_erf_with1s + +! 
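
In NAI_pol_mult_erf_with1s the extra 1s Gaussian is absorbed by applying the Gaussian product rule twice; a sketch of the two steps coded above (alpha12, A12, p and P correspond to alpha12, A12_center, p and P_center):

  e^{-\alpha_1(\mathbf r-\mathbf A_1)^2}\, e^{-\alpha_2(\mathbf r-\mathbf A_2)^2}
    = e^{-\frac{\alpha_1\alpha_2}{\alpha_{12}}|\mathbf A_1-\mathbf A_2|^2}\;
      e^{-\alpha_{12}(\mathbf r-\mathbf A_{12})^2},
  \qquad \alpha_{12} = \alpha_1+\alpha_2,\;\;
         \mathbf A_{12} = \frac{\alpha_1\mathbf A_1+\alpha_2\mathbf A_2}{\alpha_{12}}

  e^{-\alpha_{12}(\mathbf r-\mathbf A_{12})^2}\, e^{-\beta(\mathbf r-\mathbf B)^2}
    = e^{-\frac{\alpha_{12}\beta}{p}|\mathbf A_{12}-\mathbf B|^2}\;
      e^{-p(\mathbf r-\mathbf P)^2},
  \qquad p = \alpha_{12}+\beta,\;\;
         \mathbf P = \frac{\alpha_{12}\mathbf A_{12}+\beta\mathbf B}{p}

after which the same even-polynomial / rint evaluation as in NAI_pol_mult_erf applies, the two exponential prefactors being accumulated in const_factor.
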
--- + +subroutine give_polynomial_mult_center_one_e_erf_opt( A_center, B_center, power_A, power_B, C_center & + , n_pt_in, d, n_pt_out, p_inv_2, p_new, P_center) + BEGIN_DOC ! Returns the explicit polynomial in terms of the $t$ variable of the ! following polynomial: ! ! $I_{x1}(a_x, d_x,p,q) \times I_{x1}(a_y, d_y,p,q) \times I_{x1}(a_z, d_z,p,q)$. END_DOC + implicit none - integer, intent(in) :: n_pt_in - integer,intent(out) :: n_pt_out - double precision, intent(in) :: A_center(3), B_center(3),C_center(3),p,p_inv,p_inv_2,p_new,P_center(3) - double precision, intent(in) :: alpha,beta,mu_in - integer, intent(in) :: power_A(3), power_B(3) - integer :: a_x,b_x,a_y,b_y,a_z,b_z - double precision :: d(0:n_pt_in) - double precision :: d1(0:n_pt_in) - double precision :: d2(0:n_pt_in) - double precision :: d3(0:n_pt_in) - double precision :: accu + integer, intent(in) :: n_pt_in + integer, intent(in) :: power_A(3), power_B(3) + double precision, intent(in) :: A_center(3), B_center(3), C_center(3), p_inv_2, p_new, P_center(3) + integer, intent(out) :: n_pt_out + double precision, intent(out) :: d(0:n_pt_in) + + integer :: a_x, b_x, a_y, b_y, a_z, b_z + integer :: n_pt1, n_pt2, n_pt3, dim, i + integer :: n_pt_tmp + double precision :: d1(0:n_pt_in) + double precision :: d2(0:n_pt_in) + double precision :: d3(0:n_pt_in) + double precision :: accu + double precision :: R1x(0:2), B01(0:2), R1xp(0:2), R2x(0:2) + accu = 0.d0 ASSERT (n_pt_in > 1) - double precision :: R1x(0:2), B01(0:2), R1xp(0:2),R2x(0:2) R1x(0) = (P_center(1) - A_center(1)) R1x(1) = 0.d0 R1x(2) = -(P_center(1) - C_center(1))* p_new @@ -161,27 +350,22 @@ subroutine give_polynomial_mult_center_one_e_erf_opt(A_center,B_center,alpha,bet !R1xp = (P_x - B_x) - (P_x - C_x) ( t * mu/sqrt(p+mu^2) )^2 R2x(0) = p_inv_2 R2x(1) = 0.d0 - R2x(2) = -p_inv_2* p_new + R2x(2) = -p_inv_2 * p_new !R2x = 0.5 / p - 0.5/p ( t * mu/sqrt(p+mu^2) )^2 - do i = 0,n_pt_in - d(i) = 0.d0 - enddo - do i = 0,n_pt_in + + do i = 0, n_pt_in + d (i) = 0.d0 d1(i) = 0.d0 - enddo - do i = 0,n_pt_in d2(i) = 0.d0 - enddo - do i = 0,n_pt_in d3(i) = 0.d0 enddo - integer :: n_pt1,n_pt2,n_pt3,dim,i + n_pt1 = n_pt_in n_pt2 = n_pt_in n_pt3 = n_pt_in a_x = power_A(1) b_x = power_B(1) - call I_x1_pol_mult_one_e(a_x,b_x,R1x,R1xp,R2x,d1,n_pt1,n_pt_in) + call I_x1_pol_mult_one_e(a_x, b_x, R1x, R1xp, R2x, d1, n_pt1, n_pt_in) if(n_pt1<0)then n_pt_out = -1 do i = 0,n_pt_in @@ -200,7 +384,7 @@ subroutine give_polynomial_mult_center_one_e_erf_opt(A_center,B_center,alpha,bet !R1xp = (P_x - B_x) - (P_x - C_x) ( t * mu/sqrt(p+mu^2) )^2 a_y = power_A(2) b_y = power_B(2) - call I_x1_pol_mult_one_e(a_y,b_y,R1x,R1xp,R2x,d2,n_pt2,n_pt_in) + call I_x1_pol_mult_one_e(a_y, b_y, R1x, R1xp, R2x, d2, n_pt2, n_pt_in) if(n_pt2<0)then n_pt_out = -1 do i = 0,n_pt_in @@ -209,41 +393,40 @@ subroutine give_polynomial_mult_center_one_e_erf_opt(A_center,B_center,alpha,bet return endif - R1x(0) = (P_center(3) - A_center(3)) R1x(1) = 0.d0 - R1x(2) = -(P_center(3) - C_center(3))* p_new + R1x(2) = -(P_center(3) - C_center(3)) * p_new ! 
R1x = (P_x - A_x) - (P_x - C_x) ( t * mu/sqrt(p+mu^2) )^2 R1xp(0) = (P_center(3) - B_center(3)) R1xp(1) = 0.d0 - R1xp(2) =-(P_center(3) - C_center(3))* p_new + R1xp(2) =-(P_center(3) - C_center(3)) * p_new !R2x = 0.5 / p - 0.5/p ( t * mu/sqrt(p+mu^2) )^2 a_z = power_A(3) b_z = power_B(3) - call I_x1_pol_mult_one_e(a_z,b_z,R1x,R1xp,R2x,d3,n_pt3,n_pt_in) - if(n_pt3<0)then + call I_x1_pol_mult_one_e(a_z, b_z, R1x, R1xp, R2x, d3, n_pt3, n_pt_in) + if(n_pt3 < 0) then n_pt_out = -1 do i = 0,n_pt_in d(i) = 0.d0 enddo return endif - integer :: n_pt_tmp + n_pt_tmp = 0 - call multiply_poly(d1,n_pt1,d2,n_pt2,d,n_pt_tmp) - do i = 0,n_pt_tmp + call multiply_poly(d1, n_pt1, d2, n_pt2, d, n_pt_tmp) + do i = 0, n_pt_tmp d1(i) = 0.d0 enddo n_pt_out = 0 - call multiply_poly(d ,n_pt_tmp ,d3,n_pt3,d1,n_pt_out) + call multiply_poly(d, n_pt_tmp, d3, n_pt3, d1, n_pt_out) do i = 0, n_pt_out d(i) = d1(i) enddo -end - +end subroutine give_polynomial_mult_center_one_e_erf_opt +! --- subroutine give_polynomial_mult_center_one_e_erf(A_center,B_center,alpha,beta,& diff --git a/src/non_h_ints_mu/fit_j.irp.f b/src/non_h_ints_mu/fit_j.irp.f index 695ead7f..d359d489 100644 --- a/src/non_h_ints_mu/fit_j.irp.f +++ b/src/non_h_ints_mu/fit_j.irp.f @@ -12,33 +12,45 @@ BEGIN_PROVIDER [ double precision, expo_j_xmu, (n_fit_1_erf_x) ] END_PROVIDER +! --- + BEGIN_PROVIDER [double precision, expo_gauss_j_mu_x, (n_max_fit_slat)] &BEGIN_PROVIDER [double precision, coef_gauss_j_mu_x, (n_max_fit_slat)] - implicit none - BEGIN_DOC -! J(mu,r12) = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) is expressed as -! -! J(mu,r12) = 0.5/mu * F(r12*mu) where F(x) = x * (1 - erf(x)) - 1/sqrt(pi) * exp(-x**2) -! -! F(x) is fitted by - 1/sqrt(pi) * exp(-alpha * x) exp(-beta*mu^2x^2) (see expo_j_xmu) -! -! The slater function exp(-alpha * x) is fitted with n_max_fit_slat gaussians -! -! See Appendix 2 of JCP 154, 084119 (2021) -! - END_DOC - integer :: i - double precision :: expos(n_max_fit_slat),alpha,beta - alpha = expo_j_xmu(1) * mu_erf - call expo_fit_slater_gam(alpha,expos) - beta = expo_j_xmu(2) * mu_erf**2.d0 - - do i = 1, n_max_fit_slat - expo_gauss_j_mu_x(i) = expos(i) + beta - coef_gauss_j_mu_x(i) = coef_fit_slat_gauss(i) / (2.d0 * mu_erf) * (- 1/dsqrt(dacos(-1.d0))) - enddo + + BEGIN_DOC + ! + ! J(mu,r12) = 1/2 r12 * (1 - erf(mu*r12)) - 1/(2 sqrt(pi)*mu) exp(-(mu*r12)^2) is expressed as + ! + ! J(mu,r12) = 0.5/mu * F(r12*mu) where F(x) = x * (1 - erf(x)) - 1/sqrt(pi) * exp(-x**2) + ! + ! F(x) is fitted by - 1/sqrt(pi) * exp(-alpha * x) exp(-beta*mu^2x^2) (see expo_j_xmu) + ! + ! The slater function exp(-alpha * x) is fitted with n_max_fit_slat gaussians + ! + ! See Appendix 2 of JCP 154, 084119 (2021) + ! + END_DOC + + implicit none + integer :: i + double precision :: tmp + double precision :: expos(n_max_fit_slat), alpha, beta + + tmp = -0.5d0 / (mu_erf * sqrt(dacos(-1.d0))) + + alpha = expo_j_xmu(1) * mu_erf + call expo_fit_slater_gam(alpha, expos) + beta = expo_j_xmu(2) * mu_erf * mu_erf + + do i = 1, n_max_fit_slat + expo_gauss_j_mu_x(i) = expos(i) + beta + coef_gauss_j_mu_x(i) = tmp * coef_fit_slat_gauss(i) + enddo + END_PROVIDER +! --- + double precision function F_x_j(x) implicit none BEGIN_DOC @@ -89,3 +101,6 @@ double precision function j_mu_fit_gauss(x) enddo end + +! 
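
A sketch of the composite fit produced by the expo_gauss_j_mu_x / coef_gauss_j_mu_x provider above, assuming expo_fit_slater_gam and coef_fit_slat_gauss supply a Slater-to-Gaussian expansion e^{-a r} \approx \sum_i s_i e^{-g_i r^2} (those routines are not shown in this patch); gamma_i and c_i denote expo_gauss_j_mu_x(i) and coef_gauss_j_mu_x(i):

  J(\mu, r_{12}) = \frac{1}{2\mu} F(\mu r_{12}), \qquad
  F(x) \approx -\frac{1}{\sqrt{\pi}}\, e^{-a_1 x}\, e^{-a_2 x^2}
  \quad (a_1 = {\tt expo\_j\_xmu}(1),\; a_2 = {\tt expo\_j\_xmu}(2))

  \Rightarrow\quad
  J(\mu, r_{12}) \approx \sum_i c_i\, e^{-\gamma_i r_{12}^2}, \qquad
  \gamma_i = g_i + a_2\,\mu^2, \qquad
  c_i = -\frac{s_i}{2\mu\sqrt{\pi}}

where the g_i come from the Gaussian expansion of e^{-a_1 \mu\, r_{12}}; this is the form consumed by v_ij_u_cst_mu_j1b.
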
--- + diff --git a/src/non_h_ints_mu/new_grad_tc.irp.f b/src/non_h_ints_mu/new_grad_tc.irp.f index 068381b4..8f4883eb 100644 --- a/src/non_h_ints_mu/new_grad_tc.irp.f +++ b/src/non_h_ints_mu/new_grad_tc.irp.f @@ -1,30 +1,84 @@ -BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, ( ao_num, ao_num,n_points_final_grid,3)] - implicit none - BEGIN_DOC - ! grad_1_u_ij_mu(i,j,ipoint) = -1 * \int dr2 \grad_r1 u(r1,r2) \phi_i(r2) \phi_j(r2) - ! - ! where r1 = r(ipoint) - ! - ! grad_1_u_ij_mu(i,j,ipoint) = \int dr2 (r1 - r2) (erf(mu * r12)-1)/2 r_12 \phi_i(r2) \phi_j(r2) - END_DOC - integer :: ipoint,i,j,m - double precision :: r(3) - do m = 1, 3 - do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - grad_1_u_ij_mu(i,j,ipoint,m) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(m) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,m) + +! --- + +BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_final_grid, 3)] + + BEGIN_DOC + ! + ! grad_1_u_ij_mu(i,j,ipoint) = \int dr2 [-1 * \grad_r1 u(r1,r2)] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) + ! = \int dr2 [(r1 - r2) (erf(mu * r12)-1)/2 r_12] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) + ! + ! where r1 = r(ipoint) + ! + END_DOC + + implicit none + integer :: ipoint, i, j, i_1s + double precision :: r(3) + double precision :: a, d, e, tmp1, tmp2, fact_r, fact_xyz(3) + + PROVIDE j1b_type j1b_pen + + if(j1b_type .eq. 3) then + + do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + fact_r = 1.d0 + fact_xyz(1) = 1.d0 + fact_xyz(2) = 1.d0 + fact_xyz(3) = 1.d0 + do i_1s = 1, nucl_num + a = j1b_pen(i_1s) + d = (r(1) - nucl_coord(i_1s,1)) * (r(1) - nucl_coord(i_1s,1)) & + + (r(2) - nucl_coord(i_1s,2)) * (r(2) - nucl_coord(i_1s,2)) & + + (r(3) - nucl_coord(i_1s,3)) * (r(3) - nucl_coord(i_1s,3)) + e = dexp(-a*d) + + fact_r = fact_r * (1.d0 - e) + fact_xyz(1) = fact_xyz(1) * (2.d0 * a * (r(1) - nucl_coord(i_1s,1)) * e) + fact_xyz(2) = fact_xyz(2) * (2.d0 * a * (r(2) - nucl_coord(i_1s,2)) * e) + fact_xyz(3) = fact_xyz(3) * (2.d0 * a * (r(3) - nucl_coord(i_1s,3)) * e) + enddo + + do j = 1, ao_num + do i = 1, ao_num + + tmp1 = fact_r * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + tmp2 = v_ij_u_cst_mu_j1b (i,j,ipoint) + + grad_1_u_ij_mu(i,j,ipoint,1) = tmp1 * r(1) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + fact_xyz(1) * tmp2 + grad_1_u_ij_mu(i,j,ipoint,2) = tmp1 * r(2) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + fact_xyz(2) * tmp2 + grad_1_u_ij_mu(i,j,ipoint,3) = tmp1 * r(3) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + fact_xyz(3) * tmp2 + enddo + enddo enddo - enddo - enddo - enddo - grad_1_u_ij_mu *= 0.5d0 + + else + + do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do j = 1, ao_num + do i = 1, ao_num + grad_1_u_ij_mu(i,j,ipoint,1) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(1) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) + grad_1_u_ij_mu(i,j,ipoint,2) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(2) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) + grad_1_u_ij_mu(i,j,ipoint,3) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(3) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) + enddo + enddo + enddo + + endif + + grad_1_u_ij_mu *= 0.5d0 END_PROVIDER +! 
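
A sketch of the identity behind the second branch of grad_1_u_ij_mu above (the case without a 1-body envelope), assuming u(r_1,r_2) = u_\mu(r_{12}) = \tfrac{r_{12}}{2}\big(1-{\rm erf}(\mu r_{12})\big) - \tfrac{1}{2\sqrt{\pi}\mu}\, e^{-(\mu r_{12})^2} as in fit_j.irp.f:

  -\nabla_{\mathbf r_1} u_\mu(r_{12})
    = \frac{\mathbf r_1 - \mathbf r_2}{2}\, \frac{{\rm erf}(\mu r_{12}) - 1}{r_{12}}

  \int d\mathbf r_2\, \big[-\nabla_{\mathbf r_1} u_\mu\big]\, \phi_i(\mathbf r_2)\,\phi_j(\mathbf r_2)
    = \frac{1}{2}\Big[\, \mathbf r_1 \int d\mathbf r_2\,
        \frac{{\rm erf}(\mu r_{12})-1}{r_{12}}\, \phi_i \phi_j
      \;-\; \int d\mathbf r_2\, \mathbf r_2\,
        \frac{{\rm erf}(\mu r_{12})-1}{r_{12}}\, \phi_i \phi_j \Big]

i.e. 0.5d0 * ( v_ij_erf_rk_cst_mu(i,j,ipoint) * r(m) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,m) ), which is the expression coded in the else branch.
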
--- + BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao, (ao_num, ao_num, ao_num, ao_num)] implicit none BEGIN_DOC From b913277daacad629f62af0f87a7d7910e31ca594 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Thu, 13 Oct 2022 19:22:12 +0200 Subject: [PATCH 03/10] 1st term of grad2 added --- src/ao_many_one_e_ints/grad2_jmu_modif.irp.f | 213 ++++++++++++++ ...b_ints.irp.f => grad_lapl_jmu_modif.irp.f} | 126 ++++----- .../grad_related_ints.irp.f | 4 +- src/non_h_ints_mu/debug_integ_jmu_modif.irp.f | 93 ++++++ src/non_h_ints_mu/grad_squared.irp.f | 168 ++++++----- src/non_h_ints_mu/jmu_modif.irp.f | 266 ++++++++++++++++++ src/non_h_ints_mu/new_grad_tc.irp.f | 135 ++++++--- src/non_h_ints_mu/numerical_integ.irp.f | 135 +++++++++ 8 files changed, 976 insertions(+), 164 deletions(-) create mode 100644 src/ao_many_one_e_ints/grad2_jmu_modif.irp.f rename src/ao_many_one_e_ints/{grad_J1b_ints.irp.f => grad_lapl_jmu_modif.irp.f} (65%) create mode 100644 src/non_h_ints_mu/debug_integ_jmu_modif.irp.f create mode 100644 src/non_h_ints_mu/jmu_modif.irp.f create mode 100644 src/non_h_ints_mu/numerical_integ.irp.f diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f new file mode 100644 index 00000000..c3cdb491 --- /dev/null +++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -0,0 +1,213 @@ + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_b3_size] + + implicit none + + List_all_comb_b3_size = 3**nucl_num + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_b3, (nucl_num, List_all_comb_b3_size)] + + implicit none + integer :: i, j, ii, jj + integer, allocatable :: M(:,:), p(:) + + if(nucl_num .gt. 32) then + print *, ' nucl_num = ', nucl_num, '> 32' + stop + endif + + List_all_comb_b3(:,:) = 0 + List_all_comb_b3(:,List_all_comb_b3_size) = 2 + + allocate(p(nucl_num)) + p = 0 + + do i = 2, List_all_comb_b3_size-1 + do j = 1, nucl_num + + ii = 0 + do jj = 1, j-1, 1 + ii = ii + p(jj) * 3**(jj-1) + enddo + p(j) = modulo(i-1-ii, 3**j) / 3**(j-1) + + List_all_comb_b3(j,i) = p(j) + enddo + enddo + +END_PROVIDER + +! --- + + BEGIN_PROVIDER [ double precision, List_all_comb_b3_coef, ( List_all_comb_b3_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b3_expo, ( List_all_comb_b3_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b3_cent, (3, List_all_comb_b3_size)] + + implicit none + integer :: i, j, k, phase + double precision :: tmp_alphaj, tmp_alphak, facto + + provide j1b_pen + + List_all_comb_b3_coef = 0.d0 + List_all_comb_b3_expo = 0.d0 + List_all_comb_b3_cent = 0.d0 + + do i = 1, List_all_comb_b3_size + + do j = 1, nucl_num + tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) + + List_all_comb_b3_expo(i) += tmp_alphaj + List_all_comb_b3_cent(1,i) += tmp_alphaj * nucl_coord(j,1) + List_all_comb_b3_cent(2,i) += tmp_alphaj * nucl_coord(j,2) + List_all_comb_b3_cent(3,i) += tmp_alphaj * nucl_coord(j,3) + + enddo + + ASSERT(List_all_comb_b3_expo(i) .gt. 0d0) + if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle + + List_all_comb_b3_cent(1,i) = List_all_comb_b3_cent(1,i) / List_all_comb_b3_expo(i) + List_all_comb_b3_cent(2,i) = List_all_comb_b3_cent(2,i) / List_all_comb_b3_expo(i) + List_all_comb_b3_cent(3,i) = List_all_comb_b3_cent(3,i) / List_all_comb_b3_expo(i) + enddo + + ! 
--- + + do i = 1, List_all_comb_b3_size + + do j = 2, nucl_num, 1 + tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) + do k = 1, j-1, 1 + tmp_alphak = dble(List_all_comb_b3(k,i)) * j1b_pen(k) + + List_all_comb_b3_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + enddo + enddo + + if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle + + List_all_comb_b3_coef(i) = List_all_comb_b3_coef(i) / List_all_comb_b3_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_b3_size + + facto = 1.d0 + phase = 0 + do j = 1, nucl_num + tmp_alphaj = dble(List_all_comb_b3(j,i)) + + facto *= 2.d0 / (gamma(tmp_alphaj+1.d0) * gamma(3.d0-tmp_alphaj)) + phase += List_all_comb_b3(j,i) + enddo + + List_all_comb_b3_coef(i) = (-1.d0)**dble(phase) * facto * dexp(-List_all_comb_b3_coef(i)) + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! -\frac{1}{4} x int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [1 - erf(mu r12)]^2 + ! + END_DOC + + implicit none + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit, expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:) + + double precision, external :: overlap_gauss_r12_ao_with1s + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + int2_grad1u_grad2u_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & + !$OMP final_grid_points, n_max_fit_slat, & + !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & + !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & + !$OMP List_all_comb_b3_cent, int2_grad1u_grad2u_j1b) + + allocate( tmp(ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_b3_size + + coef = List_all_comb_b3_coef (i_1s) + beta = List_all_comb_b3_expo (i_1s) + B_center(1) = List_all_comb_b3_cent(1,i_1s) + B_center(2) = List_all_comb_b3_cent(2,i_1s) + B_center(3) = List_all_comb_b3_cent(3,i_1s) + + do i_fit = 1, n_max_fit_slat + + expo_fit = expo_gauss_1_erf_x_2(i_fit) + coef_fit = coef_gauss_1_erf_x_2(i_fit) + int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) + + tmp(j,i,ipoint) += -0.25d0 * coef * coef_fit * int_fit + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + int2_grad1u_grad2u_j1b(j,i,ipoint) += tmp(j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + int2_grad1u_grad2u_j1b(j,i,ipoint) = int2_grad1u_grad2u_j1b(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for int2_grad1u_grad2u_j1b', wall1 - wall0 + +END_PROVIDER + +! 
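
A sketch of the expansion behind List_all_comb_b3 and int2_grad1u_grad2u_j1b above, assuming expo_gauss_1_erf_x_2 / coef_gauss_1_erf_x_2 hold a Gaussian fit (1-{\rm erf}(\mu x))^2 \approx \sum_k c_k e^{-\gamma_k x^2} (that fit is not part of this patch):

  \prod_{A} \Big(1 - e^{-\alpha_A (\mathbf r - \mathbf R_A)^2}\Big)^{2}
    = \sum_{s \in \{0,1,2\}^{N_{\rm nucl}}}
      \prod_A \binom{2}{s_A} (-1)^{s_A}\,
      e^{-s_A \alpha_A (\mathbf r - \mathbf R_A)^2}

which is the base-3 enumeration stored in List_all_comb_b3; the factor 2/(\Gamma(s_A+1)\Gamma(3-s_A)) in List_all_comb_b3_coef equals \binom{2}{s_A}, and each term recombines into a single Gaussian exactly as for the List_all_comb_b2 case. Together with the Gaussian fit of (1-{\rm erf}(\mu r_{12}))^2, every contribution to -\tfrac{1}{4}\int d\mathbf r_2\, \phi_i \phi_j\, [1s\_j1b(\mathbf r_2)]^2\, [1-{\rm erf}(\mu r_{12})]^2 reduces to a call to overlap_gauss_r12_ao_with1s.
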
--- + diff --git a/src/ao_many_one_e_ints/grad_J1b_ints.irp.f b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f similarity index 65% rename from src/ao_many_one_e_ints/grad_J1b_ints.irp.f rename to src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index 30e0acc8..c789ad36 100644 --- a/src/ao_many_one_e_ints/grad_J1b_ints.irp.f +++ b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -1,17 +1,17 @@ ! --- -BEGIN_PROVIDER [ integer, List_all_comb_size] +BEGIN_PROVIDER [ integer, List_all_comb_b2_size] implicit none - List_all_comb_size = 2**nucl_num + List_all_comb_b2_size = 2**nucl_num END_PROVIDER ! --- -BEGIN_PROVIDER [ integer, List_all_comb, (nucl_num, List_all_comb_size)] +BEGIN_PROVIDER [ integer, List_all_comb_b2, (nucl_num, List_all_comb_b2_size)] implicit none integer :: i, j @@ -21,12 +21,12 @@ BEGIN_PROVIDER [ integer, List_all_comb, (nucl_num, List_all_comb_size)] stop endif - List_all_comb = 0 + List_all_comb_b2 = 0 - do i = 0, List_all_comb_size-1 + do i = 0, List_all_comb_b2_size-1 do j = 0, nucl_num-1 if (btest(i,j)) then - List_all_comb(j+1,i+1) = 1 + List_all_comb_b2(j+1,i+1) = 1 endif enddo enddo @@ -35,9 +35,9 @@ END_PROVIDER ! --- - BEGIN_PROVIDER [ double precision, List_all_j1b1s_coef, ( List_all_comb_size)] -&BEGIN_PROVIDER [ double precision, List_all_j1b1s_expo, ( List_all_comb_size)] -&BEGIN_PROVIDER [ double precision, List_all_j1b1s_cent, (3, List_all_comb_size)] + BEGIN_PROVIDER [ double precision, List_all_comb_b2_coef, ( List_all_comb_b2_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b2_expo, ( List_all_comb_b2_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b2_cent, (3, List_all_comb_b2_size)] implicit none integer :: i, j, k, phase @@ -45,60 +45,60 @@ END_PROVIDER provide j1b_pen - List_all_j1b1s_coef = 0.d0 - List_all_j1b1s_expo = 0.d0 - List_all_j1b1s_cent = 0.d0 + List_all_comb_b2_coef = 0.d0 + List_all_comb_b2_expo = 0.d0 + List_all_comb_b2_cent = 0.d0 - do i = 1, List_all_comb_size + do i = 1, List_all_comb_b2_size do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb(j,i)) * j1b_pen(j) + tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - List_all_j1b1s_expo(i) += tmp_alphaj - List_all_j1b1s_cent(1,i) += tmp_alphaj * nucl_coord(j,1) - List_all_j1b1s_cent(2,i) += tmp_alphaj * nucl_coord(j,2) - List_all_j1b1s_cent(3,i) += tmp_alphaj * nucl_coord(j,3) + List_all_comb_b2_expo(i) += tmp_alphaj + List_all_comb_b2_cent(1,i) += tmp_alphaj * nucl_coord(j,1) + List_all_comb_b2_cent(2,i) += tmp_alphaj * nucl_coord(j,2) + List_all_comb_b2_cent(3,i) += tmp_alphaj * nucl_coord(j,3) enddo - ASSERT(List_all_j1b1s_expo(i) .gt. 0d0) - if(List_all_j1b1s_expo(i) .lt. 1d-10) cycle + ASSERT(List_all_comb_b2_expo(i) .gt. 0d0) + if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - List_all_j1b1s_cent(1,i) = List_all_j1b1s_cent(1,i) / List_all_j1b1s_expo(i) - List_all_j1b1s_cent(2,i) = List_all_j1b1s_cent(2,i) / List_all_j1b1s_expo(i) - List_all_j1b1s_cent(3,i) = List_all_j1b1s_cent(3,i) / List_all_j1b1s_expo(i) + List_all_comb_b2_cent(1,i) = List_all_comb_b2_cent(1,i) / List_all_comb_b2_expo(i) + List_all_comb_b2_cent(2,i) = List_all_comb_b2_cent(2,i) / List_all_comb_b2_expo(i) + List_all_comb_b2_cent(3,i) = List_all_comb_b2_cent(3,i) / List_all_comb_b2_expo(i) enddo ! 
--- - do i = 1, List_all_comb_size + do i = 1, List_all_comb_b2_size do j = 2, nucl_num, 1 - tmp_alphaj = dble(List_all_comb(j,i)) * j1b_pen(j) + tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) do k = 1, j-1, 1 - tmp_alphak = dble(List_all_comb(k,i)) * j1b_pen(k) + tmp_alphak = dble(List_all_comb_b2(k,i)) * j1b_pen(k) - List_all_j1b1s_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & - + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & - + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + List_all_comb_b2_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) enddo enddo - if(List_all_j1b1s_expo(i) .lt. 1d-10) cycle + if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - List_all_j1b1s_coef(i) = List_all_j1b1s_coef(i) / List_all_j1b1s_expo(i) + List_all_comb_b2_coef(i) = List_all_comb_b2_coef(i) / List_all_comb_b2_expo(i) enddo ! --- - do i = 1, List_all_comb_size + do i = 1, List_all_comb_b2_size phase = 0 do j = 1, nucl_num - phase += List_all_comb(j,i) + phase += List_all_comb_b2(j,i) enddo - List_all_j1b1s_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_j1b1s_coef(i)) + List_all_comb_b2_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_comb_b2_coef(i)) enddo END_PROVIDER @@ -129,8 +129,8 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, int_mu, int_coulomb, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, final_grid_points, & - !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, List_all_j1b1s_cent, & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points, & + !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & !$OMP v_ij_erf_rk_cst_mu_j1b, mu_erf) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) @@ -144,13 +144,13 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - do i_1s = 1, List_all_comb_size + do i_1s = 1, List_all_comb_b2_size - coef = List_all_j1b1s_coef (i_1s) - beta = List_all_j1b1s_expo (i_1s) - B_center(1) = List_all_j1b1s_cent(1,i_1s) - B_center(2) = List_all_j1b1s_cent(2,i_1s) - B_center(3) = List_all_j1b1s_cent(3,i_1s) + coef = List_all_comb_b2_coef (i_1s) + beta = List_all_comb_b2_expo (i_1s) + B_center(1) = List_all_comb_b2_cent(1,i_1s) + B_center(2) = List_all_comb_b2_cent(2,i_1s) + B_center(3) = List_all_comb_b2_cent(3,i_1s) int_mu = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) int_coulomb = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) @@ -238,8 +238,8 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, final_grid_points, & - !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, List_all_j1b1s_cent, & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points,& + !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & !$OMP x_v_ij_erf_rk_cst_mu_tmp_j1b, 
mu_erf) allocate( tmp(3,ao_num,ao_num,n_points_final_grid) ) @@ -253,13 +253,13 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - do i_1s = 1, List_all_comb_size + do i_1s = 1, List_all_comb_b2_size - coef = List_all_j1b1s_coef (i_1s) - beta = List_all_j1b1s_expo (i_1s) - B_center(1) = List_all_j1b1s_cent(1,i_1s) - B_center(2) = List_all_j1b1s_cent(2,i_1s) - B_center(3) = List_all_j1b1s_cent(3,i_1s) + coef = List_all_comb_b2_coef (i_1s) + beta = List_all_comb_b2_expo (i_1s) + B_center(1) = List_all_comb_b2_cent(1,i_1s) + B_center(2) = List_all_comb_b2_cent(2,i_1s) + B_center(3) = List_all_comb_b2_cent(3,i_1s) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, ints ) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, ints_coulomb) @@ -291,15 +291,15 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = 1, i-1 - x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(1,i,j,ipoint) - x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(2,i,j,ipoint) - x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(3,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(1,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(2,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp_j1b(3,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(3,i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 + print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp_j1b', wall1 - wall0 END_PROVIDER @@ -330,11 +330,11 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & !$OMP coef_fit, expo_fit, int_fit, tmp) & - !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_size, & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, & !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & - !$OMP List_all_j1b1s_coef, List_all_j1b1s_expo, & - !$OMP List_all_j1b1s_cent, v_ij_u_cst_mu_j1b) + !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, & + !$OMP List_all_comb_b2_cent, v_ij_u_cst_mu_j1b) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) tmp = 0.d0 @@ -347,13 +347,13 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - do i_1s = 1, List_all_comb_size + do i_1s = 1, List_all_comb_b2_size - coef = List_all_j1b1s_coef (i_1s) - beta = List_all_j1b1s_expo (i_1s) - B_center(1) = List_all_j1b1s_cent(1,i_1s) - B_center(2) = List_all_j1b1s_cent(2,i_1s) - B_center(3) = List_all_j1b1s_cent(3,i_1s) + coef = List_all_comb_b2_coef (i_1s) + beta = List_all_comb_b2_expo (i_1s) + B_center(1) = List_all_comb_b2_cent(1,i_1s) + B_center(2) = List_all_comb_b2_cent(2,i_1s) + B_center(3) = List_all_comb_b2_cent(3,i_1s) do i_fit = 1, n_max_fit_slat diff --git a/src/ao_many_one_e_ints/grad_related_ints.irp.f b/src/ao_many_one_e_ints/grad_related_ints.irp.f index 13fb1fc8..7b183d83 100644 --- a/src/ao_many_one_e_ints/grad_related_ints.irp.f +++ b/src/ao_many_one_e_ints/grad_related_ints.irp.f @@ -134,7 +134,7 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, call 
NAI_pol_x_mult_erf_ao(i, j, 1.d+9 , r, ints_coulomb) do m = 1, 3 - x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = (ints(m) - ints_coulomb(m)) + x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = (ints(m) - ints_coulomb(m)) enddo enddo enddo @@ -153,7 +153,7 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, enddo call wall_time(wall1) - print*,'wall time for x_v_ij_erf_rk_cst_mu_tmp',wall1 - wall0 + print*, ' wall time for x_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 END_PROVIDER diff --git a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f new file mode 100644 index 00000000..54026349 --- /dev/null +++ b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -0,0 +1,93 @@ + +! -- + +program debug_integ_jmu_modif + + implicit none + + my_grid_becke = .True. + !my_n_pt_r_grid = 30 + !my_n_pt_a_grid = 50 + my_n_pt_r_grid = 100 + my_n_pt_a_grid = 170 + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + PROVIDE mu_erf j1b_pen + + call test_grad_1_u_ij_mu() + +end + +! --- + +subroutine test_grad_1_u_ij_mu() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num + double precision, external :: num_grad_1_u_ij_mu_x + double precision, external :: num_grad_1_u_ij_mu_y + double precision, external :: num_grad_1_u_ij_mu_z + + print*, ' test_grad_1_u_ij_mu ...' + + PROVIDE grad_1_u_ij_mu +! PROVIDE num_grad_1_u_ij_mu + + eps_ij = 1d-6 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_exc = grad_1_u_ij_mu(i,j,ipoint,1) + !i_num = num_grad_1_u_ij_mu(i,j,ipoint,1) + i_num = num_grad_1_u_ij_mu_x(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in x part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + + i_exc = grad_1_u_ij_mu(i,j,ipoint,2) + !i_num = num_grad_1_u_ij_mu(i,j,ipoint,2) + i_num = num_grad_1_u_ij_mu_y(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in y part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + + i_exc = grad_1_u_ij_mu(i,j,ipoint,3) + !i_num = num_grad_1_u_ij_mu(i,j,ipoint,3) + i_num = num_grad_1_u_ij_mu_z(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in y part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + + enddo + enddo + enddo + + acc_tot = acc_tot / dble(ao_num*ao_num*n_points_final_grid) + print*, ' normalized acc = ', acc_tot + + return +end subroutine test_grad_1_u_ij_mu + +! --- + + + diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f index a88521a1..d02edb12 100644 --- a/src/non_h_ints_mu/grad_squared.irp.f +++ b/src/non_h_ints_mu/grad_squared.irp.f @@ -1,72 +1,112 @@ - BEGIN_PROVIDER [ double precision, grad_1_squared_u_ij_mu, ( ao_num, ao_num,n_points_final_grid)] - implicit none - integer :: ipoint,i,j,m,igauss - BEGIN_DOC - ! grad_1_squared_u_ij_mu(j,i,ipoint) = -1/2 \int dr2 phi_j(r2) phi_i(r2) |\grad_r1 u(r1,r2,\mu)|^2 - ! |\grad_r1 u(r1,r2,\mu)|^2 = 1/4 * (1 - erf(mu*r12))^2 - ! ! 
(1 - erf(mu*r12))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * r12^2) - END_DOC - double precision :: r(3),delta,coef - double precision :: overlap_gauss_r12_ao,time0,time1 - print*,'providing grad_1_squared_u_ij_mu ...' - call wall_time(time0) - !TODO : strong optmization : write the loops in a different way - ! : for each couple of AO, the gaussian product are done once for all - do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - do j = 1, ao_num - do i = 1, ao_num - ! \int dr2 phi_j(r2) phi_i(r2) (1 - erf(mu*r12))^2 - ! = \sum_i coef_gauss_1_erf_x_2(i) \int dr2 phi_j(r2) phi_i(r2) exp(-expo_gauss_1_erf_x_2(i) * (r_1 - r_2)^2) - do igauss = 1, n_max_fit_slat - delta = expo_gauss_1_erf_x_2(igauss) - coef = coef_gauss_1_erf_x_2(igauss) - grad_1_squared_u_ij_mu(j,i,ipoint) += -0.25 * coef * overlap_gauss_r12_ao(r,delta,i,j) - enddo - enddo - enddo - enddo - call wall_time(time1) - print*,'Wall time for grad_1_squared_u_ij_mu = ',time1 - time0 - END_PROVIDER -BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)] - implicit none - BEGIN_DOC - ! tc_grad_square_ao(k,i,l,j) = -1/2 - ! - END_DOC - integer :: ipoint,i,j,k,l - double precision :: contrib,weight1 - double precision, allocatable :: ac_mat(:,:,:,:) - allocate(ac_mat(ao_num, ao_num, ao_num, ao_num)) - ac_mat = 0.d0 - do ipoint = 1, n_points_final_grid - weight1 = final_weight_at_r_vector(ipoint) - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - contrib = weight1 *0.5D0* (aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,i)) - ! \int dr1 phi_k(r1) phi_i(r1) . \int dr2 |\grad_1 u(r1,r2)|^2 \phi_l(r2) \phi_j(r2) - ac_mat(k,i,l,j) += grad_1_squared_u_ij_mu(l,j,ipoint) * contrib +! --- + +! TODO : strong optmization : write the loops in a different way +! : for each couple of AO, the gaussian product are done once for all + +BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_points_final_grid)] + + BEGIN_DOC + ! + ! -1/2 [ (grad_1 u)^2 + (grad_2 u^2)] = - 1/4 * (1 - erf(mu*r12))^2 + ! + ! and + ! (1 - erf(mu*r12))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * r12^2) + ! + END_DOC + + implicit none + integer :: ipoint, i, j, m, igauss + double precision :: r(3), delta, coef + double precision :: time0, time1 + double precision, external :: overlap_gauss_r12_ao + + print*, ' providing gradu_squared_u_ij_mu ...' + call wall_time(time0) + + PROVIDE j1b_type j1b_pen + + if(j1b_type .eq. 3) then + + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + gradu_squared_u_ij_mu(j,i,ipoint) += fact3_j12(ipoint) * int2_grad1u_grad2u_j1b(i,j,ipoint) + enddo enddo - enddo enddo - enddo - enddo - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + else + + do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do j = 1, ao_num + do i = 1, ao_num + do igauss = 1, n_max_fit_slat + delta = expo_gauss_1_erf_x_2(igauss) + coef = coef_gauss_1_erf_x_2(igauss) + gradu_squared_u_ij_mu(j,i,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) + enddo + enddo + enddo enddo - enddo - enddo - enddo + + endif + + call wall_time(time1) + print*, ' Wall time for gradu_squared_u_ij_mu = ', time1 - time0 END_PROVIDER +! 
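+! The provider below contracts gradu_squared_u_ij_mu with the AO values on the grid:
+!
+!   ac_mat(k,i,l,j) = \sum_p w_p * 0.5 * phi_k(r_p) * phi_i(r_p) * gradu_squared_u_ij_mu(l,j,p)
+!
+! and then symmetrizes, tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i),
+! i.e. (up to the sign convention of gradu_squared_u_ij_mu) the AO matrix elements of
+! -1/2 [ (grad_1 u)^2 + (grad_2 u)^2 ]. Since the sum over p is a plain matrix product
+! between an (n_grid x ao_num^2) weighted AO-pair matrix and gradu_squared_u_ij_mu
+! reshaped the same way, it could in principle also be performed with a single dgemm
+! over the grid index (untested sketch).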
--- + +BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)] + + BEGIN_DOC + ! + ! tc_grad_square_ao(k,i,l,j) = -1/2 + ! + END_DOC + + implicit none + integer :: ipoint, i, j, k, l + double precision :: contrib, weight1 + double precision, allocatable :: ac_mat(:,:,:,:) + + allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) + ac_mat = 0.d0 + + do ipoint = 1, n_points_final_grid + weight1 = final_weight_at_r_vector(ipoint) + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + contrib = weight1 * 0.5d0 * (aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,i)) + ! \int dr1 phi_k(r1) phi_i(r1) . \int dr2 |\grad_1 u(r1,r2)|^2 \phi_l(r2) \phi_j(r2) + ac_mat(k,i,l,j) += gradu_squared_u_ij_mu(l,j,ipoint) * contrib + enddo + enddo + enddo + enddo + enddo + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + enddo + enddo + enddo + enddo + + deallocate(ac_mat) + +END_PROVIDER + +! --- + diff --git a/src/non_h_ints_mu/jmu_modif.irp.f b/src/non_h_ints_mu/jmu_modif.irp.f new file mode 100644 index 00000000..59a4a104 --- /dev/null +++ b/src/non_h_ints_mu/jmu_modif.irp.f @@ -0,0 +1,266 @@ + +! --- + +double precision function jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision, external :: j12_mu, j12_nucl + + jmu_modif = j12_mu(r1, r2) * j12_nucl(r1, r2) + + return +end function jmu_modif + +! --- + +double precision function j12_mu(r1, r2) + + include 'constants.include.F' + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: mu_r12, r12 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + mu_r12 = mu_erf * r12 + + j12_mu = 0.5d0 * r12 * (1.d0 - derf(mu_r12)) - inv_sq_pi_2 * dexp(-mu_r12*mu_r12) / mu_erf + + return +end function j12_mu + +! --- + +double precision function j12_nucl(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + integer :: i, j + double precision :: a1, d1, e1, a2, d2, e2 + + j12_nucl = 1.d0 + do i = 1, nucl_num + a1 = j1b_pen(i) + d1 = ( (r1(1) - nucl_coord(i,1)) * (r1(1) - nucl_coord(i,1)) & + + (r1(2) - nucl_coord(i,2)) * (r1(2) - nucl_coord(i,2)) & + + (r1(3) - nucl_coord(i,3)) * (r1(3) - nucl_coord(i,3)) ) + e1 = 1.d0 - exp(-a1*d1) + + do j = 1, nucl_num + a2 = j1b_pen(j) + d2 = ( (r2(1) - nucl_coord(j,1)) * (r2(1) - nucl_coord(j,1)) & + + (r2(2) - nucl_coord(j,2)) * (r2(2) - nucl_coord(j,2)) & + + (r2(3) - nucl_coord(j,3)) * (r2(3) - nucl_coord(j,3)) ) + e2 = 1.d0 - exp(-a2*d2) + + j12_nucl = j12_nucl * e1 * e2 + enddo + enddo + + return +end function j12_nucl + +! --- + +! 
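+! The grad1_*_jmu_modif and grad1_*_j12_mu_num functions below are debug helpers:
+! they differentiate with respect to r1 by a symmetric finite difference,
+!
+!   d f / d x1  ~  [ f(x1 + delta) - f(x1 - delta) ] / (2 delta),
+!   delta = max(eps, eps*|x1|),  eps = 1d-7,
+!
+! which is accurate to O(delta^2). A minimal sketch for one component
+! (hypothetical variable names, equivalent to the code below):
+!
+!   r1p = r1 ; r1p(1) = r1(1) + delta
+!   r1m = r1 ; r1m(1) = r1(1) - delta
+!   grad_x = 0.5d0 * (jmu_modif(r1p, r2) - jmu_modif(r1m, r2)) / delta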
--------------------------------------------------------------------------------------- + +double precision function grad1_x_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(1))) + + r1_eps(1) = r1_eps(1) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(1) = r1_eps(1) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_x_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_x_jmu_modif + +double precision function grad1_y_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(2))) + + r1_eps(2) = r1_eps(2) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(2) = r1_eps(2) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_y_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_y_jmu_modif + +double precision function grad1_z_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(3))) + + r1_eps(3) = r1_eps(3) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(3) = r1_eps(3) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_z_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_z_jmu_modif + +! --------------------------------------------------------------------------------------- + +! --- + +! --------------------------------------------------------------------------------------- + +double precision function grad1_x_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(1))) + + r1_eps(1) = r1_eps(1) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(1) = r1_eps(1) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_x_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_x_j12_mu_num + +double precision function grad1_y_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(2))) + + r1_eps(2) = r1_eps(2) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(2) = r1_eps(2) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_y_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_y_j12_mu_num + +double precision function grad1_z_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(3))) + + r1_eps(3) = r1_eps(3) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(3) = r1_eps(3) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_z_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_z_j12_mu_num + +! --------------------------------------------------------------------------------------- + +! --- + +! 
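+! The closed forms coded in grad1_*_j12_mu_exc below follow from the definition of
+! j12_mu above (assuming inv_sq_pi_2 = 0.5/sqrt(pi) in constants.include.F):
+!
+!   j12_mu(r12) = 0.5 * r12 * (1 - erf(mu*r12)) - exp(-(mu*r12)^2) / (2*sqrt(pi)*mu)
+!
+!   d j12_mu / d r12 = 0.5 * (1 - erf(mu*r12))
+!
+! because the two Gaussian terms produced by d/dr12 erf(mu*r12) cancel exactly.
+! With d r12 / d x1 = (x1 - x2) / r12 this gives
+!
+!   d j12_mu / d x1 = 0.5 * (1 - erf(mu*r12)) * (x1 - x2) / r12
+!
+! (and similarly for y and z); the r12 -> 0 limit is returned as 0.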
--------------------------------------------------------------------------------------- + +double precision function grad1_x_j12_mu_exc(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r12 + + grad1_x_j12_mu_exc = 0.d0 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + if(r12 .lt. 1d-10) return + + grad1_x_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(1) - r2(1)) / r12 + + return +end function grad1_x_j12_mu_exc + +double precision function grad1_y_j12_mu_exc(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r12 + + grad1_y_j12_mu_exc = 0.d0 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + if(r12 .lt. 1d-10) return + + grad1_y_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(2) - r2(2)) / r12 + + return +end function grad1_y_j12_mu_exc + +double precision function grad1_z_j12_mu_exc(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r12 + + grad1_z_j12_mu_exc = 0.d0 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + if(r12 .lt. 1d-10) return + + grad1_z_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(3) - r2(3)) / r12 + + return +end function grad1_z_j12_mu_exc + +! --------------------------------------------------------------------------------------- + +! --- + + diff --git a/src/non_h_ints_mu/new_grad_tc.irp.f b/src/non_h_ints_mu/new_grad_tc.irp.f index 8f4883eb..26ed642c 100644 --- a/src/non_h_ints_mu/new_grad_tc.irp.f +++ b/src/non_h_ints_mu/new_grad_tc.irp.f @@ -1,7 +1,85 @@ ! --- -BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_final_grid, 3)] + BEGIN_PROVIDER [ double precision, fact1_j12, ( n_points_final_grid)] +&BEGIN_PROVIDER [ double precision, fact2_j12, (3, n_points_final_grid)] +&BEGIN_PROVIDER [ double precision, fact3_j12, ( n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, phase + double precision :: x, y, z, dx, dy, dz + double precision :: a, d, e, fact_r, fact_r_sq + double precision :: fact_x, fact_y, fact_z + double precision :: ax_der, ay_der, az_der, a_expo + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + ! --- + + fact_r = 1.d0 + fact_r_sq = 1.d0 + do j = 1, nucl_num + a = j1b_pen(j) + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + d = x*x + y*y + z*z + e = 1.d0 - dexp(-a*d) + + fact_r = fact_r * e + fact_r_sq = fact_r_sq * e * e + enddo + fact1_j12(ipoint) = fact_r + fact3_j12(ipoint) = fact_r_sq + + ! --- + + fact_x = 0.d0 + fact_y = 0.d0 + fact_z = 0.d0 + do i = 1, List_all_comb_b2_size + + phase = 0 + a_expo = 0.d0 + ax_der = 0.d0 + ay_der = 0.d0 + az_der = 0.d0 + do j = 1, nucl_num + a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + + phase += List_all_comb_b2(j,i) + a_expo += a * (dx*dx + dy*dy + dz*dz) + ax_der += a * dx + ay_der += a * dy + az_der += a * dz + enddo + e = -2.d0 * (-1.d0)**dble(phase) * dexp(-a_expo) + + fact_x += e * ax_der + fact_y += e * ay_der + fact_z += e * az_der + enddo + + fact2_j12(1,ipoint) = fact_x + fact2_j12(2,ipoint) = fact_y + fact2_j12(3,ipoint) = fact_z + + ! 
--- + + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] BEGIN_DOC ! @@ -13,45 +91,32 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_fina END_DOC implicit none - integer :: ipoint, i, j, i_1s - double precision :: r(3) - double precision :: a, d, e, tmp1, tmp2, fact_r, fact_xyz(3) + integer :: ipoint, i, j + double precision :: x, y, z, tmp_x, tmp_y, tmp_z, tmp0, tmp1, tmp2 PROVIDE j1b_type j1b_pen if(j1b_type .eq. 3) then do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + tmp0 = fact1_j12(ipoint) + tmp_x = fact2_j12(1,ipoint) + tmp_y = fact2_j12(2,ipoint) + tmp_z = fact2_j12(3,ipoint) - fact_r = 1.d0 - fact_xyz(1) = 1.d0 - fact_xyz(2) = 1.d0 - fact_xyz(3) = 1.d0 - do i_1s = 1, nucl_num - a = j1b_pen(i_1s) - d = (r(1) - nucl_coord(i_1s,1)) * (r(1) - nucl_coord(i_1s,1)) & - + (r(2) - nucl_coord(i_1s,2)) * (r(2) - nucl_coord(i_1s,2)) & - + (r(3) - nucl_coord(i_1s,3)) * (r(3) - nucl_coord(i_1s,3)) - e = dexp(-a*d) - - fact_r = fact_r * (1.d0 - e) - fact_xyz(1) = fact_xyz(1) * (2.d0 * a * (r(1) - nucl_coord(i_1s,1)) * e) - fact_xyz(2) = fact_xyz(2) * (2.d0 * a * (r(2) - nucl_coord(i_1s,2)) * e) - fact_xyz(3) = fact_xyz(3) * (2.d0 * a * (r(3) - nucl_coord(i_1s,3)) * e) - enddo - do j = 1, ao_num do i = 1, ao_num - tmp1 = fact_r * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) - tmp2 = v_ij_u_cst_mu_j1b (i,j,ipoint) + tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + tmp2 = v_ij_u_cst_mu_j1b(i,j,ipoint) - grad_1_u_ij_mu(i,j,ipoint,1) = tmp1 * r(1) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + fact_xyz(1) * tmp2 - grad_1_u_ij_mu(i,j,ipoint,2) = tmp1 * r(2) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + fact_xyz(2) * tmp2 - grad_1_u_ij_mu(i,j,ipoint,3) = tmp1 * r(3) - fact_r * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + fact_xyz(3) * tmp2 + grad_1_u_ij_mu(i,j,ipoint,1) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + tmp_x * tmp2 + grad_1_u_ij_mu(i,j,ipoint,2) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + tmp_y * tmp2 + grad_1_u_ij_mu(i,j,ipoint,3) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + tmp_z * tmp2 enddo enddo enddo @@ -59,14 +124,14 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num,n_points_fina else do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) do j = 1, ao_num do i = 1, ao_num - grad_1_u_ij_mu(i,j,ipoint,1) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(1) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) - grad_1_u_ij_mu(i,j,ipoint,2) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(2) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) - grad_1_u_ij_mu(i,j,ipoint,3) = v_ij_erf_rk_cst_mu(i,j,ipoint) * r(3) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) + grad_1_u_ij_mu(i,j,ipoint,1) = v_ij_erf_rk_cst_mu(i,j,ipoint) * x - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) + grad_1_u_ij_mu(i,j,ipoint,2) = v_ij_erf_rk_cst_mu(i,j,ipoint) * y - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) + grad_1_u_ij_mu(i,j,ipoint,3) = v_ij_erf_rk_cst_mu(i,j,ipoint) * z - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) enddo enddo enddo diff --git a/src/non_h_ints_mu/numerical_integ.irp.f b/src/non_h_ints_mu/numerical_integ.irp.f new file 
mode 100644 index 00000000..abac1874 --- /dev/null +++ b/src/non_h_ints_mu/numerical_integ.irp.f @@ -0,0 +1,135 @@ + +! --- + +! +! \int dr2 [-1 * \grad_r1 u(r1,r2)] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) +! +BEGIN_PROVIDER [ double precision, num_grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] + + implicit none + + integer :: i, j, ipoint, jpoint + double precision :: tmp, r1(3), r2(3) + + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc + double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc + double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc + + num_grad_1_u_ij_mu = 0.d0 + + do j = 1, ao_num + do i = 1, ao_num + + do ipoint = 1, n_points_final_grid + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) + num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) + num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) + enddo + + enddo + enddo + enddo + +END_PROVIDER + +! --- + +double precision function num_grad_1_u_ij_mu_x(i, j, ipoint) + + implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc + + num_grad_1_u_ij_mu_x = 0.d0 + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu_x += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) + enddo + +end function num_grad_1_u_ij_mu_x + +! --- + +double precision function num_grad_1_u_ij_mu_y(i, j, ipoint) + + implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc + + num_grad_1_u_ij_mu_y = 0.d0 + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu_y += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) + enddo + +end function num_grad_1_u_ij_mu_y + +! 
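+! As above, the r2 integral is approximated by the Becke quadrature of the module,
+!
+!   \int dr2 f(r1,r2)  ~  \sum_p final_weight_at_r_vector(p) * f(r1, final_grid_points(:,p))
+!
+! so these num_* functions are only as accurate as the chosen grid
+! (my_n_pt_r_grid / my_n_pt_a_grid) and are meant for the debug program
+! debug_integ_jmu_modif, not for production runs.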
--- + +double precision function num_grad_1_u_ij_mu_z(i, j, ipoint) + + implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc + + num_grad_1_u_ij_mu_z = 0.d0 + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + + num_grad_1_u_ij_mu_z += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) + enddo + +end function num_grad_1_u_ij_mu_z + +! --- + From fce9db0fcea42553d0faccf406da6ab7c106bb34 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Thu, 13 Oct 2022 20:48:26 +0200 Subject: [PATCH 04/10] u^2 j1b^2 added --- src/ao_many_one_e_ints/grad2_jmu_modif.irp.f | 92 ++++++++++++++++++++ src/non_h_ints_mu/fit_j.irp.f | 37 +++++++- 2 files changed, 128 insertions(+), 1 deletion(-) diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f index c3cdb491..4c108322 100644 --- a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -211,3 +211,95 @@ END_PROVIDER ! --- +BEGIN_PROVIDER [ double precision, int2_u2_j1b, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 [u_12^mu]^2 + ! + END_DOC + + implicit none + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit, expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:) + + double precision, external :: overlap_gauss_r12_ao_with1s + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + int2_u2_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & + !$OMP final_grid_points, n_max_fit_slat, & + !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & + !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & + !$OMP List_all_comb_b3_cent, int2_u2_j1b) + + allocate( tmp(ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_b3_size + + coef = List_all_comb_b3_coef (i_1s) + beta = List_all_comb_b3_expo (i_1s) + B_center(1) = List_all_comb_b3_cent(1,i_1s) + B_center(2) = List_all_comb_b3_cent(2,i_1s) + B_center(3) = List_all_comb_b3_cent(3,i_1s) + + do i_fit = 1, n_max_fit_slat + + expo_fit = expo_gauss_j_mu_x_2(i_fit) + coef_fit = coef_gauss_j_mu_x_2(i_fit) + int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) + + tmp(j,i,ipoint) += coef * coef_fit * int_fit + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + int2_u2_j1b(j,i,ipoint) += tmp(j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, 
ao_num + do j = 1, i-1 + int2_u2_j1b(j,i,ipoint) = int2_u2_j1b(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for int2_u2_j1b', wall1 - wall0 + +END_PROVIDER + +! --- + diff --git a/src/non_h_ints_mu/fit_j.irp.f b/src/non_h_ints_mu/fit_j.irp.f index d359d489..5defc4e5 100644 --- a/src/non_h_ints_mu/fit_j.irp.f +++ b/src/non_h_ints_mu/fit_j.irp.f @@ -23,7 +23,7 @@ END_PROVIDER ! ! J(mu,r12) = 0.5/mu * F(r12*mu) where F(x) = x * (1 - erf(x)) - 1/sqrt(pi) * exp(-x**2) ! - ! F(x) is fitted by - 1/sqrt(pi) * exp(-alpha * x) exp(-beta*mu^2x^2) (see expo_j_xmu) + ! F(x) is fitted by - 1/sqrt(pi) * exp(-alpha * x) exp(-beta * x^2) (see expo_j_xmu) ! ! The slater function exp(-alpha * x) is fitted with n_max_fit_slat gaussians ! @@ -49,6 +49,41 @@ END_PROVIDER END_PROVIDER +! --- + + BEGIN_PROVIDER [double precision, expo_gauss_j_mu_x_2, (n_max_fit_slat)] +&BEGIN_PROVIDER [double precision, coef_gauss_j_mu_x_2, (n_max_fit_slat)] + + BEGIN_DOC + ! + ! J(mu,r12)^2 = 0.25/mu^2 F(r12*mu)^2 + ! + ! F(x) = 1 /pi * exp(-2 * alpha * x) exp(-2 * beta * x^2) + ! + ! The slater function exp(-2 * alpha * x) is fitted with n_max_fit_slat gaussians + ! + ! See Appendix 2 of JCP 154, 084119 (2021) + ! + END_DOC + + implicit none + integer :: i + double precision :: tmp + double precision :: expos(n_max_fit_slat), alpha, beta + + tmp = 0.25d0 / (mu_erf * mu_erf * dacos(-1.d0)) + + alpha = 2.d0 * expo_j_xmu(1) * mu_erf + call expo_fit_slater_gam(alpha, expos) + beta = 2.d0 * expo_j_xmu(2) * mu_erf * mu_erf + + do i = 1, n_max_fit_slat + expo_gauss_j_mu_x_2(i) = expos(i) + beta + coef_gauss_j_mu_x_2(i) = tmp * coef_fit_slat_gauss(i) + enddo + +END_PROVIDER + ! --- double precision function F_x_j(x) From 4f0a0f68fc561c5db0405beb792eec0c55c286f4 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Thu, 13 Oct 2022 22:02:22 +0200 Subject: [PATCH 05/10] u grad u with j1b added --- src/ao_many_one_e_ints/grad2_jmu_modif.irp.f | 218 +++++++++++++++++++ src/non_h_ints_mu/fit_j.irp.f | 33 ++- src/non_h_ints_mu/grad_squared.irp.f | 7 +- 3 files changed, 254 insertions(+), 4 deletions(-) diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f index 4c108322..04d4efb9 100644 --- a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -303,3 +303,221 @@ END_PROVIDER ! --- +BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b, (3, ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] r2 + ! 
+ END_DOC + + implicit none + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit(3), expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, coeff_1s + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:,:) + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + int2_u_grad1u_x_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, & + !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coeff_1s) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & + !$OMP final_grid_points, n_max_fit_slat, & + !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & + !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & + !$OMP List_all_comb_b3_cent, int2_u_grad1u_x_j1b) + + allocate( tmp(3,ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_b3_size + + coef = List_all_comb_b3_coef (i_1s) + beta = List_all_comb_b3_expo (i_1s) + B_center(1) = List_all_comb_b3_cent(1,i_1s) + B_center(2) = List_all_comb_b3_cent(2,i_1s) + B_center(3) = List_all_comb_b3_cent(3,i_1s) + + do i_fit = 1, n_max_fit_slat + + expo_fit = expo_gauss_j_mu_1_erf(i_fit) + coef_fit = coef_gauss_j_mu_1_erf(i_fit) + + alpha_1s = beta + expo_fit + alpha_1s_inv = 1.d0 / alpha_1s + centr_1s(1) = alpha_1s_inv * (beta * B_center(1) + expo_fit * r(1)) + centr_1s(2) = alpha_1s_inv * (beta * B_center(2) + expo_fit * r(2)) + centr_1s(3) = alpha_1s_inv * (beta * B_center(3) + expo_fit * r(3)) + expo_coef_1s = -beta * expo_fit * alpha_1s_inv & + * ( (B_center(1) - r(1)) * (B_center(1) - r(1)) & + + (B_center(2) - r(2)) * (B_center(2) - r(2)) & + + (B_center(3) - r(3)) * (B_center(3) - r(3)) ) + if(expo_coef_1s .gt. 80.d0) cycle + coeff_1s = dexp(-expo_coef_1s) + + call NAI_pol_x_mult_erf_ao_with1s(i, j, alpha_1s, centr_1s, 1.d+9, r, int_fit) + + + tmp(1,j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit(1) + tmp(2,j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit(2) + tmp(3,j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit(3) + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + int2_u_grad1u_x_j1b(1,j,i,ipoint) += tmp(1,j,i,ipoint) + int2_u_grad1u_x_j1b(2,j,i,ipoint) += tmp(2,j,i,ipoint) + int2_u_grad1u_x_j1b(3,j,i,ipoint) += tmp(3,j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + int2_u_grad1u_x_j1b(1,j,i,ipoint) = int2_u_grad1u_x_j1b(1,i,j,ipoint) + int2_u_grad1u_x_j1b(2,j,i,ipoint) = int2_u_grad1u_x_j1b(2,i,j,ipoint) + int2_u_grad1u_x_j1b(3,j,i,ipoint) = int2_u_grad1u_x_j1b(3,i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for int2_u_grad1u_x_j1b', wall1 - wall0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b, (ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int dr2 phi_i(r2) phi_j(r2) 1s_j1b(r2)^2 u_12^mu [\grad_1 u_12^mu] + ! 
+ END_DOC + + implicit none + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit, expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, coeff_1s + double precision :: wall0, wall1 + double precision, allocatable :: tmp(:,:,:) + double precision, external :: NAI_pol_mult_erf_ao_with1s + + provide mu_erf final_grid_points j1b_pen + call wall_time(wall0) + + int2_u_grad1u_j1b = 0.d0 + + !$OMP PARALLEL DEFAULT (NONE) & + !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & + !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, & + !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coeff_1s) & + !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & + !$OMP final_grid_points, n_max_fit_slat, & + !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & + !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & + !$OMP List_all_comb_b3_cent, int2_u_grad1u_j1b) + + allocate( tmp(ao_num,ao_num,n_points_final_grid) ) + tmp = 0.d0 + + !$OMP DO + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i_1s = 1, List_all_comb_b3_size + + coef = List_all_comb_b3_coef (i_1s) + beta = List_all_comb_b3_expo (i_1s) + B_center(1) = List_all_comb_b3_cent(1,i_1s) + B_center(2) = List_all_comb_b3_cent(2,i_1s) + B_center(3) = List_all_comb_b3_cent(3,i_1s) + + do i_fit = 1, n_max_fit_slat + + expo_fit = expo_gauss_j_mu_1_erf(i_fit) + coef_fit = coef_gauss_j_mu_1_erf(i_fit) + + alpha_1s = beta + expo_fit + alpha_1s_inv = 1.d0 / alpha_1s + centr_1s(1) = alpha_1s_inv * (beta * B_center(1) + expo_fit * r(1)) + centr_1s(2) = alpha_1s_inv * (beta * B_center(2) + expo_fit * r(2)) + centr_1s(3) = alpha_1s_inv * (beta * B_center(3) + expo_fit * r(3)) + expo_coef_1s = -beta * expo_fit * alpha_1s_inv & + * ( (B_center(1) - r(1)) * (B_center(1) - r(1)) & + + (B_center(2) - r(2)) * (B_center(2) - r(2)) & + + (B_center(3) - r(3)) * (B_center(3) - r(3)) ) + if(expo_coef_1s .gt. 80.d0) cycle + coeff_1s = dexp(-expo_coef_1s) + + int_fit = NAI_pol_mult_erf_ao_with1s(i, j, alpha_1s, centr_1s, 1.d+9, r) + + + tmp(j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit + enddo + enddo + enddo + enddo + enddo + !$OMP END DO + + !$OMP CRITICAL + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = i, ao_num + int2_u_grad1u_j1b(j,i,ipoint) += tmp(j,i,ipoint) + enddo + enddo + enddo + !$OMP END CRITICAL + + deallocate( tmp ) + !$OMP END PARALLEL + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, i-1 + int2_u_grad1u_j1b(j,i,ipoint) = int2_u_grad1u_j1b(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print*, ' wall time for int2_u_grad1u_j1b', wall1 - wall0 + +END_PROVIDER + +! --- diff --git a/src/non_h_ints_mu/fit_j.irp.f b/src/non_h_ints_mu/fit_j.irp.f index 5defc4e5..34f3a31a 100644 --- a/src/non_h_ints_mu/fit_j.irp.f +++ b/src/non_h_ints_mu/fit_j.irp.f @@ -58,7 +58,7 @@ END_PROVIDER ! ! J(mu,r12)^2 = 0.25/mu^2 F(r12*mu)^2 ! - ! F(x) = 1 /pi * exp(-2 * alpha * x) exp(-2 * beta * x^2) + ! F(x)^2 = 1 /pi * exp(-2 * alpha * x) exp(-2 * beta * x^2) ! ! The slater function exp(-2 * alpha * x) is fitted with n_max_fit_slat gaussians ! @@ -84,6 +84,37 @@ END_PROVIDER END_PROVIDER +! 
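+! Sketch of the fit used in the provider below (same convention as the
+! expo_gauss_j_mu_x_2 fit above): the Slater part is expanded as
+!
+!   exp(-alpha*x) ~ \sum_i coef_fit_slat_gauss(i) * exp(-expos(i) * x^2)
+!
+! so the product exp(-alpha*x) * exp(-beta*x^2) becomes a sum of Gaussians with
+! exponents expos(i) + beta; these shifted exponents are what is stored in
+! expo_gauss_j_mu_1_erf, while the -1/(4*sqrt(pi)*mu) prefactor is folded into
+! coef_gauss_j_mu_1_erf.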
--- + + BEGIN_PROVIDER [double precision, expo_gauss_j_mu_1_erf, (n_max_fit_slat)] +&BEGIN_PROVIDER [double precision, coef_gauss_j_mu_1_erf, (n_max_fit_slat)] + + BEGIN_DOC + ! + ! J(mu,r12) x \frac{1 - erf(mu * r12)}{2} = + ! + ! - \frac{1}{4 \sqrt{\pi} \mu} \exp(-(alpha1 + alpha2) * mu * r12 - (beta1 + beta2) * mu^2 * r12^2) + ! + END_DOC + + implicit none + integer :: i + double precision :: tmp + double precision :: expos(n_max_fit_slat), alpha, beta + + tmp = -0.25d0 / (mu_erf * dsqrt(dacos(-1.d0))) + + alpha = (expo_j_xmu(1) + expo_gauss_1_erf_x(1)) * mu_erf + call expo_fit_slater_gam(alpha, expos) + beta = (expo_j_xmu(2) + expo_gauss_1_erf_x(2)) * mu_erf * mu_erf + + do i = 1, n_max_fit_slat + expo_gauss_j_mu_1_erf(i) = expos(i) + beta + coef_gauss_j_mu_1_erf(i) = tmp * coef_fit_slat_gauss(i) + enddo + +END_PROVIDER + ! --- double precision function F_x_j(x) diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f index d02edb12..08152ddf 100644 --- a/src/non_h_ints_mu/grad_squared.irp.f +++ b/src/non_h_ints_mu/grad_squared.irp.f @@ -17,7 +17,7 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_poin implicit none integer :: ipoint, i, j, m, igauss - double precision :: r(3), delta, coef + double precision :: r(3), delta, coef, tmp double precision :: time0, time1 double precision, external :: overlap_gauss_r12_ao @@ -29,9 +29,10 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_poin if(j1b_type .eq. 3) then do ipoint = 1, n_points_final_grid + tmp = fact3_j12(ipoint) do j = 1, ao_num do i = 1, ao_num - gradu_squared_u_ij_mu(j,i,ipoint) += fact3_j12(ipoint) * int2_grad1u_grad2u_j1b(i,j,ipoint) + gradu_squared_u_ij_mu(j,i,ipoint) += tmp * int2_grad1u_grad2u_j1b(i,j,ipoint) enddo enddo enddo @@ -47,7 +48,7 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_poin do igauss = 1, n_max_fit_slat delta = expo_gauss_1_erf_x_2(igauss) coef = coef_gauss_1_erf_x_2(igauss) - gradu_squared_u_ij_mu(j,i,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) + gradu_squared_u_ij_mu(j,i,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) enddo enddo enddo From dbbae1f990f2ec96beeecdb9513aab2d6f90e1f7 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Mon, 17 Oct 2022 10:55:53 +0200 Subject: [PATCH 06/10] list2, list3, j1b, grad_j1, lapl_j1b ok --- src/ao_many_one_e_ints/grad2_jmu_modif.irp.f | 118 ---- .../grad_lapl_jmu_modif.irp.f | 105 --- .../listj1b_product_to_sum.irp.f | 227 +++++++ src/ao_one_e_ints/pot_ao_erf_ints.irp.f | 46 +- src/non_h_ints_mu/debug_integ_jmu_modif.irp.f | 339 +++++++++- src/non_h_ints_mu/grad_squared.irp.f | 3 +- src/non_h_ints_mu/j12_nucl_utils.irp.f | 598 ++++++++++++++++++ src/non_h_ints_mu/jmu_modif.irp.f | 266 -------- src/non_h_ints_mu/new_grad_tc.irp.f | 86 +-- src/non_h_ints_mu/numerical_integ.irp.f | 165 +++-- 10 files changed, 1253 insertions(+), 700 deletions(-) create mode 100644 src/ao_many_one_e_ints/listj1b_product_to_sum.irp.f create mode 100644 src/non_h_ints_mu/j12_nucl_utils.irp.f delete mode 100644 src/non_h_ints_mu/jmu_modif.irp.f diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f index 04d4efb9..163f6e2d 100644 --- a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -1,124 +1,6 @@ ! --- -BEGIN_PROVIDER [ integer, List_all_comb_b3_size] - - implicit none - - List_all_comb_b3_size = 3**nucl_num - -END_PROVIDER - -! 
--- - -BEGIN_PROVIDER [ integer, List_all_comb_b3, (nucl_num, List_all_comb_b3_size)] - - implicit none - integer :: i, j, ii, jj - integer, allocatable :: M(:,:), p(:) - - if(nucl_num .gt. 32) then - print *, ' nucl_num = ', nucl_num, '> 32' - stop - endif - - List_all_comb_b3(:,:) = 0 - List_all_comb_b3(:,List_all_comb_b3_size) = 2 - - allocate(p(nucl_num)) - p = 0 - - do i = 2, List_all_comb_b3_size-1 - do j = 1, nucl_num - - ii = 0 - do jj = 1, j-1, 1 - ii = ii + p(jj) * 3**(jj-1) - enddo - p(j) = modulo(i-1-ii, 3**j) / 3**(j-1) - - List_all_comb_b3(j,i) = p(j) - enddo - enddo - -END_PROVIDER - -! --- - - BEGIN_PROVIDER [ double precision, List_all_comb_b3_coef, ( List_all_comb_b3_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b3_expo, ( List_all_comb_b3_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b3_cent, (3, List_all_comb_b3_size)] - - implicit none - integer :: i, j, k, phase - double precision :: tmp_alphaj, tmp_alphak, facto - - provide j1b_pen - - List_all_comb_b3_coef = 0.d0 - List_all_comb_b3_expo = 0.d0 - List_all_comb_b3_cent = 0.d0 - - do i = 1, List_all_comb_b3_size - - do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) - - List_all_comb_b3_expo(i) += tmp_alphaj - List_all_comb_b3_cent(1,i) += tmp_alphaj * nucl_coord(j,1) - List_all_comb_b3_cent(2,i) += tmp_alphaj * nucl_coord(j,2) - List_all_comb_b3_cent(3,i) += tmp_alphaj * nucl_coord(j,3) - - enddo - - ASSERT(List_all_comb_b3_expo(i) .gt. 0d0) - if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle - - List_all_comb_b3_cent(1,i) = List_all_comb_b3_cent(1,i) / List_all_comb_b3_expo(i) - List_all_comb_b3_cent(2,i) = List_all_comb_b3_cent(2,i) / List_all_comb_b3_expo(i) - List_all_comb_b3_cent(3,i) = List_all_comb_b3_cent(3,i) / List_all_comb_b3_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b3_size - - do j = 2, nucl_num, 1 - tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) - do k = 1, j-1, 1 - tmp_alphak = dble(List_all_comb_b3(k,i)) * j1b_pen(k) - - List_all_comb_b3_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & - + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & - + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) - enddo - enddo - - if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle - - List_all_comb_b3_coef(i) = List_all_comb_b3_coef(i) / List_all_comb_b3_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b3_size - - facto = 1.d0 - phase = 0 - do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb_b3(j,i)) - - facto *= 2.d0 / (gamma(tmp_alphaj+1.d0) * gamma(3.d0-tmp_alphaj)) - phase += List_all_comb_b3(j,i) - enddo - - List_all_comb_b3_coef(i) = (-1.d0)**dble(phase) * facto * dexp(-List_all_comb_b3_coef(i)) - enddo - -END_PROVIDER - -! --- - BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC diff --git a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index c789ad36..9dd715e2 100644 --- a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -1,110 +1,6 @@ ! --- -BEGIN_PROVIDER [ integer, List_all_comb_b2_size] - - implicit none - - List_all_comb_b2_size = 2**nucl_num - -END_PROVIDER - -! --- - -BEGIN_PROVIDER [ integer, List_all_comb_b2, (nucl_num, List_all_comb_b2_size)] - - implicit none - integer :: i, j - - if(nucl_num .gt. 
32) then - print *, ' nucl_num = ', nucl_num, '> 32' - stop - endif - - List_all_comb_b2 = 0 - - do i = 0, List_all_comb_b2_size-1 - do j = 0, nucl_num-1 - if (btest(i,j)) then - List_all_comb_b2(j+1,i+1) = 1 - endif - enddo - enddo - -END_PROVIDER - -! --- - - BEGIN_PROVIDER [ double precision, List_all_comb_b2_coef, ( List_all_comb_b2_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b2_expo, ( List_all_comb_b2_size)] -&BEGIN_PROVIDER [ double precision, List_all_comb_b2_cent, (3, List_all_comb_b2_size)] - - implicit none - integer :: i, j, k, phase - double precision :: tmp_alphaj, tmp_alphak - - provide j1b_pen - - List_all_comb_b2_coef = 0.d0 - List_all_comb_b2_expo = 0.d0 - List_all_comb_b2_cent = 0.d0 - - do i = 1, List_all_comb_b2_size - - do j = 1, nucl_num - tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - - List_all_comb_b2_expo(i) += tmp_alphaj - List_all_comb_b2_cent(1,i) += tmp_alphaj * nucl_coord(j,1) - List_all_comb_b2_cent(2,i) += tmp_alphaj * nucl_coord(j,2) - List_all_comb_b2_cent(3,i) += tmp_alphaj * nucl_coord(j,3) - - enddo - - ASSERT(List_all_comb_b2_expo(i) .gt. 0d0) - if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - - List_all_comb_b2_cent(1,i) = List_all_comb_b2_cent(1,i) / List_all_comb_b2_expo(i) - List_all_comb_b2_cent(2,i) = List_all_comb_b2_cent(2,i) / List_all_comb_b2_expo(i) - List_all_comb_b2_cent(3,i) = List_all_comb_b2_cent(3,i) / List_all_comb_b2_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b2_size - - do j = 2, nucl_num, 1 - tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - do k = 1, j-1, 1 - tmp_alphak = dble(List_all_comb_b2(k,i)) * j1b_pen(k) - - List_all_comb_b2_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & - + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & - + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) - enddo - enddo - - if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle - - List_all_comb_b2_coef(i) = List_all_comb_b2_coef(i) / List_all_comb_b2_expo(i) - enddo - - ! --- - - do i = 1, List_all_comb_b2_size - - phase = 0 - do j = 1, nucl_num - phase += List_all_comb_b2(j,i) - enddo - - List_all_comb_b2_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_comb_b2_coef(i)) - enddo - -END_PROVIDER - -! --- - BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC @@ -157,7 +53,6 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po tmp(j,i,ipoint) += coef * (int_mu - int_coulomb) enddo - enddo enddo enddo diff --git a/src/ao_many_one_e_ints/listj1b_product_to_sum.irp.f b/src/ao_many_one_e_ints/listj1b_product_to_sum.irp.f new file mode 100644 index 00000000..ff9f8ae5 --- /dev/null +++ b/src/ao_many_one_e_ints/listj1b_product_to_sum.irp.f @@ -0,0 +1,227 @@ + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_b2_size] + + implicit none + + List_all_comb_b2_size = 2**nucl_num + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_b2, (nucl_num, List_all_comb_b2_size)] + + implicit none + integer :: i, j + + if(nucl_num .gt. 32) then + print *, ' nucl_num = ', nucl_num, '> 32' + stop + endif + + List_all_comb_b2 = 0 + + do i = 0, List_all_comb_b2_size-1 + do j = 0, nucl_num-1 + if (btest(i,j)) then + List_all_comb_b2(j+1,i+1) = 1 + endif + enddo + enddo + +END_PROVIDER + +! 
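+! The three providers below expand the product of 1s Jastrow factors into a sum
+! of single Gaussians (sketch, assuming all j1b_pen exponents are positive):
+!
+!   \prod_A (1 - exp(-alpha_A |r - R_A|^2)) = \sum_S (-1)^{|S|} exp(- \sum_{A in S} alpha_A |r - R_A|^2)
+!
+! where S runs over the 2^nucl_num subsets enumerated in List_all_comb_b2, and
+!
+!   \sum_{A in S} alpha_A |r - R_A|^2 = beta_S |r - P_S|^2 + K_S
+!
+! with  beta_S = \sum_{A in S} alpha_A                                    (List_all_comb_b2_expo)
+!       P_S    = (\sum_{A in S} alpha_A R_A) / beta_S                     (List_all_comb_b2_cent)
+!       K_S    = (\sum_{A<B in S} alpha_A alpha_B |R_A - R_B|^2) / beta_S
+!
+! so that List_all_comb_b2_coef(S) = (-1)^{|S|} * exp(-K_S).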
--- + + BEGIN_PROVIDER [ double precision, List_all_comb_b2_coef, ( List_all_comb_b2_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b2_expo, ( List_all_comb_b2_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b2_cent, (3, List_all_comb_b2_size)] + + implicit none + integer :: i, j, k, phase + double precision :: tmp_alphaj, tmp_alphak + double precision :: tmp_cent_x, tmp_cent_y, tmp_cent_z + + provide j1b_pen + + List_all_comb_b2_coef = 0.d0 + List_all_comb_b2_expo = 0.d0 + List_all_comb_b2_cent = 0.d0 + + do i = 1, List_all_comb_b2_size + + tmp_cent_x = 0.d0 + tmp_cent_y = 0.d0 + tmp_cent_z = 0.d0 + do j = 1, nucl_num + tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + List_all_comb_b2_expo(i) += tmp_alphaj + tmp_cent_x += tmp_alphaj * nucl_coord(j,1) + tmp_cent_y += tmp_alphaj * nucl_coord(j,2) + tmp_cent_z += tmp_alphaj * nucl_coord(j,3) + enddo + + ASSERT(List_all_comb_b2_expo(i) .gt. 0d0) + if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle + + List_all_comb_b2_cent(1,i) = tmp_cent_x / List_all_comb_b2_expo(i) + List_all_comb_b2_cent(2,i) = tmp_cent_y / List_all_comb_b2_expo(i) + List_all_comb_b2_cent(3,i) = tmp_cent_z / List_all_comb_b2_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_b2_size + + do j = 2, nucl_num, 1 + tmp_alphaj = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + do k = 1, j-1, 1 + tmp_alphak = dble(List_all_comb_b2(k,i)) * j1b_pen(k) + + List_all_comb_b2_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + enddo + enddo + + if(List_all_comb_b2_expo(i) .lt. 1d-10) cycle + + List_all_comb_b2_coef(i) = List_all_comb_b2_coef(i) / List_all_comb_b2_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_b2_size + + phase = 0 + do j = 1, nucl_num + phase += List_all_comb_b2(j,i) + enddo + + List_all_comb_b2_coef(i) = (-1.d0)**dble(phase) * dexp(-List_all_comb_b2_coef(i)) + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_b3_size] + + implicit none + + List_all_comb_b3_size = 3**nucl_num + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ integer, List_all_comb_b3, (nucl_num, List_all_comb_b3_size)] + + implicit none + integer :: i, j, ii, jj + integer, allocatable :: M(:,:), p(:) + + if(nucl_num .gt. 32) then + print *, ' nucl_num = ', nucl_num, '> 32' + stop + endif + + List_all_comb_b3(:,:) = 0 + List_all_comb_b3(:,List_all_comb_b3_size) = 2 + + allocate(p(nucl_num)) + p = 0 + + do i = 2, List_all_comb_b3_size-1 + do j = 1, nucl_num + + ii = 0 + do jj = 1, j-1, 1 + ii = ii + p(jj) * 3**(jj-1) + enddo + p(j) = modulo(i-1-ii, 3**j) / 3**(j-1) + + List_all_comb_b3(j,i) = p(j) + enddo + enddo + +END_PROVIDER + +! 
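+! Same product-to-sum trick for the squared factors (sketch):
+!
+!   \prod_A (1 - exp(-alpha_A d_A))^2 = \prod_A \sum_{n_A=0}^{2} binom(2,n_A) (-1)^{n_A} exp(-n_A alpha_A d_A),   d_A = |r - R_A|^2
+!
+! hence the 3^nucl_num terms of List_all_comb_b3. In the provider below the factor
+! 2/(Gamma(n+1)*Gamma(3-n)) equals binom(2,n) = 1, 2, 1 for n = 0, 1, 2, the sign
+! comes from (-1)^phase, and exponent, center and exp(-K) prefactor follow from the
+! same Gaussian-product rule as for List_all_comb_b2.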
--- + + BEGIN_PROVIDER [ double precision, List_all_comb_b3_coef, ( List_all_comb_b3_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b3_expo, ( List_all_comb_b3_size)] +&BEGIN_PROVIDER [ double precision, List_all_comb_b3_cent, (3, List_all_comb_b3_size)] + + implicit none + integer :: i, j, k, phase + double precision :: tmp_alphaj, tmp_alphak, facto + + provide j1b_pen + + List_all_comb_b3_coef = 0.d0 + List_all_comb_b3_expo = 0.d0 + List_all_comb_b3_cent = 0.d0 + + do i = 1, List_all_comb_b3_size + + do j = 1, nucl_num + tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) + + List_all_comb_b3_expo(i) += tmp_alphaj + List_all_comb_b3_cent(1,i) += tmp_alphaj * nucl_coord(j,1) + List_all_comb_b3_cent(2,i) += tmp_alphaj * nucl_coord(j,2) + List_all_comb_b3_cent(3,i) += tmp_alphaj * nucl_coord(j,3) + + enddo + + ASSERT(List_all_comb_b3_expo(i) .gt. 0d0) + if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle + + List_all_comb_b3_cent(1,i) = List_all_comb_b3_cent(1,i) / List_all_comb_b3_expo(i) + List_all_comb_b3_cent(2,i) = List_all_comb_b3_cent(2,i) / List_all_comb_b3_expo(i) + List_all_comb_b3_cent(3,i) = List_all_comb_b3_cent(3,i) / List_all_comb_b3_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_b3_size + + do j = 2, nucl_num, 1 + tmp_alphaj = dble(List_all_comb_b3(j,i)) * j1b_pen(j) + do k = 1, j-1, 1 + tmp_alphak = dble(List_all_comb_b3(k,i)) * j1b_pen(k) + + List_all_comb_b3_coef(i) += tmp_alphaj * tmp_alphak * ( (nucl_coord(j,1) - nucl_coord(k,1)) * (nucl_coord(j,1) - nucl_coord(k,1)) & + + (nucl_coord(j,2) - nucl_coord(k,2)) * (nucl_coord(j,2) - nucl_coord(k,2)) & + + (nucl_coord(j,3) - nucl_coord(k,3)) * (nucl_coord(j,3) - nucl_coord(k,3)) ) + enddo + enddo + + if(List_all_comb_b3_expo(i) .lt. 1d-10) cycle + + List_all_comb_b3_coef(i) = List_all_comb_b3_coef(i) / List_all_comb_b3_expo(i) + enddo + + ! --- + + do i = 1, List_all_comb_b3_size + + facto = 1.d0 + phase = 0 + do j = 1, nucl_num + tmp_alphaj = dble(List_all_comb_b3(j,i)) + + facto *= 2.d0 / (gamma(tmp_alphaj+1.d0) * gamma(3.d0-tmp_alphaj)) + phase += List_all_comb_b3(j,i) + enddo + + List_all_comb_b3_coef(i) = (-1.d0)**dble(phase) * facto * dexp(-List_all_comb_b3_coef(i)) + enddo + +END_PROVIDER + +! --- + diff --git a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f index d4ef4b28..e0d6254a 100644 --- a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f +++ b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f @@ -82,7 +82,7 @@ double precision function NAI_pol_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, double precision, external :: NAI_pol_mult_erf_with1s, NAI_pol_mult_erf_ao - ASSERT(beta .lt. 0.d0) + ASSERT(beta .ge. 0.d0) if(beta .lt. 1d-10) then NAI_pol_mult_erf_ao_with1s = NAI_pol_mult_erf_ao(i_ao, j_ao, mu_in, C_center) return @@ -234,17 +234,17 @@ double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A double precision :: rint - ! e^{-alpha1 (r - A1)^2} e^{-alpha2 (r - A2)^2} = e^{K12} e^{-alpha12 (r - A12)^2} + ! 
e^{-alpha1 (r - A1)^2} e^{-alpha2 (r - A2)^2} = e^{-K12} e^{-alpha12 (r - A12)^2} alpha12 = alpha1 + alpha2 alpha12_inv = 1.d0 / alpha12 alpha12_inv_2 = 0.5d0 * alpha12_inv rho12 = alpha1 * alpha2 * alpha12_inv - - dist12 = 0.d0 - do i = 1, 3 - A12_center(i) = (alpha1 * A1_center(i) + alpha2 * A2_center(i)) * alpha12_inv - dist12 += (A1_center(i) - A2_center(i)) * (A1_center(i) - A2_center(i)) - enddo + A12_center(1) = (alpha1 * A1_center(1) + alpha2 * A2_center(1)) * alpha12_inv + A12_center(2) = (alpha1 * A1_center(2) + alpha2 * A2_center(2)) * alpha12_inv + A12_center(3) = (alpha1 * A1_center(3) + alpha2 * A2_center(3)) * alpha12_inv + dist12 = ( (A1_center(1) - A2_center(1)) * (A1_center(1) - A2_center(1)) & + + (A1_center(2) - A2_center(2)) * (A1_center(2) - A2_center(2)) & + + (A1_center(3) - A2_center(3)) * (A1_center(3) - A2_center(3)) ) const_factor12 = dist12 * rho12 if(const_factor12 > 80.d0) then @@ -254,19 +254,17 @@ double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A ! --- - ! e^{K12} e^{-alpha12 (r - A12)^2} e^{-beta (r - B)^2} = e^{K} e^{-p (r - P)^2} - p = alpha12 + beta - p_inv = 1.d0 / p - p_inv_2 = 0.5d0 * p_inv - rho = alpha12 * beta * p_inv - - dist = 0.d0 - dist_integral = 0.d0 - do i = 1, 3 - P_center(i) = (alpha12 * A12_center(i) + beta * B_center(i)) * p_inv - dist += (A12_center(i) - B_center(i)) * (A12_center(i) - B_center(i)) - dist_integral += (P_center(i) - C_center(i)) * (P_center(i) - C_center(i)) - enddo + ! e^{-K12} e^{-alpha12 (r - A12)^2} e^{-beta (r - B)^2} = e^{-K} e^{-p (r - P)^2} + p = alpha12 + beta + p_inv = 1.d0 / p + p_inv_2 = 0.5d0 * p_inv + rho = alpha12 * beta * p_inv + P_center(1) = (alpha12 * A12_center(1) + beta * B_center(1)) * p_inv + P_center(2) = (alpha12 * A12_center(2) + beta * B_center(2)) * p_inv + P_center(3) = (alpha12 * A12_center(3) + beta * B_center(3)) * p_inv + dist = ( (A12_center(1) - B_center(1)) * (A12_center(1) - B_center(1)) & + + (A12_center(2) - B_center(2)) * (A12_center(2) - B_center(2)) & + + (A12_center(3) - B_center(3)) * (A12_center(3) - B_center(3)) ) const_factor = const_factor12 + dist * rho if(const_factor > 80.d0) then @@ -274,6 +272,12 @@ double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A return endif + dist_integral = 0.d0 + do i = 1, 3 + dist_integral += (P_center(i) - C_center(i)) * (P_center(i) - C_center(i)) + enddo + + ! --- p_new = mu_in / dsqrt(p + mu_in * mu_in) diff --git a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f index 54026349..2bcd1358 100644 --- a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f +++ b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -6,15 +6,24 @@ program debug_integ_jmu_modif implicit none my_grid_becke = .True. 
- !my_n_pt_r_grid = 30 - !my_n_pt_a_grid = 50 - my_n_pt_r_grid = 100 - my_n_pt_a_grid = 170 + + my_n_pt_r_grid = 30 + my_n_pt_a_grid = 50 + !my_n_pt_r_grid = 100 + !my_n_pt_a_grid = 170 touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid PROVIDE mu_erf j1b_pen - call test_grad_1_u_ij_mu() + call test_j1b_nucl() + call test_grad_j1b_nucl() + call test_lapl_j1b_nucl() + + call test_list_b2() + call test_list_b3() + + !call test_grad_1_u_ij_mu() + !call test_gradu_squared_u_ij_mu() end @@ -23,16 +32,13 @@ end subroutine test_grad_1_u_ij_mu() implicit none - integer :: i, j, ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num - double precision, external :: num_grad_1_u_ij_mu_x - double precision, external :: num_grad_1_u_ij_mu_y - double precision, external :: num_grad_1_u_ij_mu_z + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: integ(3) print*, ' test_grad_1_u_ij_mu ...' - PROVIDE grad_1_u_ij_mu -! PROVIDE num_grad_1_u_ij_mu + PROVIDE grad_1_u_ij_mu eps_ij = 1d-6 acc_tot = 0.d0 @@ -41,9 +47,10 @@ subroutine test_grad_1_u_ij_mu() do j = 1, ao_num do i = 1, ao_num - i_exc = grad_1_u_ij_mu(i,j,ipoint,1) - !i_num = num_grad_1_u_ij_mu(i,j,ipoint,1) - i_num = num_grad_1_u_ij_mu_x(i, j, ipoint) + call num_grad_1_u_ij_mu(i, j, ipoint, integ) + + i_exc = grad_1_u_ij_mu(i,j,ipoint,1) + i_num = integ(1) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then print *, ' problem in x part of grad_1_u_ij_mu on', i, j, ipoint @@ -52,10 +59,10 @@ subroutine test_grad_1_u_ij_mu() print *, ' diff = ', acc_ij endif acc_tot += acc_ij + normalz += dabs(i_num) - i_exc = grad_1_u_ij_mu(i,j,ipoint,2) - !i_num = num_grad_1_u_ij_mu(i,j,ipoint,2) - i_num = num_grad_1_u_ij_mu_y(i, j, ipoint) + i_exc = grad_1_u_ij_mu(i,j,ipoint,2) + i_num = integ(2) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then print *, ' problem in y part of grad_1_u_ij_mu on', i, j, ipoint @@ -64,30 +71,316 @@ subroutine test_grad_1_u_ij_mu() print *, ' diff = ', acc_ij endif acc_tot += acc_ij + normalz += dabs(i_num) - i_exc = grad_1_u_ij_mu(i,j,ipoint,3) - !i_num = num_grad_1_u_ij_mu(i,j,ipoint,3) - i_num = num_grad_1_u_ij_mu_z(i, j, ipoint) + i_exc = grad_1_u_ij_mu(i,j,ipoint,3) + i_num = integ(3) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in y part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' problem in z part of grad_1_u_ij_mu on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij endif acc_tot += acc_ij + normalz += dabs(i_num) enddo enddo enddo - acc_tot = acc_tot / dble(ao_num*ao_num*n_points_final_grid) + acc_tot = acc_tot / normalz print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_grad_1_u_ij_mu ! --- +subroutine test_gradu_squared_u_ij_mu() + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_gradu_squared_u_ij_mu + + print*, ' test_gradu_squared_u_ij_mu ...' + + PROVIDE gradu_squared_u_ij_mu + + eps_ij = 1d-6 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_exc = gradu_squared_u_ij_mu(i,j,ipoint) + i_num = num_gradu_squared_u_ij_mu(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. 
eps_ij) then + print *, ' problem in gradu_squared_u_ij_mu on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + enddo + enddo + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_gradu_squared_u_ij_mu + +! --- + +subroutine test_j1b_nucl() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: j1b_nucl + + print*, ' test_j1b_nucl ...' + + PROVIDE v_1b + + eps_ij = 1d-7 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b(ipoint) + i_num = j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in v_1b on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_j1b_nucl + +! --- + +subroutine test_grad_j1b_nucl() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + print*, ' test_grad_j1b_nucl ...' + + PROVIDE v_1b_grad + + eps_ij = 1d-6 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_grad(1,ipoint) + i_num = grad_x_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in x of v_1b_grad on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + i_exc = v_1b_grad(2,ipoint) + i_num = grad_y_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in y of v_1b_grad on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + i_exc = v_1b_grad(3,ipoint) + i_num = grad_z_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in z of v_1b_grad on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_grad_j1b_nucl + +! --- + +subroutine test_lapl_j1b_nucl() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: lapl_j1b_nucl + + print*, ' test_lapl_j1b_nucl ...' + + PROVIDE v_1b_lapl + + eps_ij = 1d-5 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_lapl(ipoint) + i_num = lapl_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. 
eps_ij) then + print *, ' problem in v_1b_lapl on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_lapl_j1b_nucl + +! --- + +subroutine test_list_b2() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: j1b_nucl + + print*, ' test_list_b2 ...' + + PROVIDE v_1b_list_b2 + + eps_ij = 1d-7 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_list_b2(ipoint) + i_num = j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in list_b2 on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_list_b2 + +! --- + +subroutine test_list_b3() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_tmp, i_num, normalz + double precision :: r(3) + double precision, external :: j1b_nucl + + print*, ' test_list_b3 ...' + + PROVIDE v_1b_list_b3 + + eps_ij = 1d-7 + acc_tot = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_list_b3(ipoint) + i_tmp = j1b_nucl(r) + i_num = i_tmp * i_tmp + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in list_b3 on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_list_b3 + +! --- diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f index 08152ddf..6ef34118 100644 --- a/src/non_h_ints_mu/grad_squared.irp.f +++ b/src/non_h_ints_mu/grad_squared.irp.f @@ -27,9 +27,10 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_poin PROVIDE j1b_type j1b_pen if(j1b_type .eq. 3) then + ! v1_1b^2 \int d2 \phi_i(2) \phi_j(2) \frac{-[1 - \erf(\mu r12)]^2}{4} v2_1b^2 do ipoint = 1, n_points_final_grid - tmp = fact3_j12(ipoint) + tmp = v_1b(ipoint) * v_1b(ipoint) do j = 1, ao_num do i = 1, ao_num gradu_squared_u_ij_mu(j,i,ipoint) += tmp * int2_grad1u_grad2u_j1b(i,j,ipoint) diff --git a/src/non_h_ints_mu/j12_nucl_utils.irp.f b/src/non_h_ints_mu/j12_nucl_utils.irp.f new file mode 100644 index 00000000..344b1fa8 --- /dev/null +++ b/src/non_h_ints_mu/j12_nucl_utils.irp.f @@ -0,0 +1,598 @@ + +! 
--- + +BEGIN_PROVIDER [ double precision, v_1b, (n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, phase + double precision :: x, y, z, dx, dy, dz + double precision :: a, d, e, fact_r + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + fact_r = 1.d0 + do j = 1, nucl_num + a = j1b_pen(j) + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + d = dx*dx + dy*dy + dz*dz + e = 1.d0 - dexp(-a*d) + + fact_r = fact_r * e + enddo + + v_1b(ipoint) = fact_r + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, v_1b_grad, (3, n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, phase + double precision :: x, y, z, dx, dy, dz + double precision :: a, d, e + double precision :: fact_x, fact_y, fact_z + double precision :: ax_der, ay_der, az_der, a_expo + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + fact_x = 0.d0 + fact_y = 0.d0 + fact_z = 0.d0 + do i = 1, List_all_comb_b2_size + + phase = 0 + a_expo = 0.d0 + ax_der = 0.d0 + ay_der = 0.d0 + az_der = 0.d0 + do j = 1, nucl_num + a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + + phase += List_all_comb_b2(j,i) + a_expo += a * (dx*dx + dy*dy + dz*dz) + ax_der += a * dx + ay_der += a * dy + az_der += a * dz + enddo + e = -2.d0 * (-1.d0)**dble(phase) * dexp(-a_expo) + + fact_x += e * ax_der + fact_y += e * ay_der + fact_z += e * az_der + enddo + + v_1b_grad(1,ipoint) = fact_x + v_1b_grad(2,ipoint) = fact_y + v_1b_grad(3,ipoint) = fact_z + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, v_1b_lapl, (n_points_final_grid)] + + implicit none + integer :: ipoint, i, j, phase + double precision :: x, y, z, dx, dy, dz + double precision :: a, d, e, b + double precision :: fact_r + double precision :: ax_der, ay_der, az_der, a_expo + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + fact_r = 0.d0 + do i = 1, List_all_comb_b2_size + + phase = 0 + b = 0.d0 + a_expo = 0.d0 + ax_der = 0.d0 + ay_der = 0.d0 + az_der = 0.d0 + do j = 1, nucl_num + a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) + dx = x - nucl_coord(j,1) + dy = y - nucl_coord(j,2) + dz = z - nucl_coord(j,3) + + phase += List_all_comb_b2(j,i) + b += a + a_expo += a * (dx*dx + dy*dy + dz*dz) + ax_der += a * dx + ay_der += a * dy + az_der += a * dz + enddo + + fact_r += (-1.d0)**dble(phase) * (-6.d0*b + 4.d0*(ax_der*ax_der + ay_der*ay_der + az_der*az_der) ) * dexp(-a_expo) + enddo + + v_1b_lapl(ipoint) = fact_r + enddo + +END_PROVIDER + +! 
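! Note on the expansion used above (a restatement of what the code computes, with
! \alpha_A = j1b_pen(A) and R_A = nucl_coord(A,:)):
!
!   v_1b(r) = \prod_A [ 1 - e^{-\alpha_A (r - R_A)^2} ]
!           = \sum_{c \in \{0,1\}^{nucl_num}} (-1)^{\sum_A c_A} e^{-\sum_A c_A \alpha_A (r - R_A)^2}
!
! so each term of the sum enumerated by List_all_comb_b2 is a single Gaussian, and
!
!   \grad_r v_1b(r) = \sum_c (-1)^{\sum_A c_A} [ -2 \sum_A c_A \alpha_A (r - R_A) ] e^{-\sum_A c_A \alpha_A (r - R_A)^2}
!
! which matches the accumulation e * (ax_der, ay_der, az_der), with
! e = -2 (-1)^phase exp(-a_expo), performed in v_1b_grad.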
--- + +BEGIN_PROVIDER [ double precision, v_1b_list_b2, (n_points_final_grid)] + + implicit none + integer :: i, ipoint + double precision :: x, y, z, coef, expo, dx, dy, dz + double precision :: fact_r + + PROVIDE List_all_comb_b2_coef List_all_comb_b2_expo List_all_comb_b2_cent + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + fact_r = 0.d0 + do i = 1, List_all_comb_b2_size + + coef = List_all_comb_b2_coef(i) + expo = List_all_comb_b2_expo(i) + + dx = x - List_all_comb_b2_cent(1,i) + dy = y - List_all_comb_b2_cent(2,i) + dz = z - List_all_comb_b2_cent(3,i) + + fact_r += coef * dexp(-expo * (dx*dx + dy*dy + dz*dz)) + enddo + + v_1b_list_b2(ipoint) = fact_r + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, v_1b_list_b3, (n_points_final_grid)] + + implicit none + integer :: i, ipoint + double precision :: x, y, z, coef, expo, dx, dy, dz + double precision :: fact_r + + PROVIDE List_all_comb_b3_coef List_all_comb_b3_expo List_all_comb_b3_cent + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + fact_r = 0.d0 + do i = 1, List_all_comb_b3_size + + coef = List_all_comb_b3_coef(i) + expo = List_all_comb_b3_expo(i) + + dx = x - List_all_comb_b3_cent(1,i) + dy = y - List_all_comb_b3_cent(2,i) + dz = z - List_all_comb_b3_cent(3,i) + + fact_r += coef * dexp(-expo * (dx*dx + dy*dy + dz*dz)) + enddo + + v_1b_list_b3(ipoint) = fact_r + enddo + +END_PROVIDER + +! --- + +double precision function jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision, external :: j12_mu, j12_nucl + + jmu_modif = j12_mu(r1, r2) * j12_nucl(r1, r2) + + return +end function jmu_modif + +! --- + +double precision function j12_mu(r1, r2) + + include 'constants.include.F' + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: mu_r12, r12 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + mu_r12 = mu_erf * r12 + + j12_mu = 0.5d0 * r12 * (1.d0 - derf(mu_r12)) - inv_sq_pi_2 * dexp(-mu_r12*mu_r12) / mu_erf + + return +end function j12_mu + +! --- + +double precision function j1b_nucl(r) + + implicit none + double precision, intent(in) :: r(3) + integer :: i + double precision :: a, d, e + + j1b_nucl = 1.d0 + + do i = 1, nucl_num + a = j1b_pen(i) + d = ( (r(1) - nucl_coord(i,1)) * (r(1) - nucl_coord(i,1)) & + + (r(2) - nucl_coord(i,2)) * (r(2) - nucl_coord(i,2)) & + + (r(3) - nucl_coord(i,3)) * (r(3) - nucl_coord(i,3)) ) + e = 1.d0 - exp(-a*d) + + j1b_nucl = j1b_nucl * e + enddo + + return +end function j1b_nucl + +! --- + +double precision function j12_nucl(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision, external :: j1b_nucl + + j12_nucl = j1b_nucl(r1) * j1b_nucl(r2) + + return +end function j12_nucl + +! --- + +! 
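! Closed forms implemented just above (assuming inv_sq_pi_2 = 1/(2 \sqrt{\pi})
! in constants.include.F):
!
!   u(r12)            = 0.5 r12 [1 - \erf(\mu r12)] - e^{-(\mu r12)^2} / (2 \mu \sqrt{\pi})
!   j12_nucl(r1,r2)   = j1b_nucl(r1) j1b_nucl(r2)
!   jmu_modif(r1,r2)  = u(r12) j1b_nucl(r1) j1b_nucl(r2)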
--------------------------------------------------------------------------------------- + +double precision function grad_x_j1b_nucl(r) + + implicit none + double precision, intent(in) :: r(3) + double precision :: r_eps(3), eps, fp, fm, delta + double precision, external :: j1b_nucl + + eps = 1d-6 + r_eps = r + delta = max(eps, dabs(eps*r(1))) + + r_eps(1) = r_eps(1) + delta + fp = j1b_nucl(r_eps) + r_eps(1) = r_eps(1) - 2.d0 * delta + fm = j1b_nucl(r_eps) + + grad_x_j1b_nucl = 0.5d0 * (fp - fm) / delta + + return +end function grad_x_j1b_nucl + +double precision function grad_y_j1b_nucl(r) + + implicit none + double precision, intent(in) :: r(3) + double precision :: r_eps(3), eps, fp, fm, delta + double precision, external :: j1b_nucl + + eps = 1d-6 + r_eps = r + delta = max(eps, dabs(eps*r(2))) + + r_eps(2) = r_eps(2) + delta + fp = j1b_nucl(r_eps) + r_eps(2) = r_eps(2) - 2.d0 * delta + fm = j1b_nucl(r_eps) + + grad_y_j1b_nucl = 0.5d0 * (fp - fm) / delta + + return +end function grad_y_j1b_nucl + +double precision function grad_z_j1b_nucl(r) + + implicit none + double precision, intent(in) :: r(3) + double precision :: r_eps(3), eps, fp, fm, delta + double precision, external :: j1b_nucl + + eps = 1d-6 + r_eps = r + delta = max(eps, dabs(eps*r(3))) + + r_eps(3) = r_eps(3) + delta + fp = j1b_nucl(r_eps) + r_eps(3) = r_eps(3) - 2.d0 * delta + fm = j1b_nucl(r_eps) + + grad_z_j1b_nucl = 0.5d0 * (fp - fm) / delta + + return +end function grad_z_j1b_nucl + +! --------------------------------------------------------------------------------------- + +! --- + +double precision function lapl_j1b_nucl(r) + + implicit none + double precision, intent(in) :: r(3) + double precision :: r_eps(3), eps, fp, fm, delta + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + eps = 1d-5 + r_eps = r + + lapl_j1b_nucl = 0.d0 + + ! --- + + delta = max(eps, dabs(eps*r(1))) + r_eps(1) = r_eps(1) + delta + fp = grad_x_j1b_nucl(r_eps) + r_eps(1) = r_eps(1) - 2.d0 * delta + fm = grad_x_j1b_nucl(r_eps) + r_eps(1) = r_eps(1) + delta + + lapl_j1b_nucl += 0.5d0 * (fp - fm) / delta + + ! --- + + delta = max(eps, dabs(eps*r(2))) + r_eps(2) = r_eps(2) + delta + fp = grad_y_j1b_nucl(r_eps) + r_eps(2) = r_eps(2) - 2.d0 * delta + fm = grad_y_j1b_nucl(r_eps) + r_eps(2) = r_eps(2) + delta + + lapl_j1b_nucl += 0.5d0 * (fp - fm) / delta + + ! --- + + delta = max(eps, dabs(eps*r(3))) + r_eps(3) = r_eps(3) + delta + fp = grad_z_j1b_nucl(r_eps) + r_eps(3) = r_eps(3) - 2.d0 * delta + fm = grad_z_j1b_nucl(r_eps) + r_eps(3) = r_eps(3) + delta + + lapl_j1b_nucl += 0.5d0 * (fp - fm) / delta + + ! --- + + return +end function lapl_j1b_nucl + +! --- + +! 
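! The *_num derivatives in this file all use the same symmetric stencil, with
! delta = max(eps, |eps * x|):
!
!   df/dx ~ [ f(x + delta) - f(x - delta) ] / (2 delta)
!
! and lapl_j1b_nucl above applies this formula a second time to each Cartesian
! component of the numerical gradient to approximate the Laplacian.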
--------------------------------------------------------------------------------------- + +double precision function grad1_x_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(1))) + + r1_eps(1) = r1_eps(1) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(1) = r1_eps(1) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_x_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_x_jmu_modif + +double precision function grad1_y_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(2))) + + r1_eps(2) = r1_eps(2) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(2) = r1_eps(2) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_y_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_y_jmu_modif + +double precision function grad1_z_jmu_modif(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: jmu_modif + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(3))) + + r1_eps(3) = r1_eps(3) + delta + fp = jmu_modif(r1_eps, r2) + r1_eps(3) = r1_eps(3) - 2.d0 * delta + fm = jmu_modif(r1_eps, r2) + + grad1_z_jmu_modif = 0.5d0 * (fp - fm) / delta + + return +end function grad1_z_jmu_modif + +! --------------------------------------------------------------------------------------- + +! --- + +! --------------------------------------------------------------------------------------- + +double precision function grad1_x_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(1))) + + r1_eps(1) = r1_eps(1) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(1) = r1_eps(1) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_x_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_x_j12_mu_num + +double precision function grad1_y_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(2))) + + r1_eps(2) = r1_eps(2) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(2) = r1_eps(2) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_y_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_y_j12_mu_num + +double precision function grad1_z_j12_mu_num(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r1_eps(3), eps, fp, fm, delta + double precision, external :: j12_mu + + eps = 1d-7 + r1_eps = r1 + delta = max(eps, dabs(eps*r1(3))) + + r1_eps(3) = r1_eps(3) + delta + fp = j12_mu(r1_eps, r2) + r1_eps(3) = r1_eps(3) - 2.d0 * delta + fm = j12_mu(r1_eps, r2) + + grad1_z_j12_mu_num = 0.5d0 * (fp - fm) / delta + + return +end function grad1_z_j12_mu_num + +! --------------------------------------------------------------------------------------- + +! --- + +! 
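! The closed-form gradients below follow from differentiating u(r12): the \erf
! and Gaussian contributions cancel, leaving
!
!   du/dr12          = 0.5 [1 - \erf(\mu r12)]
!   \grad_r1 u(r12)  = 0.5 [1 - \erf(\mu r12)] (r1 - r2) / r12
!
! which is what grad1_x/y/z_j12_mu_exc return (with a guard for r12 -> 0).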
--------------------------------------------------------------------------------------- + +double precision function grad1_x_j12_mu_exc(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r12 + + grad1_x_j12_mu_exc = 0.d0 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + if(r12 .lt. 1d-10) return + + grad1_x_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(1) - r2(1)) / r12 + + return +end function grad1_x_j12_mu_exc + +double precision function grad1_y_j12_mu_exc(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r12 + + grad1_y_j12_mu_exc = 0.d0 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + if(r12 .lt. 1d-10) return + + grad1_y_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(2) - r2(2)) / r12 + + return +end function grad1_y_j12_mu_exc + +double precision function grad1_z_j12_mu_exc(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + double precision :: r12 + + grad1_z_j12_mu_exc = 0.d0 + + r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + if(r12 .lt. 1d-10) return + + grad1_z_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(3) - r2(3)) / r12 + + return +end function grad1_z_j12_mu_exc + +! --------------------------------------------------------------------------------------- + +! --- + + diff --git a/src/non_h_ints_mu/jmu_modif.irp.f b/src/non_h_ints_mu/jmu_modif.irp.f deleted file mode 100644 index 59a4a104..00000000 --- a/src/non_h_ints_mu/jmu_modif.irp.f +++ /dev/null @@ -1,266 +0,0 @@ - -! --- - -double precision function jmu_modif(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision, external :: j12_mu, j12_nucl - - jmu_modif = j12_mu(r1, r2) * j12_nucl(r1, r2) - - return -end function jmu_modif - -! --- - -double precision function j12_mu(r1, r2) - - include 'constants.include.F' - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: mu_r12, r12 - - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - mu_r12 = mu_erf * r12 - - j12_mu = 0.5d0 * r12 * (1.d0 - derf(mu_r12)) - inv_sq_pi_2 * dexp(-mu_r12*mu_r12) / mu_erf - - return -end function j12_mu - -! --- - -double precision function j12_nucl(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - integer :: i, j - double precision :: a1, d1, e1, a2, d2, e2 - - j12_nucl = 1.d0 - do i = 1, nucl_num - a1 = j1b_pen(i) - d1 = ( (r1(1) - nucl_coord(i,1)) * (r1(1) - nucl_coord(i,1)) & - + (r1(2) - nucl_coord(i,2)) * (r1(2) - nucl_coord(i,2)) & - + (r1(3) - nucl_coord(i,3)) * (r1(3) - nucl_coord(i,3)) ) - e1 = 1.d0 - exp(-a1*d1) - - do j = 1, nucl_num - a2 = j1b_pen(j) - d2 = ( (r2(1) - nucl_coord(j,1)) * (r2(1) - nucl_coord(j,1)) & - + (r2(2) - nucl_coord(j,2)) * (r2(2) - nucl_coord(j,2)) & - + (r2(3) - nucl_coord(j,3)) * (r2(3) - nucl_coord(j,3)) ) - e2 = 1.d0 - exp(-a2*d2) - - j12_nucl = j12_nucl * e1 * e2 - enddo - enddo - - return -end function j12_nucl - -! --- - -! 
--------------------------------------------------------------------------------------- - -double precision function grad1_x_jmu_modif(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r1_eps(3), eps, fp, fm, delta - double precision, external :: jmu_modif - - eps = 1d-7 - r1_eps = r1 - delta = max(eps, dabs(eps*r1(1))) - - r1_eps(1) = r1_eps(1) + delta - fp = jmu_modif(r1_eps, r2) - r1_eps(1) = r1_eps(1) - 2.d0 * delta - fm = jmu_modif(r1_eps, r2) - - grad1_x_jmu_modif = 0.5d0 * (fp - fm) / delta - - return -end function grad1_x_jmu_modif - -double precision function grad1_y_jmu_modif(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r1_eps(3), eps, fp, fm, delta - double precision, external :: jmu_modif - - eps = 1d-7 - r1_eps = r1 - delta = max(eps, dabs(eps*r1(2))) - - r1_eps(2) = r1_eps(2) + delta - fp = jmu_modif(r1_eps, r2) - r1_eps(2) = r1_eps(2) - 2.d0 * delta - fm = jmu_modif(r1_eps, r2) - - grad1_y_jmu_modif = 0.5d0 * (fp - fm) / delta - - return -end function grad1_y_jmu_modif - -double precision function grad1_z_jmu_modif(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r1_eps(3), eps, fp, fm, delta - double precision, external :: jmu_modif - - eps = 1d-7 - r1_eps = r1 - delta = max(eps, dabs(eps*r1(3))) - - r1_eps(3) = r1_eps(3) + delta - fp = jmu_modif(r1_eps, r2) - r1_eps(3) = r1_eps(3) - 2.d0 * delta - fm = jmu_modif(r1_eps, r2) - - grad1_z_jmu_modif = 0.5d0 * (fp - fm) / delta - - return -end function grad1_z_jmu_modif - -! --------------------------------------------------------------------------------------- - -! --- - -! --------------------------------------------------------------------------------------- - -double precision function grad1_x_j12_mu_num(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r1_eps(3), eps, fp, fm, delta - double precision, external :: j12_mu - - eps = 1d-7 - r1_eps = r1 - delta = max(eps, dabs(eps*r1(1))) - - r1_eps(1) = r1_eps(1) + delta - fp = j12_mu(r1_eps, r2) - r1_eps(1) = r1_eps(1) - 2.d0 * delta - fm = j12_mu(r1_eps, r2) - - grad1_x_j12_mu_num = 0.5d0 * (fp - fm) / delta - - return -end function grad1_x_j12_mu_num - -double precision function grad1_y_j12_mu_num(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r1_eps(3), eps, fp, fm, delta - double precision, external :: j12_mu - - eps = 1d-7 - r1_eps = r1 - delta = max(eps, dabs(eps*r1(2))) - - r1_eps(2) = r1_eps(2) + delta - fp = j12_mu(r1_eps, r2) - r1_eps(2) = r1_eps(2) - 2.d0 * delta - fm = j12_mu(r1_eps, r2) - - grad1_y_j12_mu_num = 0.5d0 * (fp - fm) / delta - - return -end function grad1_y_j12_mu_num - -double precision function grad1_z_j12_mu_num(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r1_eps(3), eps, fp, fm, delta - double precision, external :: j12_mu - - eps = 1d-7 - r1_eps = r1 - delta = max(eps, dabs(eps*r1(3))) - - r1_eps(3) = r1_eps(3) + delta - fp = j12_mu(r1_eps, r2) - r1_eps(3) = r1_eps(3) - 2.d0 * delta - fm = j12_mu(r1_eps, r2) - - grad1_z_j12_mu_num = 0.5d0 * (fp - fm) / delta - - return -end function grad1_z_j12_mu_num - -! --------------------------------------------------------------------------------------- - -! --- - -! 
--------------------------------------------------------------------------------------- - -double precision function grad1_x_j12_mu_exc(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r12 - - grad1_x_j12_mu_exc = 0.d0 - - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - if(r12 .lt. 1d-10) return - - grad1_x_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(1) - r2(1)) / r12 - - return -end function grad1_x_j12_mu_exc - -double precision function grad1_y_j12_mu_exc(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r12 - - grad1_y_j12_mu_exc = 0.d0 - - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - if(r12 .lt. 1d-10) return - - grad1_y_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(2) - r2(2)) / r12 - - return -end function grad1_y_j12_mu_exc - -double precision function grad1_z_j12_mu_exc(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r12 - - grad1_z_j12_mu_exc = 0.d0 - - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - if(r12 .lt. 1d-10) return - - grad1_z_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(3) - r2(3)) / r12 - - return -end function grad1_z_j12_mu_exc - -! --------------------------------------------------------------------------------------- - -! --- - - diff --git a/src/non_h_ints_mu/new_grad_tc.irp.f b/src/non_h_ints_mu/new_grad_tc.irp.f index 26ed642c..9832e81d 100644 --- a/src/non_h_ints_mu/new_grad_tc.irp.f +++ b/src/non_h_ints_mu/new_grad_tc.irp.f @@ -1,81 +1,3 @@ - -! --- - - BEGIN_PROVIDER [ double precision, fact1_j12, ( n_points_final_grid)] -&BEGIN_PROVIDER [ double precision, fact2_j12, (3, n_points_final_grid)] -&BEGIN_PROVIDER [ double precision, fact3_j12, ( n_points_final_grid)] - - implicit none - integer :: ipoint, i, j, phase - double precision :: x, y, z, dx, dy, dz - double precision :: a, d, e, fact_r, fact_r_sq - double precision :: fact_x, fact_y, fact_z - double precision :: ax_der, ay_der, az_der, a_expo - - do ipoint = 1, n_points_final_grid - - x = final_grid_points(1,ipoint) - y = final_grid_points(2,ipoint) - z = final_grid_points(3,ipoint) - - ! --- - - fact_r = 1.d0 - fact_r_sq = 1.d0 - do j = 1, nucl_num - a = j1b_pen(j) - dx = x - nucl_coord(j,1) - dy = y - nucl_coord(j,2) - dz = z - nucl_coord(j,3) - d = x*x + y*y + z*z - e = 1.d0 - dexp(-a*d) - - fact_r = fact_r * e - fact_r_sq = fact_r_sq * e * e - enddo - fact1_j12(ipoint) = fact_r - fact3_j12(ipoint) = fact_r_sq - - ! --- - - fact_x = 0.d0 - fact_y = 0.d0 - fact_z = 0.d0 - do i = 1, List_all_comb_b2_size - - phase = 0 - a_expo = 0.d0 - ax_der = 0.d0 - ay_der = 0.d0 - az_der = 0.d0 - do j = 1, nucl_num - a = dble(List_all_comb_b2(j,i)) * j1b_pen(j) - dx = x - nucl_coord(j,1) - dy = y - nucl_coord(j,2) - dz = z - nucl_coord(j,3) - - phase += List_all_comb_b2(j,i) - a_expo += a * (dx*dx + dy*dy + dz*dz) - ax_der += a * dx - ay_der += a * dy - az_der += a * dz - enddo - e = -2.d0 * (-1.d0)**dble(phase) * dexp(-a_expo) - - fact_x += e * ax_der - fact_y += e * ay_der - fact_z += e * az_der - enddo - - fact2_j12(1,ipoint) = fact_x - fact2_j12(2,ipoint) = fact_y - fact2_j12(3,ipoint) = fact_z - - ! --- - - enddo - -END_PROVIDER ! 
--- @@ -103,10 +25,10 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_fin y = final_grid_points(2,ipoint) z = final_grid_points(3,ipoint) - tmp0 = fact1_j12(ipoint) - tmp_x = fact2_j12(1,ipoint) - tmp_y = fact2_j12(2,ipoint) - tmp_z = fact2_j12(3,ipoint) + tmp0 = v_1b (ipoint) + tmp_x = v_1b_grad(1,ipoint) + tmp_y = v_1b_grad(2,ipoint) + tmp_z = v_1b_grad(3,ipoint) do j = 1, ao_num do i = 1, ao_num diff --git a/src/non_h_ints_mu/numerical_integ.irp.f b/src/non_h_ints_mu/numerical_integ.irp.f index abac1874..842851aa 100644 --- a/src/non_h_ints_mu/numerical_integ.irp.f +++ b/src/non_h_ints_mu/numerical_integ.irp.f @@ -4,132 +4,129 @@ ! ! \int dr2 [-1 * \grad_r1 u(r1,r2)] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) ! -BEGIN_PROVIDER [ double precision, num_grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] - implicit none - - integer :: i, j, ipoint, jpoint - double precision :: tmp, r1(3), r2(3) - - double precision, external :: ao_value - double precision, external :: j12_nucl - double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc - double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc - double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc - - num_grad_1_u_ij_mu = 0.d0 - - do j = 1, ao_num - do i = 1, ao_num - - do ipoint = 1, n_points_final_grid - r1(1) = final_grid_points(1,ipoint) - r1(2) = final_grid_points(2,ipoint) - r1(3) = final_grid_points(3,ipoint) - - do jpoint = 1, n_points_final_grid - r2(1) = final_grid_points(1,jpoint) - r2(2) = final_grid_points(2,jpoint) - r2(3) = final_grid_points(3,jpoint) - tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) - - num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) - num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) - num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) - enddo - - enddo - enddo - enddo - -END_PROVIDER +!BEGIN_PROVIDER [ double precision, num_grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] +! +! implicit none +! +! integer :: i, j, ipoint, jpoint +! double precision :: tmp, r1(3), r2(3) +! +! double precision, external :: ao_value +! double precision, external :: j12_nucl +! double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc +! double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc +! double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc +! +! num_grad_1_u_ij_mu = 0.d0 +! +! do j = 1, ao_num +! do i = 1, ao_num +! +! do ipoint = 1, n_points_final_grid +! r1(1) = final_grid_points(1,ipoint) +! r1(2) = final_grid_points(2,ipoint) +! r1(3) = final_grid_points(3,ipoint) +! +! do jpoint = 1, n_points_final_grid +! r2(1) = final_grid_points(1,jpoint) +! r2(2) = final_grid_points(2,jpoint) +! r2(3) = final_grid_points(3,jpoint) +! tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) +! +! num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) +! num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) +! num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) +! enddo +! +! enddo +! enddo +! enddo +! +!END_PROVIDER ! 
--- -double precision function num_grad_1_u_ij_mu_x(i, j, ipoint) +subroutine num_grad_1_u_ij_mu(i, j, ipoint, integ) implicit none - integer, intent(in) :: i, j, ipoint - integer :: jpoint - double precision :: tmp, r1(3), r2(3) - double precision, external :: ao_value - double precision, external :: j12_nucl - double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc - num_grad_1_u_ij_mu_x = 0.d0 + integer, intent(in) :: i, j, ipoint + double precision, intent(out) :: integ(3) + + integer :: jpoint + double precision :: tmp, r1(3), r2(3) + double precision :: tmp_x, tmp_y, tmp_z + + double precision, external :: ao_value + double precision, external :: j12_nucl + double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc + double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc + double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) + tmp_x = 0.d0 + tmp_y = 0.d0 + tmp_z = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) r2(3) = final_grid_points(3,jpoint) tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) - num_grad_1_u_ij_mu_x += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) + tmp_x += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) + tmp_y += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) + tmp_z += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) enddo -end function num_grad_1_u_ij_mu_x + integ(1) = tmp_x + integ(2) = tmp_y + integ(3) = tmp_z + + return +end subroutine num_grad_1_u_ij_mu ! --- -double precision function num_grad_1_u_ij_mu_y(i, j, ipoint) +double precision function num_gradu_squared_u_ij_mu(i, j, ipoint) implicit none + integer, intent(in) :: i, j, ipoint + integer :: jpoint - double precision :: tmp, r1(3), r2(3) + double precision :: tmp, r1(3), r2(3), r12 + double precision :: tmp_x, tmp_y, tmp_z, tmp1, tmp2 + double precision, external :: ao_value double precision, external :: j12_nucl - double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc - - num_grad_1_u_ij_mu_y = 0.d0 r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) r1(3) = final_grid_points(3,ipoint) + num_gradu_squared_u_ij_mu = 0.d0 do jpoint = 1, n_points_final_grid r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) r2(3) = final_grid_points(3,jpoint) - tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + tmp_x = r1(1) - r2(1) + tmp_y = r1(2) - r2(2) + tmp_z = r1(3) - r2(3) + r12 = dsqrt( tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z ) + tmp1 = 1.d0 - derf(mu_erf * r12) + tmp2 = j12_nucl(r1, r2) + tmp = -0.25d0 * tmp1 * tmp1 * tmp2 * tmp2 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) - num_grad_1_u_ij_mu_y += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) + num_gradu_squared_u_ij_mu += tmp enddo -end function num_grad_1_u_ij_mu_y + return +end function num_gradu_squared_u_ij_mu ! 
--- -double precision function num_grad_1_u_ij_mu_z(i, j, ipoint) - - implicit none - integer, intent(in) :: i, j, ipoint - integer :: jpoint - double precision :: tmp, r1(3), r2(3) - double precision, external :: ao_value - double precision, external :: j12_nucl - double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc - - num_grad_1_u_ij_mu_z = 0.d0 - - r1(1) = final_grid_points(1,ipoint) - r1(2) = final_grid_points(2,ipoint) - r1(3) = final_grid_points(3,ipoint) - - do jpoint = 1, n_points_final_grid - r2(1) = final_grid_points(1,jpoint) - r2(2) = final_grid_points(2,jpoint) - r2(3) = final_grid_points(3,jpoint) - tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) - - num_grad_1_u_ij_mu_z += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) - enddo - -end function num_grad_1_u_ij_mu_z - -! --- From b13a315cc11c3223d21f7ee7093ce682b2a8a27a Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Mon, 17 Oct 2022 17:51:46 +0200 Subject: [PATCH 07/10] integrals over r2 tested --- src/ao_many_one_e_ints/grad2_jmu_modif.irp.f | 38 +- .../grad_lapl_jmu_modif.irp.f | 3 + ...j1b_product_to_sum.irp.f => listj1b.irp.f} | 0 src/ao_one_e_ints/pot_ao_erf_ints.irp.f | 2 +- src/non_h_ints_mu/debug_integ_jmu_modif.irp.f | 475 +++++++++++++++++- src/non_h_ints_mu/fit_j.irp.f | 28 +- src/non_h_ints_mu/grad_squared.irp.f | 2 +- src/non_h_ints_mu/j12_nucl_utils.irp.f | 89 ++-- src/non_h_ints_mu/numerical_integ.irp.f | 269 +++++++++- 9 files changed, 810 insertions(+), 96 deletions(-) rename src/ao_many_one_e_ints/{listj1b_product_to_sum.irp.f => listj1b.irp.f} (100%) diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f index 163f6e2d..7e08bd97 100644 --- a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -1,7 +1,7 @@ ! --- -BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! 
@@ -21,7 +21,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_po provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - int2_grad1u_grad2u_j1b = 0.d0 + int2_grad1u2_grad2u2_j1b2 = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & @@ -30,12 +30,13 @@ BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_po !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_grad1u_grad2u_j1b) + !$OMP List_all_comb_b3_cent, int2_grad1u2_grad2u2_j1b2) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) tmp = 0.d0 !$OMP DO + !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num @@ -69,7 +70,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_po do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num - int2_grad1u_grad2u_j1b(j,i,ipoint) += tmp(j,i,ipoint) + int2_grad1u2_grad2u2_j1b2(j,i,ipoint) += tmp(j,i,ipoint) enddo enddo enddo @@ -81,19 +82,19 @@ BEGIN_PROVIDER [ double precision, int2_grad1u_grad2u_j1b, (ao_num, ao_num, n_po do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = 1, i-1 - int2_grad1u_grad2u_j1b(j,i,ipoint) = int2_grad1u_grad2u_j1b(i,j,ipoint) + int2_grad1u2_grad2u2_j1b2(j,i,ipoint) = int2_grad1u2_grad2u2_j1b2(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_grad1u_grad2u_j1b', wall1 - wall0 + print*, ' wall time for int2_grad1u2_grad2u2_j1b2', wall1 - wall0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_u2_j1b, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [ double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! @@ -113,7 +114,7 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b, (ao_num, ao_num, n_points_final_ provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - int2_u2_j1b = 0.d0 + int2_u2_j1b2 = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & @@ -122,12 +123,13 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b, (ao_num, ao_num, n_points_final_ !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_u2_j1b) + !$OMP List_all_comb_b3_cent, int2_u2_j1b2) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) tmp = 0.d0 !$OMP DO + !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num @@ -161,7 +163,7 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b, (ao_num, ao_num, n_points_final_ do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num - int2_u2_j1b(j,i,ipoint) += tmp(j,i,ipoint) + int2_u2_j1b2(j,i,ipoint) += tmp(j,i,ipoint) enddo enddo enddo @@ -173,13 +175,13 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b, (ao_num, ao_num, n_points_final_ do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = 1, i-1 - int2_u2_j1b(j,i,ipoint) = int2_u2_j1b(i,j,ipoint) + int2_u2_j1b2(j,i,ipoint) = int2_u2_j1b2(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u2_j1b', wall1 - wall0 + print*, ' wall time for int2_u2_j1b2', wall1 - wall0 END_PROVIDER @@ -297,7 +299,7 @@ END_PROVIDER ! 
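! The j1b -> j1b2 suffix appears to indicate that the squared one-body factor is
! carried by the integral: judging from the Gaussian fits contracted here
! (coef/expo_gauss_j_mu_x_2 for u^2, and List_all_comb_b3 for [j1b_nucl]^2),
! int2_u2_j1b2 presumably tabulates, for each grid point r1,
!
!   \int dr2 \phi_i(r2) \phi_j(r2) [j1b_nucl(r2)]^2 u(r12)^2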
--- -BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b, (ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! @@ -317,7 +319,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b, (ao_num, ao_num, n_points_ provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - int2_u_grad1u_j1b = 0.d0 + int2_u_grad1u_j1b2 = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & @@ -327,7 +329,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b, (ao_num, ao_num, n_points_ !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_u_grad1u_j1b) + !$OMP List_all_comb_b3_cent, int2_u_grad1u_j1b2) allocate( tmp(ao_num,ao_num,n_points_final_grid) ) tmp = 0.d0 @@ -380,7 +382,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b, (ao_num, ao_num, n_points_ do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num - int2_u_grad1u_j1b(j,i,ipoint) += tmp(j,i,ipoint) + int2_u_grad1u_j1b2(j,i,ipoint) += tmp(j,i,ipoint) enddo enddo enddo @@ -392,13 +394,13 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b, (ao_num, ao_num, n_points_ do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = 1, i-1 - int2_u_grad1u_j1b(j,i,ipoint) = int2_u_grad1u_j1b(i,j,ipoint) + int2_u_grad1u_j1b2(j,i,ipoint) = int2_u_grad1u_j1b2(i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u_grad1u_j1b', wall1 - wall0 + print*, ' wall time for int2_u_grad1u_j1b2', wall1 - wall0 END_PROVIDER diff --git a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index 9dd715e2..b847a630 100644 --- a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -33,6 +33,7 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po tmp = 0.d0 !$OMP DO + !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num @@ -141,6 +142,7 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ tmp = 0.d0 !$OMP DO + !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num @@ -235,6 +237,7 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ tmp = 0.d0 !$OMP DO + !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid do i = 1, ao_num do j = i, ao_num diff --git a/src/ao_many_one_e_ints/listj1b_product_to_sum.irp.f b/src/ao_many_one_e_ints/listj1b.irp.f similarity index 100% rename from src/ao_many_one_e_ints/listj1b_product_to_sum.irp.f rename to src/ao_many_one_e_ints/listj1b.irp.f diff --git a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f index e0d6254a..1d2d8faf 100644 --- a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f +++ b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f @@ -103,7 +103,7 @@ double precision function NAI_pol_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, alpha2 = ao_expo_ordered_transp(j,j_ao) coef12 = ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) - if(coef12 .lt. 1d-14) cycle + if(dabs(coef12) .lt. 
1d-14) cycle integral = NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A1, power_A2, alpha1, alpha2 & , beta, B_center, C_center, n_pt_in, mu_in ) diff --git a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f index 2bcd1358..e59b5f7a 100644 --- a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f +++ b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -7,20 +7,32 @@ program debug_integ_jmu_modif my_grid_becke = .True. - my_n_pt_r_grid = 30 - my_n_pt_a_grid = 50 + !my_n_pt_r_grid = 30 + !my_n_pt_a_grid = 50 !my_n_pt_r_grid = 100 !my_n_pt_a_grid = 170 + my_n_pt_r_grid = 150 + my_n_pt_a_grid = 194 touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid PROVIDE mu_erf j1b_pen - call test_j1b_nucl() - call test_grad_j1b_nucl() - call test_lapl_j1b_nucl() + !call test_j1b_nucl() + !call test_grad_j1b_nucl() + !call test_lapl_j1b_nucl() - call test_list_b2() - call test_list_b3() + !call test_list_b2() + !call test_list_b3() + + !call test_fit_u() + call test_fit_u2() + !call test_fit_ugradu() + + !call test_v_ij_u_cst_mu_j1b() + !call test_v_ij_erf_rk_cst_mu_j1b() + !call test_x_v_ij_erf_rk_cst_mu_j1b() + !call test_int2_u2_j1b2() + !call test_int2_grad1u2_grad2u2_j1b2() !call test_grad_1_u_ij_mu() !call test_gradu_squared_u_ij_mu() @@ -29,6 +41,252 @@ end ! --- +subroutine test_v_ij_u_cst_mu_j1b() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_v_ij_u_cst_mu_j1b + + print*, ' test_v_ij_u_cst_mu_j1b ...' + + PROVIDE v_ij_u_cst_mu_j1b + + eps_ij = 1d-8 + acc_tot = 0.d0 + + !do ipoint = 1, 10 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_exc = v_ij_u_cst_mu_j1b(i,j,ipoint) + i_num = num_v_ij_u_cst_mu_j1b(i,j,ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in v_ij_u_cst_mu_j1b on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + enddo + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_v_ij_u_cst_mu_j1b + +! --- + +subroutine test_v_ij_erf_rk_cst_mu_j1b() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_v_ij_erf_rk_cst_mu_j1b + + print*, ' test_v_ij_erf_rk_cst_mu_j1b ...' + + PROVIDE v_ij_erf_rk_cst_mu_j1b + + eps_ij = 1d-8 + acc_tot = 0.d0 + + !do ipoint = 1, 10 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_exc = v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + i_num = num_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + enddo + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_v_ij_erf_rk_cst_mu_j1b + +! --- + +subroutine test_x_v_ij_erf_rk_cst_mu_j1b() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: integ(3) + + print*, ' test_x_v_ij_erf_rk_cst_mu_j1b ...' 
+ + PROVIDE x_v_ij_erf_rk_cst_mu_j1b + + eps_ij = 1d-8 + acc_tot = 0.d0 + + !do ipoint = 1, 10 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + call num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) + + i_exc = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + i_num = integ(1) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in x part of x_v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + i_exc = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + i_num = integ(2) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in y part of x_v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + i_exc = x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + i_num = integ(3) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in z part of x_v_ij_erf_rk_cst_mu_j1b on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + enddo + enddo + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_x_v_ij_erf_rk_cst_mu_j1b + +! --- + +subroutine test_int2_u2_j1b2() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_int2_u2_j1b2 + + print*, ' test_int2_u2_j1b2 ...' + + PROVIDE int2_u2_j1b2 + + eps_ij = 1d-8 + acc_tot = 0.d0 + + !do ipoint = 1, 10 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_exc = int2_u2_j1b2(i,j,ipoint) + i_num = num_int2_u2_j1b2(i,j,ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in int2_u2_j1b2 on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + enddo + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_int2_u2_j1b2 + +! --- + +subroutine test_int2_grad1u2_grad2u2_j1b2() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_int2_grad1u2_grad2u2_j1b2 + + print*, ' test_int2_grad1u2_grad2u2_j1b2 ...' + + PROVIDE int2_grad1u2_grad2u2_j1b2 + + eps_ij = 1d-8 + acc_tot = 0.d0 + + !do ipoint = 1, 10 + do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num + + i_exc = int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + i_num = num_int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in int2_grad1u2_grad2u2_j1b2 on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + enddo + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_int2_grad1u2_grad2u2_j1b2 + +! 
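! All of these checks share the same relative-error accumulator. Below is a
! minimal, self-contained sketch of that pattern in plain Fortran (a hypothetical
! standalone program; dummy arrays stand in for the analytic and numerical
! integrals, and both accumulators start from zero):

program check_pattern
  implicit none
  integer, parameter :: n = 100
  integer            :: i
  double precision   :: x_exact(n), x_num(n)
  double precision   :: eps_ij, acc_ij, acc_tot, normalz

  ! dummy data standing in for the analytic and numerical integrals
  do i = 1, n
    x_exact(i) = dsin(dble(i))
    x_num(i)   = x_exact(i) + 1d-9 * dcos(dble(i))
  enddo

  eps_ij  = 1d-6
  acc_tot = 0.d0
  normalz = 0.d0

  do i = 1, n
    acc_ij = dabs(x_exact(i) - x_num(i))
    if(acc_ij .gt. eps_ij) then
      print *, ' problem on', i, x_exact(i), x_num(i), acc_ij
    endif
    acc_tot = acc_tot + acc_ij
    normalz = normalz + dabs(x_num(i))
  enddo

  print *, ' normalized acc = ', acc_tot / normalz
  print *, ' normalz        = ', normalz
end program check_pattern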
--- + subroutine test_grad_1_u_ij_mu() implicit none @@ -384,3 +642,206 @@ end subroutine test_list_b3 ! --- +subroutine test_fit_ugradu() + + implicit none + + integer :: ipoint, i + double precision :: i_exc, i_fit, i_num, x2 + double precision :: r1(3), r2(3), grad(3) + double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + + double precision, external :: j12_mu + + print*, ' test_fit_ugradu ...' + + eps_ij = 1d-7 + acc_tot = 0.d0 + + r2 = 0.d0 + + do ipoint = 1, n_points_final_grid + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + x2 = r1(1) * r1(1) + r1(2) * r1(2) + r1(3) * r1(3) + if(x2 .lt. 1d-10) cycle + + i_fit = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_1_erf(i) + coef = coef_gauss_j_mu_1_erf(i) + i_fit += coef * dexp(-expo*x2) + enddo + i_fit = i_fit / dsqrt(x2) + + call grad1_j12_mu_exc(r1, r2, grad) + + ! --- + + i_exc = j12_mu(r1, r2) * grad(1) + i_num = i_fit * r1(1) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem on x in test_fit_ugradu on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_exc) + + ! --- + + i_exc = j12_mu(r1, r2) * grad(2) + i_num = i_fit * r1(2) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem on y in test_fit_ugradu on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_exc) + + ! --- + + i_exc = j12_mu(r1, r2) * grad(3) + i_num = i_fit * r1(3) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem on z in test_fit_ugradu on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_exc) + + ! --- + + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_fit_ugradu + +! --- + +subroutine test_fit_u() + + implicit none + + integer :: ipoint, i + double precision :: i_exc, i_fit, i_num, x2 + double precision :: r1(3), r2(3) + double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + + double precision, external :: j12_mu + + print*, ' test_fit_u ...' + + eps_ij = 1d-7 + acc_tot = 0.d0 + + r2 = 0.d0 + + do ipoint = 1, n_points_final_grid + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + x2 = r1(1) * r1(1) + r1(2) * r1(2) + r1(3) * r1(3) + if(x2 .lt. 1d-10) cycle + + i_fit = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_x(i) + coef = coef_gauss_j_mu_x(i) + i_fit += coef * dexp(-expo*x2) + enddo + + i_exc = j12_mu(r1, r2) + i_num = i_fit + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in test_fit_u on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_exc) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_fit_u + +! 
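! The fit checks above and below compare the Slater-type two-electron factors
! against the Gaussian expansions used in the analytic integrals; with
! (c_k, \gamma_k) the tabulated coefficient/exponent pairs,
!
!   u(r12)       ~ \sum_k c_k   e^{-\gamma_k   r12^2}                     (coef/expo_gauss_j_mu_x,     test_fit_u)
!   u(r12)^2     ~ \sum_k c'_k  e^{-\gamma'_k  r12^2}                     (coef/expo_gauss_j_mu_x_2,   test_fit_u2)
!   u \grad_1 u  ~ (r1 - r2)/r12 \sum_k c''_k e^{-\gamma''_k r12^2}       (coef/expo_gauss_j_mu_1_erf, test_fit_ugradu)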
--- + +subroutine test_fit_u2() + + implicit none + + integer :: ipoint, i + double precision :: i_exc, i_fit, i_num, x2 + double precision :: r1(3), r2(3) + double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + + double precision, external :: j12_mu + + print*, ' test_fit_u2 ...' + + eps_ij = 1d-7 + acc_tot = 0.d0 + + r2 = 0.d0 + + do ipoint = 1, n_points_final_grid + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + x2 = r1(1) * r1(1) + r1(2) * r1(2) + r1(3) * r1(3) + if(x2 .lt. 1d-10) cycle + + i_fit = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_x_2(i) + coef = coef_gauss_j_mu_x_2(i) + i_fit += coef * dexp(-expo*x2) + enddo + + i_exc = j12_mu(r1, r2) * j12_mu(r1, r2) + i_num = i_fit + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in test_fit_u2 on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_exc) + enddo + + acc_tot = acc_tot / normalz + print*, ' normalized acc = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_fit_u2 + +! --- + diff --git a/src/non_h_ints_mu/fit_j.irp.f b/src/non_h_ints_mu/fit_j.irp.f index 34f3a31a..d53209d0 100644 --- a/src/non_h_ints_mu/fit_j.irp.f +++ b/src/non_h_ints_mu/fit_j.irp.f @@ -70,12 +70,20 @@ END_PROVIDER integer :: i double precision :: tmp double precision :: expos(n_max_fit_slat), alpha, beta + double precision :: alpha_opt, beta_opt + + !alpha_opt = 2.d0 * expo_j_xmu(1) + !beta_opt = 2.d0 * expo_j_xmu(2) + + ! direct opt + alpha_opt = 3.52751759d0 + beta_opt = 1.26214809d0 tmp = 0.25d0 / (mu_erf * mu_erf * dacos(-1.d0)) - alpha = 2.d0 * expo_j_xmu(1) * mu_erf + alpha = alpha_opt * mu_erf call expo_fit_slater_gam(alpha, expos) - beta = 2.d0 * expo_j_xmu(2) * mu_erf * mu_erf + beta = beta_opt * mu_erf * mu_erf do i = 1, n_max_fit_slat expo_gauss_j_mu_x_2(i) = expos(i) + beta @@ -101,12 +109,20 @@ END_PROVIDER integer :: i double precision :: tmp double precision :: expos(n_max_fit_slat), alpha, beta + double precision :: alpha_opt, beta_opt + + !alpha_opt = expo_j_xmu(1) + expo_gauss_1_erf_x(1) + !beta_opt = expo_j_xmu(2) + expo_gauss_1_erf_x(2) + + ! 
direct opt + alpha_opt = 2.87875632d0 + beta_opt = 1.34801003d0 tmp = -0.25d0 / (mu_erf * dsqrt(dacos(-1.d0))) - alpha = (expo_j_xmu(1) + expo_gauss_1_erf_x(1)) * mu_erf + alpha = alpha_opt * mu_erf call expo_fit_slater_gam(alpha, expos) - beta = (expo_j_xmu(2) + expo_gauss_1_erf_x(2)) * mu_erf * mu_erf + beta = beta_opt * mu_erf * mu_erf do i = 1, n_max_fit_slat expo_gauss_j_mu_1_erf(i) = expos(i) + beta @@ -162,8 +178,8 @@ double precision function j_mu_fit_gauss(x) j_mu_fit_gauss = 0.d0 do i = 1, n_max_fit_slat alpha = expo_gauss_j_mu_x(i) - coef = coef_gauss_j_mu_x(i) - j_mu_fit_gauss += coef_gauss_j_mu_x(i) * dexp(-expo_gauss_j_mu_x(i)*x*x) + coef = coef_gauss_j_mu_x(i) + j_mu_fit_gauss += coef * dexp(-alpha*x*x) enddo end diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f index 6ef34118..b9c98ea9 100644 --- a/src/non_h_ints_mu/grad_squared.irp.f +++ b/src/non_h_ints_mu/grad_squared.irp.f @@ -33,7 +33,7 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_poin tmp = v_1b(ipoint) * v_1b(ipoint) do j = 1, ao_num do i = 1, ao_num - gradu_squared_u_ij_mu(j,i,ipoint) += tmp * int2_grad1u_grad2u_j1b(i,j,ipoint) + gradu_squared_u_ij_mu(j,i,ipoint) += tmp * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) enddo enddo enddo diff --git a/src/non_h_ints_mu/j12_nucl_utils.irp.f b/src/non_h_ints_mu/j12_nucl_utils.irp.f index 344b1fa8..a6dd0939 100644 --- a/src/non_h_ints_mu/j12_nucl_utils.irp.f +++ b/src/non_h_ints_mu/j12_nucl_utils.irp.f @@ -237,6 +237,30 @@ end function j12_mu ! --- +double precision function j12_mu_gauss(r1, r2) + + implicit none + double precision, intent(in) :: r1(3), r2(3) + integer :: i + double precision :: r12, coef, expo + + r12 = (r1(1) - r2(1)) * (r1(1) - r2(1)) & + + (r1(2) - r2(2)) * (r1(2) - r2(2)) & + + (r1(3) - r2(3)) * (r1(3) - r2(3)) + + j12_mu_gauss = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_x(i) + coef = coef_gauss_j_mu_x(i) + + j12_mu_gauss += coef * dexp(-expo*r12) + enddo + + return +end function j12_mu_gauss + +! --- + double precision function j1b_nucl(r) implicit none @@ -535,63 +559,30 @@ end function grad1_z_j12_mu_num ! --- -! --------------------------------------------------------------------------------------- - -double precision function grad1_x_j12_mu_exc(r1, r2) +subroutine grad1_j12_mu_exc(r1, r2, grad) implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r12 + double precision, intent(in) :: r1(3), r2(3) + double precision, intent(out) :: grad(3) + double precision :: dx, dy, dz, r12, tmp - grad1_x_j12_mu_exc = 0.d0 + grad = 0.d0 - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + + r12 = dsqrt( dx * dx + dy * dy + dz * dz ) if(r12 .lt. 1d-10) return - grad1_x_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(1) - r2(1)) / r12 + tmp = 0.5d0 * (1.d0 - derf(mu_erf * r12)) / r12 + + grad(1) = tmp * dx + grad(2) = tmp * dy + grad(3) = tmp * dz return -end function grad1_x_j12_mu_exc - -double precision function grad1_y_j12_mu_exc(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r12 - - grad1_y_j12_mu_exc = 0.d0 - - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - if(r12 .lt. 
1d-10) return - - grad1_y_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(2) - r2(2)) / r12 - - return -end function grad1_y_j12_mu_exc - -double precision function grad1_z_j12_mu_exc(r1, r2) - - implicit none - double precision, intent(in) :: r1(3), r2(3) - double precision :: r12 - - grad1_z_j12_mu_exc = 0.d0 - - r12 = dsqrt( (r1(1) - r2(1)) * (r1(1) - r2(1)) & - + (r1(2) - r2(2)) * (r1(2) - r2(2)) & - + (r1(3) - r2(3)) * (r1(3) - r2(3)) ) - if(r12 .lt. 1d-10) return - - grad1_z_j12_mu_exc = 0.5d0 * (1.d0 - derf(mu_erf * r12)) * (r1(3) - r2(3)) / r12 - - return -end function grad1_z_j12_mu_exc - -! --------------------------------------------------------------------------------------- +end subroutine grad1_j12_mu_exc ! --- diff --git a/src/non_h_ints_mu/numerical_integ.irp.f b/src/non_h_ints_mu/numerical_integ.irp.f index 842851aa..17b666aa 100644 --- a/src/non_h_ints_mu/numerical_integ.irp.f +++ b/src/non_h_ints_mu/numerical_integ.irp.f @@ -10,13 +10,10 @@ ! implicit none ! ! integer :: i, j, ipoint, jpoint -! double precision :: tmp, r1(3), r2(3) +! double precision :: tmp, r1(3), r2(3), grad(3) ! ! double precision, external :: ao_value ! double precision, external :: j12_nucl -! double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc -! double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc -! double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc ! ! num_grad_1_u_ij_mu = 0.d0 ! @@ -34,9 +31,11 @@ ! r2(3) = final_grid_points(3,jpoint) ! tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) ! -! num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) -! num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) -! num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) +! call grad1_j12_mu_exc(r1, r2, grad) +! +! num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad(1)) +! num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad(2)) +! num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad(3)) ! enddo ! ! enddo @@ -47,6 +46,249 @@ ! --- +double precision function num_v_ij_u_cst_mu_j1b(i, j, ipoint) + + BEGIN_DOC + ! + ! \int dr2 u12 \phi_i(r2) \phi_j(r2) x v_1b(r2) + ! + END_DOC + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint + double precision :: r1(3), r2(3) + + double precision, external :: ao_value + double precision, external :: j12_mu, j1b_nucl, j12_mu_gauss + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_v_ij_u_cst_mu_j1b = 0.d0 + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + num_v_ij_u_cst_mu_j1b += ao_value(i, r2) * ao_value(j, r2) * j12_mu_gauss(r1, r2) * j1b_nucl(r2) * final_weight_at_r_vector(jpoint) + enddo + + return +end function num_v_ij_u_cst_mu_j1b + +! --- + +double precision function num_int2_u2_j1b2(i, j, ipoint) + + BEGIN_DOC + ! + ! \int dr2 u12^2 \phi_i(r2) \phi_j(r2) x v_1b(r2)^2 + ! 
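+  ! (below, u12^2 is evaluated through its Gaussian expansion
+  !  sum_i coef_gauss_j_mu_x_2(i) * exp(-expo_gauss_j_mu_x_2(i) * r12^2))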
+ END_DOC + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint, i_fit + double precision :: r1(3), r2(3) + double precision :: dx, dy, dz, r12, x2, tmp1, tmp2, tmp3, coef, expo + + double precision, external :: ao_value + double precision, external :: j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_int2_u2_j1b2 = 0.d0 + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + x2 = dx * dx + dy * dy + dz * dz + r12 = dsqrt(x2) + + tmp1 = j1b_nucl(r2) + tmp2 = tmp1 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) + + tmp3 = 0.d0 + do i_fit = 1, n_max_fit_slat + expo = expo_gauss_j_mu_x_2(i_fit) + coef = coef_gauss_j_mu_x_2(i_fit) + + tmp3 += coef * dexp(-expo*x2) + enddo + + num_int2_u2_j1b2 += tmp2 * tmp3 + enddo + + return +end function num_int2_u2_j1b2 + +! --- + +double precision function num_int2_grad1u2_grad2u2_j1b2(i, j, ipoint) + + BEGIN_DOC + ! + ! \int dr2 \frac{-[erf(mu r12) -1]^2}{4} \phi_i(r2) \phi_j(r2) x v_1b(r2)^2 + ! + END_DOC + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint, i_fit + double precision :: r1(3), r2(3) + double precision :: dx, dy, dz, r12, x2, tmp1, tmp2, tmp3, coef, expo + + double precision, external :: ao_value + double precision, external :: j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_int2_grad1u2_grad2u2_j1b2 = 0.d0 + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + x2 = dx * dx + dy * dy + dz * dz + r12 = dsqrt(x2) + + tmp1 = j1b_nucl(r2) + tmp2 = tmp1 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) + + tmp3 = 0.d0 + do i_fit = 1, n_max_fit_slat + expo = expo_gauss_1_erf_x_2(i_fit) + coef = coef_gauss_1_erf_x_2(i_fit) + + tmp3 += coef * dexp(-expo*x2) + enddo + tmp3 = -0.25d0 * tmp3 + + num_int2_grad1u2_grad2u2_j1b2 += tmp2 * tmp3 + enddo + + return +end function num_int2_grad1u2_grad2u2_j1b2 + +! --- + +double precision function num_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint) + + BEGIN_DOC + ! + ! \int dr2 [erf(mu r12) -1]/r12 \phi_i(r2) \phi_j(r2) x v_1b(r2) + ! + END_DOC + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint + double precision :: r1(3), r2(3) + double precision :: dx, dy, dz, r12, tmp1, tmp2 + + double precision, external :: ao_value + double precision, external :: j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_v_ij_erf_rk_cst_mu_j1b = 0.d0 + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt( dx * dx + dy * dy + dz * dz ) + if(r12 .lt. 1d-10) cycle + + tmp1 = (derf(mu_erf * r12) - 1.d0) / r12 + tmp2 = tmp1 * ao_value(i, r2) * ao_value(j, r2) * j1b_nucl(r2) * final_weight_at_r_vector(jpoint) + + num_v_ij_erf_rk_cst_mu_j1b += tmp2 + enddo + + return +end function num_v_ij_erf_rk_cst_mu_j1b + +! 
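
! the num_* references above and below all share the same quadrature pattern;
! one possible factorization, sketched here with hypothetical names
! (num_kernel_ij_j1b and the dummy procedure kernel), passes the two-electron
! kernel as an argument:

double precision function num_kernel_ij_j1b(kernel, i, j, ipoint)
  implicit none
  double precision, external :: kernel
  integer, intent(in) :: i, j, ipoint
  integer :: jpoint
  double precision :: r1(3), r2(3)
  double precision, external :: ao_value, j1b_nucl

  r1(1) = final_grid_points(1,ipoint)
  r1(2) = final_grid_points(2,ipoint)
  r1(3) = final_grid_points(3,ipoint)

  ! \int dr2 kernel(r1,r2) \phi_i(r2) \phi_j(r2) v_1b(r2) on the DFT grid
  num_kernel_ij_j1b = 0.d0
  do jpoint = 1, n_points_final_grid
    r2(1) = final_grid_points(1,jpoint)
    r2(2) = final_grid_points(2,jpoint)
    r2(3) = final_grid_points(3,jpoint)
    num_kernel_ij_j1b += kernel(r1, r2) * ao_value(i, r2) * ao_value(j, r2) &
                       * j1b_nucl(r2) * final_weight_at_r_vector(jpoint)
  enddo

  return
end function num_kernel_ij_j1b
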
--- + +subroutine num_x_v_ij_erf_rk_cst_mu_j1b(i, j, ipoint, integ) + + BEGIN_DOC + ! + ! \int dr2 [erf(mu r12) -1]/r12 \phi_i(r2) \phi_j(r2) x v_1b(r2) x r2 + ! + END_DOC + + implicit none + + integer, intent(in) :: i, j, ipoint + double precision, intent(out) :: integ(3) + + integer :: jpoint + double precision :: r1(3), r2(3), grad(3) + double precision :: dx, dy, dz, r12, tmp1, tmp2 + double precision :: tmp_x, tmp_y, tmp_z + + double precision, external :: ao_value + double precision, external :: j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + tmp_x = 0.d0 + tmp_y = 0.d0 + tmp_z = 0.d0 + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt( dx * dx + dy * dy + dz * dz ) + if(r12 .lt. 1d-10) cycle + + tmp1 = (derf(mu_erf * r12) - 1.d0) / r12 + tmp2 = tmp1 * ao_value(i, r2) * ao_value(j, r2) * j1b_nucl(r2) * final_weight_at_r_vector(jpoint) + + tmp_x += tmp2 * r2(1) + tmp_y += tmp2 * r2(2) + tmp_z += tmp2 * r2(3) + enddo + + integ(1) = tmp_x + integ(2) = tmp_y + integ(3) = tmp_z + + return +end subroutine num_x_v_ij_erf_rk_cst_mu_j1b + +! --- + subroutine num_grad_1_u_ij_mu(i, j, ipoint, integ) implicit none @@ -55,14 +297,11 @@ subroutine num_grad_1_u_ij_mu(i, j, ipoint, integ) double precision, intent(out) :: integ(3) integer :: jpoint - double precision :: tmp, r1(3), r2(3) + double precision :: tmp, r1(3), r2(3), grad(3) double precision :: tmp_x, tmp_y, tmp_z double precision, external :: ao_value double precision, external :: j12_nucl - double precision, external :: grad1_x_j12_mu_num, grad1_x_j12_mu_exc - double precision, external :: grad1_y_j12_mu_num, grad1_y_j12_mu_exc - double precision, external :: grad1_z_j12_mu_num, grad1_z_j12_mu_exc r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) @@ -77,9 +316,11 @@ subroutine num_grad_1_u_ij_mu(i, j, ipoint, integ) r2(3) = final_grid_points(3,jpoint) tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) - tmp_x += tmp * (-1.d0 * grad1_x_j12_mu_exc(r1, r2)) - tmp_y += tmp * (-1.d0 * grad1_y_j12_mu_exc(r1, r2)) - tmp_z += tmp * (-1.d0 * grad1_z_j12_mu_exc(r1, r2)) + call grad1_j12_mu_exc(r1, r2, grad) + + tmp_x += tmp * (-1.d0 * grad(1)) + tmp_y += tmp * (-1.d0 * grad(2)) + tmp_z += tmp * (-1.d0 * grad(3)) enddo integ(1) = tmp_x From ad01d2b2e46144219b15dc25a9d85744184e9d59 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Thu, 20 Oct 2022 11:58:14 +0200 Subject: [PATCH 08/10] v0 of new Jast added --- .../grad_lapl_jmu_modif.irp.f | 4 +- src/bi_ort_ints/semi_num_ints_mo.irp.f | 220 ++++++++++++------ src/bi_ort_ints/three_body_ints_bi_ort.irp.f | 73 ++++-- src/non_h_ints_mu/debug_integ_jmu_modif.irp.f | 24 +- src/non_h_ints_mu/grad_squared.irp.f | 66 ++++-- src/non_h_ints_mu/new_grad_tc.irp.f | 143 +++++++----- src/non_h_ints_mu/numerical_integ.irp.f | 49 +--- 7 files changed, 355 insertions(+), 224 deletions(-) diff --git a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index b847a630..fab50805 100644 --- a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -90,7 +90,7 @@ END_PROVIDER BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid, 3)] BEGIN_DOC - ! 
int dr x * phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| + ! int dr x phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| END_DOC implicit none @@ -119,7 +119,7 @@ END_PROVIDER BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_num, n_points_final_grid)] BEGIN_DOC - ! int dr x * phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| + ! int dr x phi_i(r) phi_j(r) 1s_j1b(r) (erf(mu(R) |r - R|) - 1)/|r - R| END_DOC implicit none diff --git a/src/bi_ort_ints/semi_num_ints_mo.irp.f b/src/bi_ort_ints/semi_num_ints_mo.irp.f index 6c4b44c0..27fcb7de 100644 --- a/src/bi_ort_ints/semi_num_ints_mo.irp.f +++ b/src/bi_ort_ints/semi_num_ints_mo.irp.f @@ -1,81 +1,154 @@ -BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu, ( mo_num, mo_num,n_points_final_grid)] - implicit none - BEGIN_DOC -! mo_v_ki_bi_ortho_erf_rk_cst_mu(k,i,ip) = int dr chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1 )/(2|r - R_ip|) on the BI-ORTHO MO basis -! -! where phi_k(r) is a LEFT MOs and phi_i(r) is a RIGHT MO -! -! R_ip = the "ip"-th point of the DFT Grid - END_DOC - integer :: ipoint - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + +! --- + +! TODO :: optimization : transform into a DGEMM + +BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! mo_v_ki_bi_ortho_erf_rk_cst_mu(k,i,ip) = int dr chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1 )/(2|r - R_ip|) on the BI-ORTHO MO basis + ! + ! where phi_k(r) is a LEFT MOs and phi_i(r) is a RIGHT MO + ! + ! R_ip = the "ip"-th point of the DFT Grid + ! + END_DOC + + implicit none + integer :: ipoint + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (ipoint) & !$OMP SHARED (n_points_final_grid,v_ij_erf_rk_cst_mu,mo_v_ki_bi_ortho_erf_rk_cst_mu) !$OMP DO SCHEDULE (dynamic) -! TODO :: optimization : transform into a DGEMM - do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho(v_ij_erf_rk_cst_mu(1,1,ipoint),size(v_ij_erf_rk_cst_mu,1),mo_v_ki_bi_ortho_erf_rk_cst_mu(1,1,ipoint),size(mo_v_ki_bi_ortho_erf_rk_cst_mu,1)) - enddo + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho( v_ij_erf_rk_cst_mu (1,1,ipoint), size(v_ij_erf_rk_cst_mu, 1) & + , mo_v_ki_bi_ortho_erf_rk_cst_mu(1,1,ipoint), size(mo_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + enddo !$OMP END DO !$OMP END PARALLEL - mo_v_ki_bi_ortho_erf_rk_cst_mu = mo_v_ki_bi_ortho_erf_rk_cst_mu * 0.5d0 -END_PROVIDER - -BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu_transp, ( n_points_final_grid,mo_num, mo_num)] - implicit none - BEGIN_DOC -! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/(2|r - R|) on the BI-ORTHO MO basis - END_DOC - integer :: ipoint,i,j - do i = 1, mo_num - do j = 1, mo_num - do ipoint = 1, n_points_final_grid - mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,j,i) = mo_v_ki_bi_ortho_erf_rk_cst_mu(j,i,ipoint) - enddo - enddo - enddo -! FREE mo_v_ki_bi_ortho_erf_rk_cst_mu -END_PROVIDER - -BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu, ( mo_num, mo_num,3,n_points_final_grid)] - implicit none - BEGIN_DOC -! mo_x_v_ki_bi_ortho_erf_rk_cst_mu(k,i,m,ip) = int dr x(m) * chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1)/2|r - R_ip| on the BI-ORTHO MO basis -! -! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => x(m) = x, m=2 => x(m) = y, m=3 => x(m) = z, -! -! 
R_ip = the "ip"-th point of the DFT Grid - END_DOC - integer :: ipoint,m - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (ipoint,m) & - !$OMP SHARED (n_points_final_grid,x_v_ij_erf_rk_cst_mu_transp,mo_x_v_ki_bi_ortho_erf_rk_cst_mu) - !$OMP DO SCHEDULE (dynamic) -! TODO :: optimization : transform into a DGEMM - do ipoint = 1, n_points_final_grid - do m = 1, 3 - call ao_to_mo_bi_ortho(x_v_ij_erf_rk_cst_mu_transp(1,1,m,ipoint),size(x_v_ij_erf_rk_cst_mu_transp,1),mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,m,ipoint),size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu,1)) - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - mo_x_v_ki_bi_ortho_erf_rk_cst_mu = 0.5d0 * mo_x_v_ki_bi_ortho_erf_rk_cst_mu + mo_v_ki_bi_ortho_erf_rk_cst_mu = mo_v_ki_bi_ortho_erf_rk_cst_mu * 0.5d0 END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, 3, mo_num, mo_num)] + +BEGIN_PROVIDER [ double precision, mo_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, mo_num, mo_num)] + + BEGIN_DOC + ! + ! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/(2|r - R|) on the BI-ORTHO MO basis + ! + END_DOC + implicit none - integer :: i, j, m, ipoint + integer :: ipoint, i, j + do i = 1, mo_num do j = 1, mo_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,m,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,m,ipoint) - enddo + do ipoint = 1, n_points_final_grid + mo_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,j,i) = mo_v_ki_bi_ortho_erf_rk_cst_mu(j,i,ipoint) + enddo + enddo + enddo + +! FREE mo_v_ki_bi_ortho_erf_rk_cst_mu + +END_PROVIDER + +! --- + +! TODO :: optimization : transform into a DGEMM + +BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo_num, 3, n_points_final_grid)] + + BEGIN_DOC + ! + ! mo_x_v_ki_bi_ortho_erf_rk_cst_mu(k,i,m,ip) = int dr x(m) * chi_k(r) phi_i(r) (erf(mu |r - R_ip|) - 1)/2|r - R_ip| on the BI-ORTHO MO basis + ! + ! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => x(m) = x, m=2 => x(m) = y, m=3 => x(m) = z, + ! + ! R_ip = the "ip"-th point of the DFT Grid + ! + END_DOC + + implicit none + integer :: ipoint + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid,x_v_ij_erf_rk_cst_mu_transp,mo_x_v_ki_bi_ortho_erf_rk_cst_mu) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + + call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,1,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & + , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,1,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + + call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,2,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & + , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,2,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + + call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,3,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & + , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,3,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) + + enddo + !$OMP END DO + !$OMP END PARALLEL + + mo_x_v_ki_bi_ortho_erf_rk_cst_mu = 0.5d0 * mo_x_v_ki_bi_ortho_erf_rk_cst_mu + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, int2_grad1_u12_bimo, (3, ao_num, ao_num, n_points_final_grid)] + + BEGIN_DOC + ! + ! int2_grad1_u12_bimo(:,k,i,ipoint) = \int dr2 [-1 * \grad_r1 J(r1,r2)] \chi_k(r2) \phi_i(r2) + ! 
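+  ! (i.e. int2_grad1_u12_ao transformed, component by component, to the
+  !  bi-orthogonal MO basis with ao_to_mo_bi_ortho, see below)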
+ END_DOC + + implicit none + integer :: ipoint + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid,int2_grad1_u12_ao,int2_grad1_u12_bimo) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + + call ao_to_mo_bi_ortho( int2_grad1_u12_ao (1,1,1,ipoint), size(int2_grad1_u12_ao , 1) & + , int2_grad1_u12_bimo(1,1,1,ipoint), size(int2_grad1_u12_bimo, 1) ) + + call ao_to_mo_bi_ortho( int2_grad1_u12_ao (2,1,1,ipoint), size(int2_grad1_u12_ao , 1) & + , int2_grad1_u12_bimo(2,1,1,ipoint), size(int2_grad1_u12_bimo, 1) ) + + call ao_to_mo_bi_ortho( int2_grad1_u12_ao (3,1,1,ipoint), size(int2_grad1_u12_ao , 1) & + , int2_grad1_u12_bimo(3,1,1,ipoint), size(int2_grad1_u12_bimo, 1) ) + + enddo + !$OMP END DO + !$OMP END PARALLEL + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp, (n_points_final_grid, 3, mo_num, mo_num)] + + implicit none + integer :: i, j, ipoint + + do i = 1, mo_num + do j = 1, mo_num + do ipoint = 1, n_points_final_grid + mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,1,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,1,ipoint) + mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,2,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,2,ipoint) + mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp(ipoint,3,j,i) = mo_x_v_ki_bi_ortho_erf_rk_cst_mu(j,i,3,ipoint) enddo enddo enddo @@ -83,14 +156,15 @@ END_PROVIDER ! --- - BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk, (n_points_final_grid, 3, mo_num, mo_num)] + BEGIN_DOC - ! x_W_ki_bi_ortho_erf_rk(ip,m,k,i) = \int dr chi_k(r) (1 - erf(mu |r-R_ip|)) (x(m)-X(m)_ip) phi_i(r) ON THE BI-ORTHO MO BASIS -! -! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => X(m) = x, m=2 => X(m) = y, m=3 => X(m) = z, -! -! R_ip = the "ip"-th point of the DFT Grid + ! + ! x_W_ki_bi_ortho_erf_rk(ip,m,k,i) = \int dr chi_k(r) \frac{(1 - erf(mu |r-R_ip|))}{2|r-R_ip|} (x(m)-R_ip(m)) phi_i(r) ON THE BI-ORTHO MO BASIS + ! + ! where chi_k(r)/phi_i(r) are left/right MOs, m=1 => X(m) = x, m=2 => X(m) = y, m=3 => X(m) = z, + ! + ! R_ip = the "ip"-th point of the DFT Grid END_DOC implicit none @@ -100,7 +174,7 @@ BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk, (n_points_final_grid, double precision :: xyz double precision :: wall0, wall1 - print*,'providing x_W_ki_bi_ortho_erf_rk ...' + print*, ' providing x_W_ki_bi_ortho_erf_rk ...' call wall_time(wall0) !$OMP PARALLEL & @@ -126,7 +200,7 @@ BEGIN_PROVIDER [ double precision, x_W_ki_bi_ortho_erf_rk, (n_points_final_grid, ! FREE mo_x_v_ki_bi_ortho_erf_rk_cst_mu_transp call wall_time(wall1) - print*,'time to provide x_W_ki_bi_ortho_erf_rk = ',wall1 - wall0 + print *, ' time to provide x_W_ki_bi_ortho_erf_rk = ', wall1 - wall0 END_PROVIDER diff --git a/src/bi_ort_ints/three_body_ints_bi_ort.irp.f b/src/bi_ort_ints/three_body_ints_bi_ort.irp.f index 1fe27ab1..12361ace 100644 --- a/src/bi_ort_ints/three_body_ints_bi_ort.irp.f +++ b/src/bi_ort_ints/three_body_ints_bi_ort.irp.f @@ -53,26 +53,55 @@ BEGIN_PROVIDER [ double precision, three_body_ints_bi_ort, (mo_num, mo_num, mo_n END_PROVIDER -subroutine give_integrals_3_body_bi_ort(n,l,k,m,j,i,integral) - implicit none - double precision, intent(out) :: integral - integer, intent(in) :: n,l,k,m,j,i - double precision :: weight - BEGIN_DOC -! 
with a BI ORTHONORMAL ORBITALS - END_DOC - integer :: ipoint,mm - integral = 0.d0 - do mm = 1, 3 - do ipoint = 1, n_points_final_grid - weight = final_weight_at_r_vector(ipoint) - integral += weight * mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & - * x_W_ki_bi_ortho_erf_rk(ipoint,mm,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,mm,l,j) - integral += weight * mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & - * x_W_ki_bi_ortho_erf_rk(ipoint,mm,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,mm,k,i) - integral += weight * mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & - * x_W_ki_bi_ortho_erf_rk(ipoint,mm,l,j) * x_W_ki_bi_ortho_erf_rk(ipoint,mm,k,i) - enddo - enddo -end +! --- + +subroutine give_integrals_3_body_bi_ort(n, l, k, m, j, i, integral) + + BEGIN_DOC + ! + ! < n l k | -L | m j i > with a BI-ORTHONORMAL ORBITALS + ! + END_DOC + + implicit none + integer, intent(in) :: n,l,k,m,j,i + double precision, intent(out) :: integral + integer :: ipoint + double precision :: weight + + integral = 0.d0 + do ipoint = 1, n_points_final_grid + weight = final_weight_at_r_vector(ipoint) + +! integral += weight * mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & +! * ( x_W_ki_bi_ortho_erf_rk(ipoint,1,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,1,l,j) & +! + x_W_ki_bi_ortho_erf_rk(ipoint,2,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,2,l,j) & +! + x_W_ki_bi_ortho_erf_rk(ipoint,3,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,3,l,j) ) +! integral += weight * mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & +! * ( x_W_ki_bi_ortho_erf_rk(ipoint,1,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,1,k,i) & +! + x_W_ki_bi_ortho_erf_rk(ipoint,2,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,2,k,i) & +! + x_W_ki_bi_ortho_erf_rk(ipoint,3,n,m) * x_W_ki_bi_ortho_erf_rk(ipoint,3,k,i) ) +! integral += weight * mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & +! * ( x_W_ki_bi_ortho_erf_rk(ipoint,1,l,j) * x_W_ki_bi_ortho_erf_rk(ipoint,1,k,i) & +! + x_W_ki_bi_ortho_erf_rk(ipoint,2,l,j) * x_W_ki_bi_ortho_erf_rk(ipoint,2,k,i) & +! + x_W_ki_bi_ortho_erf_rk(ipoint,3,l,j) * x_W_ki_bi_ortho_erf_rk(ipoint,3,k,i) ) + + integral += weight * mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & + * ( int2_grad1_u12_bimo(1,ipoint,n,m) * int2_grad1_u12_bimo(1,ipoint,l,j) & + + int2_grad1_u12_bimo(2,ipoint,n,m) * int2_grad1_u12_bimo(2,ipoint,l,j) & + + int2_grad1_u12_bimo(3,ipoint,n,m) * int2_grad1_u12_bimo(3,ipoint,l,j) ) + integral += weight * mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & + * ( int2_grad1_u12_bimo(1,ipoint,n,m) * int2_grad1_u12_bimo(1,ipoint,k,i) & + + int2_grad1_u12_bimo(2,ipoint,n,m) * int2_grad1_u12_bimo(2,ipoint,k,i) & + + int2_grad1_u12_bimo(3,ipoint,n,m) * int2_grad1_u12_bimo(3,ipoint,k,i) ) + integral += weight * mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & + * ( int2_grad1_u12_bimo(1,ipoint,l,j) * int2_grad1_u12_bimo(1,ipoint,k,i) & + + int2_grad1_u12_bimo(2,ipoint,l,j) * int2_grad1_u12_bimo(2,ipoint,k,i) & + + int2_grad1_u12_bimo(3,ipoint,l,j) * int2_grad1_u12_bimo(3,ipoint,k,i) ) + + enddo + +end subroutine give_integrals_3_body_bi_ort + +! 
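
! minimal usage sketch (hypothetical subroutine, assuming mo_num >= 3):
! the integrand written above is invariant under the exchange of the
! (n,m) and (l,j) index pairs, which gives a cheap consistency check

subroutine check_three_body_symmetry()
  implicit none
  double precision :: int_direct, int_swapped
  call give_integrals_3_body_bi_ort(1, 2, 3, 1, 2, 3, int_direct)
  call give_integrals_3_body_bi_ort(2, 1, 3, 2, 1, 3, int_swapped)
  print *, ' <1 2 3| -L |1 2 3>     = ', int_direct
  print *, ' pair-exchange residual = ', dabs(int_direct - int_swapped)
end subroutine check_three_body_symmetry
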
--- diff --git a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f index e59b5f7a..7b99cc91 100644 --- a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f +++ b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -34,7 +34,7 @@ program debug_integ_jmu_modif !call test_int2_u2_j1b2() !call test_int2_grad1u2_grad2u2_j1b2() - !call test_grad_1_u_ij_mu() + !call test_int2_grad1_u12_ao() !call test_gradu_squared_u_ij_mu() end @@ -287,16 +287,16 @@ end subroutine test_int2_grad1u2_grad2u2_j1b2 ! --- -subroutine test_grad_1_u_ij_mu() +subroutine test_int2_grad1_u12_ao() implicit none integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz double precision :: integ(3) - print*, ' test_grad_1_u_ij_mu ...' + print*, ' test_int2_grad1_u12_ao ...' - PROVIDE grad_1_u_ij_mu + PROVIDE int2_grad1_u12_ao eps_ij = 1d-6 acc_tot = 0.d0 @@ -305,13 +305,13 @@ subroutine test_grad_1_u_ij_mu() do j = 1, ao_num do i = 1, ao_num - call num_grad_1_u_ij_mu(i, j, ipoint, integ) + call num_int2_grad1_u12_ao(i, j, ipoint, integ) - i_exc = grad_1_u_ij_mu(i,j,ipoint,1) + i_exc = int2_grad1_u12_ao(1,i,j,ipoint) i_num = integ(1) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in x part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' problem in x part of int2_grad1_u12_ao on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -319,11 +319,11 @@ subroutine test_grad_1_u_ij_mu() acc_tot += acc_ij normalz += dabs(i_num) - i_exc = grad_1_u_ij_mu(i,j,ipoint,2) + i_exc = int2_grad1_u12_ao(2,i,j,ipoint) i_num = integ(2) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in y part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' problem in y part of int2_grad1_u12_ao on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -331,11 +331,11 @@ subroutine test_grad_1_u_ij_mu() acc_tot += acc_ij normalz += dabs(i_num) - i_exc = grad_1_u_ij_mu(i,j,ipoint,3) + i_exc = int2_grad1_u12_ao(3,i,j,ipoint) i_num = integ(3) acc_ij = dabs(i_exc - i_num) if(acc_ij .gt. eps_ij) then - print *, ' problem in z part of grad_1_u_ij_mu on', i, j, ipoint + print *, ' problem in z part of int2_grad1_u12_ao on', i, j, ipoint print *, ' analyt integ = ', i_exc print *, ' numeri integ = ', i_num print *, ' diff = ', acc_ij @@ -352,7 +352,7 @@ subroutine test_grad_1_u_ij_mu() print*, ' normalz = ', normalz return -end subroutine test_grad_1_u_ij_mu +end subroutine test_int2_grad1_u12_ao ! --- diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f index b9c98ea9..bf37c551 100644 --- a/src/non_h_ints_mu/grad_squared.irp.f +++ b/src/non_h_ints_mu/grad_squared.irp.f @@ -4,20 +4,34 @@ ! TODO : strong optmization : write the loops in a different way ! : for each couple of AO, the gaussian product are done once for all -BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_points_final_grid)] +BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num, n_points_final_grid) ] BEGIN_DOC - ! - ! -1/2 [ (grad_1 u)^2 + (grad_2 u^2)] = - 1/4 * (1 - erf(mu*r12))^2 ! + ! if J(r1,r2) = u12: + ! + ! gradu_squared_u_ij_mu = -0.50 x \int r2 [ (grad_1 u12)^2 + (grad_2 u12^2)] \phi_i(2) \phi_j(2) + ! = -0.25 x \int r2 (1 - erf(mu*r12))^2 \phi_i(2) \phi_j(2) ! and ! (1 - erf(mu*r12))^2 = \sum_i coef_gauss_1_erf_x_2(i) * exp(-expo_gauss_1_erf_x_2(i) * r12^2) ! 
+ ! if J(r1,r2) = u12 x v1 x v2 + ! + ! gradu_squared_u_ij_mu = -0.50 x \int r2 \phi_i(2) \phi_j(2) [ v1^2 v2^2 ((grad_1 u12)^2 + (grad_2 u12^2)]) + u12^2 v2^2 (grad_1 v1)^2 + 2 u12 v1 v2^2 (grad_1 u12) . (grad_1 v1) ] + ! = -0.25 x v1^2 \int r2 \phi_i(2) \phi_j(2) [1 - erf(mu r12)]^2 v2^2 + ! + -0.50 x (grad_1 v1)^2 \int r2 \phi_i(2) \phi_j(2) u12^2 v2^2 + ! + -1.00 x v1 (grad_1 v1) \int r2 \phi_i(2) \phi_j(2) (grad_1 u12) v2^2 + ! = v1^2 x int2_grad1u2_grad2u2_j1b2 + ! + -0.5 x (grad_1 v1)^2 x int2_u2_j1b2 + ! + -1.0 X V1 x (grad_1 v1) \cdot int2_u_grad1u_x_j1b + ! + ! END_DOC implicit none integer :: ipoint, i, j, m, igauss - double precision :: r(3), delta, coef, tmp + double precision :: r(3), delta, coef + double precision :: tmp_v, tmp_x, tmp_y, tmp_z, tmp1, tmp2, tmp3, tmp4, tmp5 double precision :: time0, time1 double precision, external :: overlap_gauss_r12_ao @@ -27,13 +41,28 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num,n_poin PROVIDE j1b_type j1b_pen if(j1b_type .eq. 3) then - ! v1_1b^2 \int d2 \phi_i(2) \phi_j(2) \frac{-[1 - \erf(\mu r12)]^2}{4} v2_1b^2 do ipoint = 1, n_points_final_grid - tmp = v_1b(ipoint) * v_1b(ipoint) + + tmp_v = v_1b (ipoint) + tmp_x = v_1b_grad(1,ipoint) + tmp_y = v_1b_grad(2,ipoint) + tmp_z = v_1b_grad(3,ipoint) + + tmp1 = tmp_v * tmp_v + tmp2 = 0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) + tmp3 = tmp_v * tmp_x + tmp4 = tmp_v * tmp_y + tmp5 = tmp_v * tmp_z + do j = 1, ao_num do i = 1, ao_num - gradu_squared_u_ij_mu(j,i,ipoint) += tmp * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + + gradu_squared_u_ij_mu(j,i,ipoint) += tmp1 * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) & + - tmp2 * int2_u2_j1b2 (i,j,ipoint) & + - tmp3 * int2_u_grad1u_x_j1b (1,i,j,ipoint) & + - tmp4 * int2_u_grad1u_x_j1b (2,i,j,ipoint) & + - tmp5 * int2_u_grad1u_x_j1b (3,i,j,ipoint) enddo enddo enddo @@ -74,22 +103,27 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao implicit none integer :: ipoint, i, j, k, l - double precision :: contrib, weight1 + double precision :: contrib, weight1, ao_k_r, ao_i_r double precision, allocatable :: ac_mat(:,:,:,:) allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) ac_mat = 0.d0 do ipoint = 1, n_points_final_grid - weight1 = final_weight_at_r_vector(ipoint) + weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - contrib = weight1 * 0.5d0 * (aos_in_r_array_transp(ipoint,k) * aos_in_r_array_transp(ipoint,i)) - ! \int dr1 phi_k(r1) phi_i(r1) . \int dr2 |\grad_1 u(r1,r2)|^2 \phi_l(r2) \phi_j(r2) - ac_mat(k,i,l,j) += gradu_squared_u_ij_mu(l,j,ipoint) * contrib + do i = 1, ao_num + ao_i_r = aos_in_r_array_transp(ipoint,i) + + do k = 1, ao_num + ao_k_r = aos_in_r_array_transp(ipoint,k) + + do j = 1, ao_num + do l = 1, ao_num + + contrib = gradu_squared_u_ij_mu(l,j,ipoint) * ao_k_r * ao_i_r + + ac_mat(k,i,l,j) += weight1 * contrib enddo enddo enddo diff --git a/src/non_h_ints_mu/new_grad_tc.irp.f b/src/non_h_ints_mu/new_grad_tc.irp.f index 9832e81d..db659520 100644 --- a/src/non_h_ints_mu/new_grad_tc.irp.f +++ b/src/non_h_ints_mu/new_grad_tc.irp.f @@ -1,15 +1,28 @@ ! --- -BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] +BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao, (3, ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! - ! grad_1_u_ij_mu(i,j,ipoint) = \int dr2 [-1 * \grad_r1 u(r1,r2)] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) - ! 
= \int dr2 [(r1 - r2) (erf(mu * r12)-1)/2 r_12] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) + ! int2_grad1_u12_ao(:,i,j,ipoint) = \int dr2 [-1 * \grad_r1 J(r1,r2)] \phi_i(r2) \phi_j(r2) ! ! where r1 = r(ipoint) ! + ! if J(r1,r2) = u12: + ! + ! int2_grad1_u12_ao(:,i,j,ipoint) = 0.5 x \int dr2 [(r1 - r2) (erf(mu * r12)-1)r_12] \phi_i(r2) \phi_j(r2) + ! = 0.5 * [ v_ij_erf_rk_cst_mu(i,j,ipoint) * r(:) - x_v_ij_erf_rk_cst_mu(i,j,ipoint,:) ] + ! + ! if J(r1,r2) = u12 x v1 x v2 + ! + ! int2_grad1_u12_ao(:,i,j,ipoint) = v1 x [ 0.5 x \int dr2 [(r1 - r2) (erf(mu * r12)-1)r_12] v2 \phi_i(r2) \phi_j(r2) ] + ! + \grad_1 v1 x [ \int dr2 u12 v2 \phi_i(r2) \phi_j(r2) ] + ! = 0.5 v_1b(ipoint) * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) * r(:) + ! - 0.5 v_1b(ipoint) * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,:) + ! + v_1b_grad[:,ipoint] * v_ij_u_cst_mu_j1b(i,j,ipoint) + ! + ! END_DOC implicit none @@ -25,10 +38,10 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_fin y = final_grid_points(2,ipoint) z = final_grid_points(3,ipoint) - tmp0 = v_1b (ipoint) - tmp_x = v_1b_grad(1,ipoint) - tmp_y = v_1b_grad(2,ipoint) - tmp_z = v_1b_grad(3,ipoint) + tmp0 = 0.5d0 * v_1b(ipoint) + tmp_x = v_1b_grad(1,ipoint) + tmp_y = v_1b_grad(2,ipoint) + tmp_z = v_1b_grad(3,ipoint) do j = 1, ao_num do i = 1, ao_num @@ -36,9 +49,9 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_fin tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) tmp2 = v_ij_u_cst_mu_j1b(i,j,ipoint) - grad_1_u_ij_mu(i,j,ipoint,1) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + tmp_x * tmp2 - grad_1_u_ij_mu(i,j,ipoint,2) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + tmp_y * tmp2 - grad_1_u_ij_mu(i,j,ipoint,3) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + tmp_z * tmp2 + int2_grad1_u12_ao(1,i,j,ipoint) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + tmp_x * tmp2 + int2_grad1_u12_ao(2,i,j,ipoint) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + tmp_y * tmp2 + int2_grad1_u12_ao(3,i,j,ipoint) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + tmp_z * tmp2 enddo enddo enddo @@ -51,61 +64,87 @@ BEGIN_PROVIDER [ double precision, grad_1_u_ij_mu, (ao_num, ao_num, n_points_fin z = final_grid_points(3,ipoint) do j = 1, ao_num do i = 1, ao_num - grad_1_u_ij_mu(i,j,ipoint,1) = v_ij_erf_rk_cst_mu(i,j,ipoint) * x - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) - grad_1_u_ij_mu(i,j,ipoint,2) = v_ij_erf_rk_cst_mu(i,j,ipoint) * y - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) - grad_1_u_ij_mu(i,j,ipoint,3) = v_ij_erf_rk_cst_mu(i,j,ipoint) * z - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) + int2_grad1_u12_ao(1,i,j,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) * x - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) + int2_grad1_u12_ao(2,i,j,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) * y - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) + int2_grad1_u12_ao(3,i,j,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) * z - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) enddo enddo enddo - endif + int2_grad1_u12_ao *= 0.5d0 - grad_1_u_ij_mu *= 0.5d0 + endif END_PROVIDER ! --- BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao, (ao_num, ao_num, ao_num, ao_num)] - implicit none - BEGIN_DOC - ! tc_grad_and_lapl_ao(k,i,l,j) = - ! - ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) - ! - ! This is obtained by integration by parts. 
- END_DOC - integer :: ipoint,i,j,k,l,m - double precision :: contrib,weight1 - double precision, allocatable :: ac_mat(:,:,:,:) - allocate(ac_mat(ao_num, ao_num, ao_num, ao_num)) - ac_mat = 0.d0 - do m = 1, 3 - do ipoint = 1, n_points_final_grid - weight1 = final_weight_at_r_vector(ipoint) - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - contrib = weight1 *0.5D0* (aos_in_r_array_transp(ipoint,k) * aos_grad_in_r_array_transp_bis(ipoint,i,m) & - -aos_in_r_array_transp(ipoint,i) * aos_grad_in_r_array_transp_bis(ipoint,k,m) ) - ! \int dr1 phi_k(r1) \grad_r1 phi_i(r1) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) - ac_mat(k,i,l,j) += grad_1_u_ij_mu(l,j,ipoint,m) * contrib - enddo - enddo - enddo - enddo - enddo - enddo - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - tc_grad_and_lapl_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + BEGIN_DOC + ! + ! tc_grad_and_lapl_ao(k,i,l,j) = < k l | -1/2 \Delta_1 u(r1,r2) - \grad_1 u(r1,r2) | ij > + ! + ! = 1/2 \int dr1 (phi_k(r1) \grad_r1 phi_i(r1) - phi_i(r1) \grad_r1 phi_k(r1)) . \int dr2 \grad_r1 u(r1,r2) \phi_l(r2) \phi_j(r2) + ! + ! This is obtained by integration by parts. + ! + END_DOC + + implicit none + integer :: ipoint, i, j, k, l + double precision :: contrib, weight1, contrib_x, contrib_y, contrib_z + double precision :: ao_k_r, ao_k_dx, ao_k_dy, ao_k_dz + double precision :: ao_i_r, ao_i_dx, ao_i_dy, ao_i_dz + double precision, allocatable :: ac_mat(:,:,:,:) + + allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) + ac_mat = 0.d0 + + do ipoint = 1, n_points_final_grid + weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) + + do i = 1, ao_num + ao_i_r = aos_in_r_array_transp (ipoint,i) + ao_i_dx = aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_dy = aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_dz = aos_grad_in_r_array_transp_bis(ipoint,i,3) + + do k = 1, ao_num + ao_k_r = aos_in_r_array_transp (ipoint,k) + ao_k_dx = aos_grad_in_r_array_transp_bis(ipoint,k,1) + ao_k_dy = aos_grad_in_r_array_transp_bis(ipoint,k,2) + ao_k_dz = aos_grad_in_r_array_transp_bis(ipoint,k,3) + + do j = 1, ao_num + do l = 1, ao_num + + contrib_x = int2_grad1_u12_ao(1,l,j,ipoint) * ( ao_k_r * ao_i_dx - ao_i_r * ao_k_dx ) + contrib_y = int2_grad1_u12_ao(2,l,j,ipoint) * ( ao_k_r * ao_i_dy - ao_i_r * ao_k_dy ) + contrib_z = int2_grad1_u12_ao(3,l,j,ipoint) * ( ao_k_r * ao_i_dz - ao_i_r * ao_k_dz ) + + contrib = weight1 * ( contrib_x + contrib_y + contrib_z ) + + ac_mat(k,i,l,j) += contrib + enddo + enddo + enddo enddo - enddo enddo - enddo + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + tc_grad_and_lapl_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + enddo + enddo + enddo + enddo + + deallocate(ac_mat) END_PROVIDER + +! --- + diff --git a/src/non_h_ints_mu/numerical_integ.irp.f b/src/non_h_ints_mu/numerical_integ.irp.f index 17b666aa..dae68649 100644 --- a/src/non_h_ints_mu/numerical_integ.irp.f +++ b/src/non_h_ints_mu/numerical_integ.irp.f @@ -1,51 +1,6 @@ ! --- -! -! \int dr2 [-1 * \grad_r1 u(r1,r2)] \phi_i(r2) \phi_j(r2) x 1s_j1b(r2) -! - -!BEGIN_PROVIDER [ double precision, num_grad_1_u_ij_mu, (ao_num, ao_num, n_points_final_grid, 3)] -! -! implicit none -! -! integer :: i, j, ipoint, jpoint -! double precision :: tmp, r1(3), r2(3), grad(3) -! -! double precision, external :: ao_value -! double precision, external :: j12_nucl -! -! num_grad_1_u_ij_mu = 0.d0 -! -! do j = 1, ao_num -! do i = 1, ao_num -! -! do ipoint = 1, n_points_final_grid -! 
r1(1) = final_grid_points(1,ipoint) -! r1(2) = final_grid_points(2,ipoint) -! r1(3) = final_grid_points(3,ipoint) -! -! do jpoint = 1, n_points_final_grid -! r2(1) = final_grid_points(1,jpoint) -! r2(2) = final_grid_points(2,jpoint) -! r2(3) = final_grid_points(3,jpoint) -! tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) -! -! call grad1_j12_mu_exc(r1, r2, grad) -! -! num_grad_1_u_ij_mu(i,j,ipoint,1) += tmp * (-1.d0 * grad(1)) -! num_grad_1_u_ij_mu(i,j,ipoint,2) += tmp * (-1.d0 * grad(2)) -! num_grad_1_u_ij_mu(i,j,ipoint,3) += tmp * (-1.d0 * grad(3)) -! enddo -! -! enddo -! enddo -! enddo -! -!END_PROVIDER - -! --- - double precision function num_v_ij_u_cst_mu_j1b(i, j, ipoint) BEGIN_DOC @@ -289,7 +244,7 @@ end subroutine num_x_v_ij_erf_rk_cst_mu_j1b ! --- -subroutine num_grad_1_u_ij_mu(i, j, ipoint, integ) +subroutine num_int2_grad1_u12_ao(i, j, ipoint, integ) implicit none @@ -328,7 +283,7 @@ subroutine num_grad_1_u_ij_mu(i, j, ipoint, integ) integ(3) = tmp_z return -end subroutine num_grad_1_u_ij_mu +end subroutine num_int2_grad1_u12_ao ! --- From 813f2c360182e4de8ac087525fbca7d244f513a5 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Thu, 20 Oct 2022 15:48:34 +0200 Subject: [PATCH 09/10] Hij in bimo added --- src/ao_tc_eff_map/potential.irp.f | 48 ++-- src/ao_two_e_ints/two_e_integrals.irp.f | 4 +- src/bi_ort_ints/biorthog_mo_for_h.irp.f | 183 +++++++++++++ src/bi_ort_ints/one_e_bi_ort.irp.f | 15 +- src/bi_ort_ints/total_twoe_pot.irp.f | 289 +++++++++++--------- src/tc_bi_ortho/compute_deltamu_right.irp.f | 52 ++++ src/tc_bi_ortho/dressing_vectors_lr.irp.f | 54 ++++ src/tc_bi_ortho/h_biortho.irp.f | 249 +++++++++++++++++ src/tc_bi_ortho/slater_tc.irp.f | 2 +- 9 files changed, 744 insertions(+), 152 deletions(-) create mode 100644 src/bi_ort_ints/biorthog_mo_for_h.irp.f create mode 100644 src/tc_bi_ortho/compute_deltamu_right.irp.f create mode 100644 src/tc_bi_ortho/dressing_vectors_lr.irp.f create mode 100644 src/tc_bi_ortho/h_biortho.irp.f diff --git a/src/ao_tc_eff_map/potential.irp.f b/src/ao_tc_eff_map/potential.irp.f index 2f7ea4d6..d8b3c442 100644 --- a/src/ao_tc_eff_map/potential.irp.f +++ b/src/ao_tc_eff_map/potential.irp.f @@ -94,30 +94,40 @@ BEGIN_PROVIDER [double precision, expos_slat_gauss_1_erf_x, (n_fit_1_erf_x)] expos_slat_gauss_1_erf_x(2) = 0.756023d0 END_PROVIDER +! --- + BEGIN_PROVIDER [double precision, expo_gauss_1_erf_x, (n_max_fit_slat)] &BEGIN_PROVIDER [double precision, coef_gauss_1_erf_x, (n_max_fit_slat)] - implicit none - BEGIN_DOC -! (1 - erf(mu*x)) = \sum_i coef_gauss_1_erf_x(i) * exp(-expo_gauss_1_erf_x(i) * x^2) -! -! This is based on a fit of (1 - erf(mu*x)) by exp(-alpha * x) exp(-beta*mu^2x^2) -! -! and the slater function exp(-alpha * x) is fitted with n_max_fit_slat gaussians -! -! See Appendix 2 of JCP 154, 084119 (2021) - END_DOC - integer :: i - double precision :: expos(n_max_fit_slat),alpha,beta - alpha = expos_slat_gauss_1_erf_x(1) * mu_erf - call expo_fit_slater_gam(alpha,expos) - beta = expos_slat_gauss_1_erf_x(2) * mu_erf**2.d0 + + BEGIN_DOC + ! + ! (1 - erf(mu*x)) = \sum_i coef_gauss_1_erf_x(i) * exp(-expo_gauss_1_erf_x(i) * x^2) + ! + ! This is based on a fit of (1 - erf(mu*x)) by exp(-alpha * x) exp(-beta*mu^2x^2) + ! + ! and the slater function exp(-alpha * x) is fitted with n_max_fit_slat gaussians + ! + ! See Appendix 2 of JCP 154, 084119 (2021) + ! 
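+  ! (concretely, see below: expo_gauss_1_erf_x(i) = expos(i) + beta with
+  !  beta = expos_slat_gauss_1_erf_x(2) * mu_erf^2, and
+  !  coef_gauss_1_erf_x(i) = coef_fit_slat_gauss(i))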
+ END_DOC + + implicit none + integer :: i + double precision :: expos(n_max_fit_slat), alpha, beta + + alpha = expos_slat_gauss_1_erf_x(1) * mu_erf + call expo_fit_slater_gam(alpha, expos) + beta = expos_slat_gauss_1_erf_x(2) * mu_erf * mu_erf - do i = 1, n_max_fit_slat - expo_gauss_1_erf_x(i) = expos(i) + beta - coef_gauss_1_erf_x(i) = coef_fit_slat_gauss(i) - enddo + do i = 1, n_max_fit_slat + expo_gauss_1_erf_x(i) = expos(i) + beta + coef_gauss_1_erf_x(i) = coef_fit_slat_gauss(i) + enddo + END_PROVIDER +! --- + double precision function fit_1_erf_x(x) implicit none double precision, intent(in) :: x diff --git a/src/ao_two_e_ints/two_e_integrals.irp.f b/src/ao_two_e_ints/two_e_integrals.irp.f index b4b21f5c..80b4af2e 100644 --- a/src/ao_two_e_ints/two_e_integrals.irp.f +++ b/src/ao_two_e_ints/two_e_integrals.irp.f @@ -1,7 +1,7 @@ ! --- -double precision function ao_two_e_integral(i,j,k,l) +double precision function ao_two_e_integral(i, j, k, l) BEGIN_DOC ! integral of the AO basis or (ij|kl) @@ -29,7 +29,7 @@ double precision function ao_two_e_integral(i,j,k,l) if(use_cosgtos) then !print *, ' use_cosgtos for ao_two_e_integral ?', use_cosgtos - ao_two_e_integral = ao_two_e_integral_cosgtos(i,j,k,l) + ao_two_e_integral = ao_two_e_integral_cosgtos(i, j, k, l) else diff --git a/src/bi_ort_ints/biorthog_mo_for_h.irp.f b/src/bi_ort_ints/biorthog_mo_for_h.irp.f new file mode 100644 index 00000000..a8e7630b --- /dev/null +++ b/src/bi_ort_ints/biorthog_mo_for_h.irp.f @@ -0,0 +1,183 @@ + +! --- + +BEGIN_PROVIDER [double precision, ao_two_e_coul, (ao_num, ao_num, ao_num, ao_num) ] + + BEGIN_DOC + ! + ! ao_two_e_coul(k,i,l,j) = ( k i | 1/r12 | l j ) = < l k | 1/r12 | j i > + ! + END_DOC + + integer :: i, j, k, l + double precision :: integral + double precision, external :: get_ao_two_e_integral + + PROVIDE ao_integrals_map + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + + integral = get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + + ao_two_e_coul(k,i,l,j) = integral + enddo + enddo + enddo + enddo + +END_PROVIDER + +! --- + +double precision function bi_ortho_mo_coul_ints(l, k, j, i) + + BEGIN_DOC + ! + ! < mo^L_k mo^L_l | 1/r12 | mo^R_i mo^R_j > + ! + END_DOC + + implicit none + integer, intent(in) :: i, j, k, l + integer :: m, n, p, q + + bi_ortho_mo_coul_ints = 0.d0 + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do q = 1, ao_num + ! p1h1p2h2 l1 l2 r1 r2 + bi_ortho_mo_coul_ints += ao_two_e_coul(n,q,m,p) * mo_l_coef(m,l) * mo_l_coef(n,k) * mo_r_coef(p,j) * mo_r_coef(q,i) + enddo + enddo + enddo + enddo + +end function bi_ortho_mo_coul_ints + +! --- + +! TODO :: transform into DEGEMM + +BEGIN_PROVIDER [double precision, mo_bi_ortho_coul_e_chemist, (mo_num, mo_num, mo_num, mo_num)] + + BEGIN_DOC + ! + ! mo_bi_ortho_coul_e_chemist(k,i,l,j) = < k l | 1/r12 | i j > where i,j are right MOs and k,l are left MOs + ! + END_DOC + + implicit none + integer :: i, j, k, l, m, n, p, q + double precision, allocatable :: mo_tmp_1(:,:,:,:), mo_tmp_2(:,:,:,:) + + allocate(mo_tmp_1(mo_num,ao_num,ao_num,ao_num)) + mo_tmp_1 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do q = 1, ao_num + do k = 1, mo_num + ! (k n|p m) = sum_q c_qk * (q n|p m) + mo_tmp_1(k,n,p,m) += mo_l_coef_transp(k,q) * ao_two_e_coul(q,n,p,m) + enddo + enddo + enddo + enddo + enddo + + allocate(mo_tmp_2(mo_num,mo_num,ao_num,ao_num)) + mo_tmp_2 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do i = 1, mo_num + do k = 1, mo_num + ! 
(k i|p m) = sum_n c_ni * (k n|p m) + mo_tmp_2(k,i,p,m) += mo_r_coef_transp(i,n) * mo_tmp_1(k,n,p,m) + enddo + enddo + enddo + enddo + enddo + deallocate(mo_tmp_1) + + allocate(mo_tmp_1(mo_num,mo_num,mo_num,ao_num)) + mo_tmp_1 = 0.d0 + do m = 1, ao_num + do p = 1, ao_num + do l = 1, mo_num + do i = 1, mo_num + do k = 1, mo_num + mo_tmp_1(k,i,l,m) += mo_l_coef_transp(l,p) * mo_tmp_2(k,i,p,m) + enddo + enddo + enddo + enddo + enddo + deallocate(mo_tmp_2) + + mo_bi_ortho_coul_e_chemist = 0.d0 + do m = 1, ao_num + do j = 1, mo_num + do l = 1, mo_num + do i = 1, mo_num + do k = 1, mo_num + mo_bi_ortho_coul_e_chemist(k,i,l,j) += mo_r_coef_transp(j,m) * mo_tmp_1(k,i,l,m) + enddo + enddo + enddo + enddo + enddo + deallocate(mo_tmp_1) + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, mo_bi_ortho_coul_e, (mo_num, mo_num, mo_num, mo_num)] + + BEGIN_DOC + ! + ! mo_bi_ortho_coul_e(k,l,i,j) = < k l | 1/r12 | i j > where i,j are right MOs and k,l are left MOs + ! + END_DOC + + implicit none + integer :: i, j, k, l + + do j = 1, mo_num + do i = 1, mo_num + do l = 1, mo_num + do k = 1, mo_num + ! < k l | V12 | i j > (k i|l j) + mo_bi_ortho_coul_e(k,l,i,j) = mo_bi_ortho_coul_e_chemist(k,i,l,j) + enddo + enddo + enddo + enddo + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, mo_bi_ortho_one_e, (mo_num, mo_num)] + + BEGIN_DOC + ! mo_bi_ortho_one_e(k,i) = + END_DOC + + implicit none + + call ao_to_mo_bi_ortho( ao_one_e_integrals, ao_num & + , mo_bi_ortho_one_e , mo_num ) + +END_PROVIDER + +! --- + diff --git a/src/bi_ort_ints/one_e_bi_ort.irp.f b/src/bi_ort_ints/one_e_bi_ort.irp.f index a995a364..5efcb637 100644 --- a/src/bi_ort_ints/one_e_bi_ort.irp.f +++ b/src/bi_ort_ints/one_e_bi_ort.irp.f @@ -24,12 +24,17 @@ BEGIN_PROVIDER [double precision, ao_one_e_integrals_tc_tot, (ao_num,ao_num)] END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, mo_bi_ortho_tc_one_e, (mo_num, mo_num)] - implicit none - BEGIN_DOC -! mo_bi_ortho_tc_one_e(k,i) = - END_DOC - integer :: i,k,p,q + + BEGIN_DOC + ! + ! mo_bi_ortho_tc_one_e(k,i) = + ! + END_DOC + + implicit none call ao_to_mo_bi_ortho(ao_one_e_integrals_tc_tot, ao_num, mo_bi_ortho_tc_one_e, mo_num) diff --git a/src/bi_ort_ints/total_twoe_pot.irp.f b/src/bi_ort_ints/total_twoe_pot.irp.f index b71a85d2..72ded7cf 100644 --- a/src/bi_ort_ints/total_twoe_pot.irp.f +++ b/src/bi_ort_ints/total_twoe_pot.irp.f @@ -1,138 +1,177 @@ -BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_num) ] - integer :: i,j,k,l - BEGIN_DOC -! ao_two_e_tc_tot(k,i,l,j) = (ki|V^TC(r_12)|lj) = where V^TC(r_12) is the total TC operator -! -! including both hermitian and non hermitian parts. THIS IS IN CHEMIST NOTATION. -! -! WARNING :: non hermitian ! acts on "the right functions" (i,j) - END_DOC - double precision :: integral_sym, integral_nsym, get_ao_tc_sym_two_e_pot - PROVIDE ao_tc_sym_two_e_pot_in_map - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - integral_sym = get_ao_tc_sym_two_e_pot(i,j,k,l,ao_tc_sym_two_e_pot_map) - ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis - integral_nsym = ao_non_hermit_term_chemist(k,i,l,j) - ao_two_e_tc_tot(k,i,l,j) = integral_sym + integral_nsym - enddo - enddo - enddo - enddo -END_PROVIDER - - -double precision function bi_ortho_mo_ints(l,k,j,i) - implicit none - BEGIN_DOC -! -! -! WARNING :: very naive, super slow, only used to DEBUG. 
- END_DOC - integer, intent(in) :: i,j,k,l - integer :: m,n,p,q - bi_ortho_mo_ints = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do n = 1, ao_num - do q = 1, ao_num - ! p1h1p2h2 l1 l2 r1 r2 - bi_ortho_mo_ints += ao_two_e_tc_tot(n,q,m,p) * mo_l_coef(m,l) * mo_l_coef(n,k) * mo_r_coef(p,j) * mo_r_coef(q,i) - enddo - enddo - enddo - enddo - -end ! --- -BEGIN_PROVIDER [double precision, mo_bi_ortho_tc_two_e_chemist, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! mo_bi_ortho_tc_two_e_chemist(k,i,l,j) = where i,j are right MOs and k,l are left MOs - END_DOC - integer :: i,j,k,l,m,n,p,q - double precision, allocatable :: mo_tmp_1(:,:,:,:),mo_tmp_2(:,:,:,:),mo_tmp_3(:,:,:,:) +BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_num) ] -!! TODO :: transform into DEGEMM - - allocate(mo_tmp_1(mo_num,ao_num,ao_num,ao_num)) - mo_tmp_1 = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do n = 1, ao_num - do q = 1, ao_num - do k = 1, mo_num - ! (k n|p m) = sum_q c_qk * (q n|p m) - mo_tmp_1(k,n,p,m) += mo_l_coef_transp(k,q) * ao_two_e_tc_tot(q,n,p,m) - enddo + BEGIN_DOC + ! + ! ao_two_e_tc_tot(k,i,l,j) = (ki|V^TC(r_12)|lj) = where V^TC(r_12) is the total TC operator + ! + ! including both hermitian and non hermitian parts. THIS IS IN CHEMIST NOTATION. + ! + ! WARNING :: non hermitian ! acts on "the right functions" (i,j) + ! + END_DOC + + integer :: i, j, k, l + double precision :: integral_sym, integral_nsym + double precision, external :: get_ao_tc_sym_two_e_pot + + PROVIDE ao_tc_sym_two_e_pot_in_map + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + + integral_sym = get_ao_tc_sym_two_e_pot(i,j,k,l,ao_tc_sym_two_e_pot_map) + + ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis + integral_nsym = ao_non_hermit_term_chemist(k,i,l,j) + + ao_two_e_tc_tot(k,i,l,j) = integral_sym + integral_nsym + enddo + enddo enddo - enddo enddo - enddo - allocate(mo_tmp_2(mo_num,mo_num,ao_num,ao_num)) - mo_tmp_2 = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do n = 1, ao_num - do i = 1, mo_num - do k = 1, mo_num - ! (k i|p m) = sum_n c_ni * (k n|p m) - mo_tmp_2(k,i,p,m) += mo_r_coef_transp(i,n) * mo_tmp_1(k,n,p,m) - enddo - enddo - enddo - enddo - enddo - deallocate(mo_tmp_1) - allocate(mo_tmp_1(mo_num,mo_num,mo_num,ao_num)) - mo_tmp_1 = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do l = 1, mo_num - do i = 1, mo_num - do k = 1, mo_num - mo_tmp_1(k,i,l,m) += mo_l_coef_transp(l,p) * mo_tmp_2(k,i,p,m) - enddo - enddo - enddo - enddo - enddo - deallocate(mo_tmp_2) - mo_bi_ortho_tc_two_e_chemist = 0.d0 - do m = 1, ao_num - do j = 1, mo_num - do l = 1, mo_num - do i = 1, mo_num - do k = 1, mo_num - mo_bi_ortho_tc_two_e_chemist(k,i,l,j) += mo_r_coef_transp(j,m) * mo_tmp_1(k,i,l,m) - enddo - enddo - enddo - enddo - enddo END_PROVIDER +! --- + +double precision function bi_ortho_mo_ints(l, k, j, i) + + BEGIN_DOC + ! + ! + ! WARNING :: very naive, super slow, only used to DEBUG. + END_DOC + + implicit none + integer, intent(in) :: i, j, k, l + integer :: m, n, p, q + + bi_ortho_mo_ints = 0.d0 + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do q = 1, ao_num + ! p1h1p2h2 l1 l2 r1 r2 + bi_ortho_mo_ints += ao_two_e_tc_tot(n,q,m,p) * mo_l_coef(m,l) * mo_l_coef(n,k) * mo_r_coef(p,j) * mo_r_coef(q,i) + enddo + enddo + enddo + enddo + +end function bi_ortho_mo_ints + +! --- + +! 
TODO :: transform into DEGEMM + +BEGIN_PROVIDER [double precision, mo_bi_ortho_tc_two_e_chemist, (mo_num, mo_num, mo_num, mo_num)] + + BEGIN_DOC + ! + ! mo_bi_ortho_tc_two_e_chemist(k,i,l,j) = where i,j are right MOs and k,l are left MOs + ! + END_DOC + + implicit none + integer :: i, j, k, l, m, n, p, q + double precision, allocatable :: mo_tmp_1(:,:,:,:), mo_tmp_2(:,:,:,:) + + allocate(mo_tmp_1(mo_num,ao_num,ao_num,ao_num)) + mo_tmp_1 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do q = 1, ao_num + do k = 1, mo_num + ! (k n|p m) = sum_q c_qk * (q n|p m) + mo_tmp_1(k,n,p,m) += mo_l_coef_transp(k,q) * ao_two_e_tc_tot(q,n,p,m) + enddo + enddo + enddo + enddo + enddo + + allocate(mo_tmp_2(mo_num,mo_num,ao_num,ao_num)) + mo_tmp_2 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do i = 1, mo_num + do k = 1, mo_num + ! (k i|p m) = sum_n c_ni * (k n|p m) + mo_tmp_2(k,i,p,m) += mo_r_coef_transp(i,n) * mo_tmp_1(k,n,p,m) + enddo + enddo + enddo + enddo + enddo + deallocate(mo_tmp_1) + + allocate(mo_tmp_1(mo_num,mo_num,mo_num,ao_num)) + mo_tmp_1 = 0.d0 + do m = 1, ao_num + do p = 1, ao_num + do l = 1, mo_num + do i = 1, mo_num + do k = 1, mo_num + mo_tmp_1(k,i,l,m) += mo_l_coef_transp(l,p) * mo_tmp_2(k,i,p,m) + enddo + enddo + enddo + enddo + enddo + deallocate(mo_tmp_2) + + mo_bi_ortho_tc_two_e_chemist = 0.d0 + do m = 1, ao_num + do j = 1, mo_num + do l = 1, mo_num + do i = 1, mo_num + do k = 1, mo_num + mo_bi_ortho_tc_two_e_chemist(k,i,l,j) += mo_r_coef_transp(j,m) * mo_tmp_1(k,i,l,m) + enddo + enddo + enddo + enddo + enddo + deallocate(mo_tmp_1) + +END_PROVIDER + +! --- + BEGIN_PROVIDER [double precision, mo_bi_ortho_tc_two_e, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! mo_bi_ortho_tc_two_e(k,l,i,j) = where i,j are right MOs and k,l are left MOs -! -! the potential V(r_12) contains ALL TWO-E CONTRIBUTION OF THE TC-HAMILTONIAN - END_DOC - integer :: i,j,k,l - do j = 1, mo_num - do i = 1, mo_num - do l = 1, mo_num - do k = 1, mo_num - ! (k i|l j) = - mo_bi_ortho_tc_two_e(k,l,i,j) = mo_bi_ortho_tc_two_e_chemist(k,i,l,j) + + BEGIN_DOC + ! + ! mo_bi_ortho_tc_two_e(k,l,i,j) = where i,j are right MOs and k,l are left MOs + ! + ! the potential V(r_12) contains ALL TWO-E CONTRIBUTION OF THE TC-HAMILTONIAN + ! + END_DOC + + implicit none + integer :: i, j, k, l + + do j = 1, mo_num + do i = 1, mo_num + do l = 1, mo_num + do k = 1, mo_num + ! < k l | V12 | i j > (k i|l j) + mo_bi_ortho_tc_two_e(k,l,i,j) = mo_bi_ortho_tc_two_e_chemist(k,i,l,j) + enddo + enddo enddo - enddo enddo - enddo + END_PROVIDER + +! --- + diff --git a/src/tc_bi_ortho/compute_deltamu_right.irp.f b/src/tc_bi_ortho/compute_deltamu_right.irp.f new file mode 100644 index 00000000..32566cc8 --- /dev/null +++ b/src/tc_bi_ortho/compute_deltamu_right.irp.f @@ -0,0 +1,52 @@ +program compute_deltamu_right + + implicit none + + my_grid_becke = .True. + my_n_pt_r_grid = 30 + my_n_pt_a_grid = 50 + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + read_wf = .True. + touch read_wf + + PROVIDE N_int + call delta_right() + +end + +! --- + +subroutine delta_right() + + implicit none + integer :: k + double precision, allocatable :: delta(:,:) + + print *, j1b_type + print *, j1b_pen + print *, mu_erf + + allocate( delta(N_det,N_states) ) + delta = 0.d0 + + do k = 1, N_states + !do k = 1, 1 + + ! get < I_left | H_mu - H | psi_right > + call get_delta_bitc_right(psi_det, psi_r_coef_bi_ortho(:,k), N_det, N_int, delta(:,k)) + + ! 
order as QMCCHEM + call dset_order(delta(:,k), psi_bilinear_matrix_order, N_det) + + enddo + + call ezfio_set_dmc_dress_dmc_delta_h(delta) + + deallocate(delta) + + return +end subroutine delta_right + +! --- + diff --git a/src/tc_bi_ortho/dressing_vectors_lr.irp.f b/src/tc_bi_ortho/dressing_vectors_lr.irp.f new file mode 100644 index 00000000..e69a970b --- /dev/null +++ b/src/tc_bi_ortho/dressing_vectors_lr.irp.f @@ -0,0 +1,54 @@ + +! --- + +subroutine get_delta_bitc_right(psidet, psicoef, ndet, Nint, delta) + + BEGIN_DOC + ! + ! delta(I) = < I_left | H_TC - H | Psi_right > + ! + END_DOC + + use bitmasks + + implicit none + + integer, intent(in) :: ndet, Nint + double precision, intent(in) :: psicoef(ndet) + integer(bit_kind), intent(in) :: psidet(Nint,2,ndet) + double precision, intent(out) :: delta(ndet) + + integer :: i, j + double precision :: h_mono, h_twoe, h_tot + double precision :: htc_mono, htc_twoe, htc_three, htc_tot + double precision :: delta_mat + + i = 1 + j = 1 + call htilde_mu_mat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, htc_mono, htc_twoe, htc_three, htc_tot) + call hmat_bi_ortho (psidet(1,1,i), psidet(1,1,j), Nint, h_mono , h_twoe , h_tot) + + delta = 0.d0 + !$OMP PARALLEL DO DEFAULT(NONE) SCHEDULE(dynamic,8) & + !$OMP SHARED(delta, ndet, psidet, psicoef, Nint) & + !$OMP PRIVATE(i, j, delta_mat, h_mono, h_twoe, h_tot, & + !$OMP htc_mono, htc_twoe, htc_three, htc_tot) + do i = 1, ndet + do j = 1, ndet + + ! < I | Htilde | J > + call htilde_mu_mat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, htc_mono, htc_twoe, htc_three, htc_tot) + ! < I | H | J > + call hmat_bi_ortho (psidet(1,1,i), psidet(1,1,j), Nint, h_mono , h_twoe , h_tot) + + delta_mat = htc_tot - h_tot + + delta(i) = delta(i) + psicoef(j) * delta_mat + enddo + enddo + !$OMP END PARALLEL DO + +end subroutine get_delta_bitc_right + +! --- + diff --git a/src/tc_bi_ortho/h_biortho.irp.f b/src/tc_bi_ortho/h_biortho.irp.f new file mode 100644 index 00000000..0494399f --- /dev/null +++ b/src/tc_bi_ortho/h_biortho.irp.f @@ -0,0 +1,249 @@ + +! -- + +subroutine hmat_bi_ortho(key_j, key_i, Nint, hmono, htwoe, htot) + + BEGIN_DOC + ! + ! where | key_j > is developed on the LEFT basis and | key_i > is developed on the RIGHT basis + ! + END_DOC + + use bitmasks + + implicit none + + integer, intent(in) :: Nint + integer(bit_kind), intent(in) :: key_i(Nint,2), key_j(Nint,2) + double precision, intent(out) :: hmono, htwoe, htot + + integer :: degree + + hmono = 0.d0 + htwoe = 0.d0 + htot = 0.d0 + + call get_excitation_degree(key_i, key_j, degree, Nint) + if(degree .gt. 2) return + + if(degree == 0) then + + call diag_hmat_bi_ortho(Nint, key_i, hmono, htwoe) + htot = htot + nuclear_repulsion + + else if (degree == 1)then + + call single_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) + + else if(degree == 2)then + + call double_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) + + endif + + htot += hmono + htwoe + + return +end subroutine hmat_bi_ortho + +! --- + +subroutine diag_hmat_bi_ortho(Nint, key_i, hmono, htwoe) + + use bitmasks + + implicit none + + integer, intent(in) :: Nint + integer(bit_kind), intent(in) :: key_i(Nint,2) + double precision, intent(out) :: hmono, htwoe + + integer :: occ(Nint*bit_kind_size,2) + integer :: Ne(2), i, j, ii, jj, ispin, jspin, k, kk + integer(bit_kind) :: key_i_core(Nint,2) + + hmono = 0.d0 + htwoe = 0.d0 + + call bitstring_to_list_ab(key_i, occ, Ne, Nint) + + do ispin = 1, 2 + do i = 1, Ne(ispin) + ii = occ(i,ispin) + hmono += mo_bi_ortho_one_e(ii,ii) + enddo + enddo + + ! 
alpha/beta two-body + ispin = 1 + jspin = 2 + do i = 1, Ne(ispin) ! electron 1 + ii = occ(i,ispin) + do j = 1, Ne(jspin) ! electron 2 + jj = occ(j,jspin) + htwoe += mo_bi_ortho_coul_e(jj,ii,jj,ii) + enddo + enddo + + ! alpha/alpha two-body + do i = 1, Ne(ispin) + ii = occ(i,ispin) + do j = i+1, Ne(ispin) + jj = occ(j,ispin) + htwoe += mo_bi_ortho_coul_e(ii,jj,ii,jj) - mo_bi_ortho_coul_e(ii,jj,jj,ii) + enddo + enddo + + ! beta/beta two-body + do i = 1, Ne(jspin) + ii = occ(i,jspin) + do j = i+1, Ne(jspin) + jj = occ(j,jspin) + htwoe += mo_bi_ortho_coul_e(ii,jj,ii,jj) - mo_bi_ortho_coul_e(ii,jj,jj,ii) + enddo + enddo + + return +end subroutine diag_hmat_bi_ortho + +! --- + +subroutine single_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) + + BEGIN_DOC + ! + ! < key_j | H | key_i > for single excitation + ! + END_DOC + + use bitmasks + + implicit none + + integer, intent(in) :: Nint + integer(bit_kind), intent(in) :: key_j(Nint,2), key_i(Nint,2) + double precision, intent(out) :: hmono, htwoe + + integer :: occ(Nint*bit_kind_size,2) + integer :: Ne(2), i, j, ii, jj, ispin, jspin, k, kk + integer :: degree,exc(0:2,2,2) + integer :: h1, p1, h2, p2, s1, s2 + integer :: other_spin(2) + integer(bit_kind) :: key_j_core(Nint,2), key_i_core(Nint,2) + double precision :: phase + double precision :: direct_int, exchange_int_12, exchange_int_23, exchange_int_13 + + other_spin(1) = 2 + other_spin(2) = 1 + + hmono = 0.d0 + htwoe = 0.d0 + + call get_excitation_degree(key_i, key_j, degree, Nint) + if(degree .ne. 1) then + return + endif + + call bitstring_to_list_ab(key_i, occ, Ne, Nint) + + call get_single_excitation(key_i, key_j, exc, phase, Nint) + call decode_exc(exc, 1, h1, p1, h2, p2, s1, s2) + + hmono = mo_bi_ortho_one_e(p1,h1) * phase + + ! alpha/beta two-body + ispin = other_spin(s1) + if(s1 == 1) then + + ! single alpha + do i = 1, Ne(ispin) ! electron 2 + ii = occ(i,ispin) + htwoe += mo_bi_ortho_coul_e(ii,p1,ii,h1) + enddo + + else + + ! single beta + do i = 1, Ne(ispin) ! electron 1 + ii = occ(i,ispin) + htwoe += mo_bi_ortho_coul_e(p1,ii,h1,ii) + enddo + + endif + + ! same spin two-body + do i = 1, Ne(s1) + ii = occ(i,s1) + ! ( h1 p1 |ii ii ) - ( h1 ii | p1 ii ) + htwoe += mo_bi_ortho_coul_e(ii,p1,ii,h1) - mo_bi_ortho_coul_e(p1,ii,ii,h1) + enddo + + htwoe *= phase + +end subroutine single_hmat_bi_ortho + +! --- + +subroutine double_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) + + BEGIN_DOC + ! + ! < key_j | H | key_i> for double excitation + ! + END_DOC + + use bitmasks + + implicit none + + integer, intent(in) :: Nint + integer(bit_kind), intent(in) :: key_j(Nint,2), key_i(Nint,2) + double precision, intent(out) :: hmono, htwoe + + integer :: occ(Nint*bit_kind_size,2) + integer :: Ne(2), i, j, ii, jj, ispin, jspin, k, kk + integer :: degree,exc(0:2,2,2) + integer :: h1, p1, h2, p2, s1, s2 + integer :: other_spin(2) + integer(bit_kind) :: key_i_core(Nint,2) + double precision :: phase + + other_spin(1) = 2 + other_spin(2) = 1 + + call get_excitation_degree(key_i, key_j, degree, Nint) + + hmono = 0.d0 + htwoe = 0.d0 + + if(degree .ne. 2) then + return + endif + + call bitstring_to_list_ab(key_i, occ, Ne, Nint) + + call get_double_excitation(key_i, key_j, exc, phase, Nint) + call decode_exc(exc, 2, h1, p1, h2, p2, s1, s2) + + if(s1.ne.s2) then + + htwoe = mo_bi_ortho_coul_e(p2,p1,h2,h1) + + else + + ! same spin two-body + + ! direct terms + htwoe = mo_bi_ortho_coul_e(p2,p1,h2,h1) + ! 
exchange terms + htwoe -= mo_bi_ortho_coul_e(p1,p2,h2,h1) + + endif + + htwoe *= phase + +end subroutine double_hmat_bi_ortho + +! --- + + diff --git a/src/tc_bi_ortho/slater_tc.irp.f b/src/tc_bi_ortho/slater_tc.irp.f index 45115a40..e0a52741 100644 --- a/src/tc_bi_ortho/slater_tc.irp.f +++ b/src/tc_bi_ortho/slater_tc.irp.f @@ -24,7 +24,7 @@ subroutine htilde_mu_mat_bi_ortho_tot(key_j, key_i, Nint, htot) call htilde_mu_mat_bi_ortho(key_j,key_i, Nint, hmono,htwoe,hthree,htot) endif -end subroutine htilde_mu_mat_tot +end subroutine htilde_mu_mat_bi_ortho_tot ! -- From fccf2fe43841a94481d343f75a97f1cb6f54e425 Mon Sep 17 00:00:00 2001 From: AbdAmmar Date: Fri, 21 Oct 2022 23:27:51 +0200 Subject: [PATCH 10/10] new jast added in QP --- src/ao_many_one_e_ints/ao_erf_gauss.irp.f | 25 +- src/ao_many_one_e_ints/ao_gaus_gauss.irp.f | 26 +- src/ao_many_one_e_ints/grad2_jmu_modif.irp.f | 204 +++--- .../grad_lapl_jmu_modif.irp.f | 138 ++-- .../grad_related_ints.irp.f | 450 +++++++------ src/ao_one_e_ints/pot_ao_erf_ints.irp.f | 29 +- src/ao_tc_eff_map/compute_ints_eff_pot.irp.f | 1 - .../fit_j.irp.f | 0 src/ao_tc_eff_map/two_e_ints_gauss.irp.f | 1 + src/ao_two_e_ints/map_integrals.irp.f | 3 +- src/bi_ort_ints/biorthog_mo_for_h.irp.f | 40 +- src/bi_ort_ints/one_e_bi_ort.irp.f | 2 +- src/bi_ort_ints/semi_num_ints_mo.irp.f | 66 +- src/bi_ort_ints/three_body_ijm.irp.f | 460 +++++++------ src/bi_ort_ints/three_body_ijmk.irp.f | 428 ++++++------ src/bi_ort_ints/three_body_ijmkl.irp.f | 452 +++++++------ src/bi_ort_ints/three_body_ints_bi_ort.irp.f | 105 +-- src/bi_ort_ints/total_twoe_pot.irp.f | 45 +- src/bi_ortho_mos/mos_rl.irp.f | 28 +- src/non_h_ints_mu/debug_fit.irp.f | 512 ++++++++++++++ src/non_h_ints_mu/debug_integ_jmu_modif.irp.f | 632 ++++++------------ src/non_h_ints_mu/grad_squared.irp.f | 238 ++++++- src/non_h_ints_mu/grad_tc_int.irp.f | 290 ++++---- src/non_h_ints_mu/j12_nucl_utils.irp.f | 34 + src/non_h_ints_mu/new_grad_tc.irp.f | 57 +- src/non_h_ints_mu/numerical_integ.irp.f | 341 +++++++++- src/non_h_ints_mu/total_tc_int.irp.f | 60 ++ src/tc_bi_ortho/compute_deltamu_right.irp.f | 1 + src/tc_bi_ortho/dressing_vectors_lr.irp.f | 105 ++- src/tc_bi_ortho/h_biortho.irp.f | 32 +- src/tc_bi_ortho/psi_r_l_prov.irp.f | 18 + src/tc_bi_ortho/slater_tc.irp.f | 80 ++- src/tc_bi_ortho/slater_tc_3e.irp.f | 7 +- 33 files changed, 3076 insertions(+), 1834 deletions(-) rename src/{non_h_ints_mu => ao_tc_eff_map}/fit_j.irp.f (100%) create mode 100644 src/non_h_ints_mu/debug_fit.irp.f create mode 100644 src/non_h_ints_mu/total_tc_int.irp.f diff --git a/src/ao_many_one_e_ints/ao_erf_gauss.irp.f b/src/ao_many_one_e_ints/ao_erf_gauss.irp.f index d9c35a8c..fe25e9c0 100644 --- a/src/ao_many_one_e_ints/ao_erf_gauss.irp.f +++ b/src/ao_many_one_e_ints/ao_erf_gauss.irp.f @@ -298,10 +298,16 @@ subroutine NAI_pol_x_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_cen double precision, intent(out) :: ints(3) integer :: i, j, power_Ai(3), power_Aj(3), n_pt_in, power_xA(3), m - double precision :: Ai_center(3), Aj_center(3), integral, alphai, alphaj, coef + double precision :: Ai_center(3), Aj_center(3), integral, alphai, alphaj, coef, coefi double precision, external :: NAI_pol_mult_erf_with1s + ASSERT(beta .ge. 0.d0) + if(beta .lt. 1d-10) then + call NAI_pol_x_mult_erf_ao(i_ao, j_ao, mu_in, C_center, ints) + return + endif + ints = 0.d0 if(ao_overlap_abs(j_ao,i_ao) .lt. 
1.d-12) then return @@ -316,26 +322,27 @@ subroutine NAI_pol_x_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, mu_in, C_cen n_pt_in = n_pt_max_integrals do i = 1, ao_prim_num(i_ao) - alphai = ao_expo_ordered_transp(i,i_ao) + alphai = ao_expo_ordered_transp (i,i_ao) + coefi = ao_coef_normalized_ordered_transp(i,i_ao) do m = 1, 3 - power_xA = power_Ai ! x * phi_i(r) = x * (x-Ax)**ax = (x-Ax)**(ax+1) + Ax * (x-Ax)**ax + power_xA = power_Ai power_xA(m) += 1 do j = 1, ao_prim_num(j_ao) - alphaj = ao_expo_ordered_transp(j,j_ao) - coef = ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) + alphaj = ao_expo_ordered_transp (j,j_ao) + coef = coefi * ao_coef_normalized_ordered_transp(j,j_ao) ! First term = (x-Ax)**(ax+1) - integral = NAI_pol_mult_erf_with1s( Ai_center, Aj_center, power_xA, power_Aj, alphai, alphaj & - , beta, b_center, c_center, n_pt_in, mu_in ) + integral = NAI_pol_mult_erf_with1s( Ai_center, Aj_center, power_xA, power_Aj, alphai, alphaj & + , beta, B_center, C_center, n_pt_in, mu_in ) ints(m) += integral * coef ! Second term = Ax * (x-Ax)**(ax) - integral = NAI_pol_mult_erf_with1s( Ai_center, Aj_center, power_Ai, power_Aj, alphai, alphaj & - , beta, b_center, c_center, n_pt_in, mu_in ) + integral = NAI_pol_mult_erf_with1s( Ai_center, Aj_center, power_Ai, power_Aj, alphai, alphaj & + , beta, B_center, C_center, n_pt_in, mu_in ) ints(m) += Ai_center(m) * integral * coef enddo diff --git a/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f b/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f index fadec343..c058d0d8 100644 --- a/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f +++ b/src/ao_many_one_e_ints/ao_gaus_gauss.irp.f @@ -116,7 +116,7 @@ double precision function overlap_gauss_r12_ao(D_center, delta, i, j) double precision, intent(in) :: D_center(3), delta integer :: power_A(3), power_B(3), l, k - double precision :: A_center(3), B_center(3), alpha, beta, coef, analytical_j + double precision :: A_center(3), B_center(3), alpha, beta, coef, coef1, analytical_j double precision, external :: overlap_gauss_r12 @@ -133,10 +133,12 @@ double precision function overlap_gauss_r12_ao(D_center, delta, i, j) B_center(1:3) = nucl_coord(ao_nucl(j),1:3) do l = 1, ao_prim_num(i) - alpha = ao_expo_ordered_transp(l,i) + alpha = ao_expo_ordered_transp (l,i) + coef1 = ao_coef_normalized_ordered_transp(l,i) + do k = 1, ao_prim_num(j) beta = ao_expo_ordered_transp(k,j) - coef = ao_coef_normalized_ordered_transp(l,i) * ao_coef_normalized_ordered_transp(k,j) + coef = coef1 * ao_coef_normalized_ordered_transp(k,j) if(dabs(coef) .lt. 1d-12) cycle @@ -153,7 +155,9 @@ end function overlap_gauss_r12_ao double precision function overlap_gauss_r12_ao_with1s(B_center, beta, D_center, delta, i, j) BEGIN_DOC + ! ! \int dr AO_i(r) AO_j(r) e^{-beta |r-B_center^2|} e^{-delta |r-D_center|^2} + ! 
END_DOC implicit none @@ -161,7 +165,7 @@ double precision function overlap_gauss_r12_ao_with1s(B_center, beta, D_center, double precision, intent(in) :: B_center(3), beta, D_center(3), delta integer :: power_A1(3), power_A2(3), l, k - double precision :: A1_center(3), A2_center(3), alpha1, alpha2, coef12, analytical_j + double precision :: A1_center(3), A2_center(3), alpha1, alpha2, coef1, coef12, analytical_j double precision :: G_center(3), gama, fact_g, gama_inv double precision, external :: overlap_gauss_r12, overlap_gauss_r12_ao @@ -188,8 +192,8 @@ double precision function overlap_gauss_r12_ao_with1s(B_center, beta, D_center, fact_g = beta * delta * gama_inv * ( (B_center(1) - D_center(1)) * (B_center(1) - D_center(1)) & + (B_center(2) - D_center(2)) * (B_center(2) - D_center(2)) & + (B_center(3) - D_center(3)) * (B_center(3) - D_center(3)) ) - fact_g = dexp(-fact_g) - if(fact_g .lt. 1.d-12) return + if(fact_g .gt. 80d0) return + fact_g = dexp(-fact_g) ! --- @@ -200,11 +204,13 @@ double precision function overlap_gauss_r12_ao_with1s(B_center, beta, D_center, A2_center(1:3) = nucl_coord(ao_nucl(j),1:3) do l = 1, ao_prim_num(i) - alpha1 = ao_expo_ordered_transp(l,i) - do k = 1, ao_prim_num(j) - alpha2 = ao_expo_ordered_transp(k,j) - coef12 = fact_g * ao_coef_normalized_ordered_transp(l,i) * ao_coef_normalized_ordered_transp(k,j) + alpha1 = ao_expo_ordered_transp (l,i) + coef1 = fact_g * ao_coef_normalized_ordered_transp(l,i) + !if(dabs(coef1) .lt. 1d-12) cycle + do k = 1, ao_prim_num(j) + alpha2 = ao_expo_ordered_transp (k,j) + coef12 = coef1 * ao_coef_normalized_ordered_transp(k,j) if(dabs(coef12) .lt. 1d-12) cycle analytical_j = overlap_gauss_r12(G_center, gama, A1_center, A2_center, power_A1, power_A2, alpha1, alpha2) diff --git a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f index 7e08bd97..50b4bf96 100644 --- a/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad2_jmu_modif.irp.f @@ -13,8 +13,8 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n integer :: i, j, ipoint, i_1s, i_fit double precision :: r(3), int_fit, expo_fit, coef_fit double precision :: coef, beta, B_center(3) + double precision :: tmp double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:) double precision, external :: overlap_gauss_r12_ao_with1s @@ -31,19 +31,17 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n !$OMP expo_gauss_1_erf_x_2, coef_gauss_1_erf_x_2, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & !$OMP List_all_comb_b3_cent, int2_grad1u2_grad2u2_j1b2) - - allocate( tmp(ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + tmp = 0.d0 do i_1s = 1, List_all_comb_b3_size coef = List_all_comb_b3_coef (i_1s) @@ -58,29 +56,19 @@ BEGIN_PROVIDER [ double precision, int2_grad1u2_grad2u2_j1b2, (ao_num, ao_num, n coef_fit = coef_gauss_1_erf_x_2(i_fit) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) - tmp(j,i,ipoint) += -0.25d0 * coef * coef_fit * int_fit + tmp += -0.25d0 * coef * coef_fit * int_fit enddo enddo + + int2_grad1u2_grad2u2_j1b2(j,i,ipoint) = tmp enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL 
- do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - int2_grad1u2_grad2u2_j1b2(j,i,ipoint) += tmp(j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 int2_grad1u2_grad2u2_j1b2(j,i,ipoint) = int2_grad1u2_grad2u2_j1b2(i,j,ipoint) enddo @@ -105,9 +93,8 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final implicit none integer :: i, j, ipoint, i_1s, i_fit double precision :: r(3), int_fit, expo_fit, coef_fit - double precision :: coef, beta, B_center(3) + double precision :: coef, beta, B_center(3), tmp double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:) double precision, external :: overlap_gauss_r12_ao_with1s @@ -124,19 +111,17 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final !$OMP expo_gauss_j_mu_x_2, coef_gauss_j_mu_x_2, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & !$OMP List_all_comb_b3_cent, int2_u2_j1b2) - - allocate( tmp(ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + tmp = 0.d0 do i_1s = 1, List_all_comb_b3_size coef = List_all_comb_b3_coef (i_1s) @@ -151,29 +136,19 @@ BEGIN_PROVIDER [ double precision, int2_u2_j1b2, (ao_num, ao_num, n_points_final coef_fit = coef_gauss_j_mu_x_2(i_fit) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) - tmp(j,i,ipoint) += coef * coef_fit * int_fit + tmp += coef * coef_fit * int_fit enddo enddo + + int2_u2_j1b2(j,i,ipoint) = tmp enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - int2_u2_j1b2(j,i,ipoint) += tmp(j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 int2_u2_j1b2(j,i,ipoint) = int2_u2_j1b2(i,j,ipoint) enddo @@ -187,7 +162,7 @@ END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b, (3, ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b2, (3, ao_num, ao_num, n_points_final_grid)] BEGIN_DOC ! 
@@ -196,39 +171,40 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b, (3, ao_num, ao_num, n_po END_DOC implicit none - integer :: i, j, ipoint, i_1s, i_fit - double precision :: r(3), int_fit(3), expo_fit, coef_fit - double precision :: coef, beta, B_center(3) - double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, coeff_1s - double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:,:) + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit(3), expo_fit, coef_fit + double precision :: coef, beta, B_center(3), dist + double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, coef_tmp + double precision :: tmp_x, tmp_y, tmp_z + double precision :: wall0, wall1 provide mu_erf final_grid_points j1b_pen call wall_time(wall0) - int2_u_grad1u_x_j1b = 0.d0 + int2_u_grad1u_x_j1b2 = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, & - !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coeff_1s) & + !$OMP coef_fit, expo_fit, int_fit, alpha_1s, dist, & + !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp, & + !$OMP tmp_x, tmp_y, tmp_z) & !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & - !$OMP List_all_comb_b3_cent, int2_u_grad1u_x_j1b) - - allocate( tmp(3,ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - + !$OMP List_all_comb_b3_cent, int2_u_grad1u_x_j1b2) !$OMP DO do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + tmp_x = 0.d0 + tmp_y = 0.d0 + tmp_z = 0.d0 do i_1s = 1, List_all_comb_b3_size coef = List_all_comb_b3_coef (i_1s) @@ -236,6 +212,9 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b, (3, ao_num, ao_num, n_po B_center(1) = List_all_comb_b3_cent(1,i_1s) B_center(2) = List_all_comb_b3_cent(2,i_1s) B_center(3) = List_all_comb_b3_cent(3,i_1s) + dist = (B_center(1) - r(1)) * (B_center(1) - r(1)) & + + (B_center(2) - r(2)) * (B_center(2) - r(2)) & + + (B_center(3) - r(3)) * (B_center(3) - r(3)) do i_fit = 1, n_max_fit_slat @@ -244,56 +223,45 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_x_j1b, (3, ao_num, ao_num, n_po alpha_1s = beta + expo_fit alpha_1s_inv = 1.d0 / alpha_1s + centr_1s(1) = alpha_1s_inv * (beta * B_center(1) + expo_fit * r(1)) centr_1s(2) = alpha_1s_inv * (beta * B_center(2) + expo_fit * r(2)) centr_1s(3) = alpha_1s_inv * (beta * B_center(3) + expo_fit * r(3)) - expo_coef_1s = -beta * expo_fit * alpha_1s_inv & - * ( (B_center(1) - r(1)) * (B_center(1) - r(1)) & - + (B_center(2) - r(2)) * (B_center(2) - r(2)) & - + (B_center(3) - r(3)) * (B_center(3) - r(3)) ) - if(expo_coef_1s .gt. 80.d0) cycle - coeff_1s = dexp(-expo_coef_1s) + + expo_coef_1s = beta * expo_fit * alpha_1s_inv * dist + !if(expo_coef_1s .gt. 80.d0) cycle + coef_tmp = coef * coef_fit * dexp(-expo_coef_1s) + !if(dabs(coef_tmp) .lt. 
1d-10) cycle - call NAI_pol_x_mult_erf_ao_with1s(i, j, alpha_1s, centr_1s, 1.d+9, r, int_fit) + call NAI_pol_x_mult_erf_ao_with1s(i, j, alpha_1s, centr_1s, 1.d+9, r, int_fit) - - tmp(1,j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit(1) - tmp(2,j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit(2) - tmp(3,j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit(3) + tmp_x += coef_tmp * int_fit(1) + tmp_y += coef_tmp * int_fit(2) + tmp_z += coef_tmp * int_fit(3) enddo enddo + + int2_u_grad1u_x_j1b2(1,j,i,ipoint) = tmp_x + int2_u_grad1u_x_j1b2(2,j,i,ipoint) = tmp_y + int2_u_grad1u_x_j1b2(3,j,i,ipoint) = tmp_z enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - int2_u_grad1u_x_j1b(1,j,i,ipoint) += tmp(1,j,i,ipoint) - int2_u_grad1u_x_j1b(2,j,i,ipoint) += tmp(2,j,i,ipoint) - int2_u_grad1u_x_j1b(3,j,i,ipoint) += tmp(3,j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 - int2_u_grad1u_x_j1b(1,j,i,ipoint) = int2_u_grad1u_x_j1b(1,i,j,ipoint) - int2_u_grad1u_x_j1b(2,j,i,ipoint) = int2_u_grad1u_x_j1b(2,i,j,ipoint) - int2_u_grad1u_x_j1b(3,j,i,ipoint) = int2_u_grad1u_x_j1b(3,i,j,ipoint) + int2_u_grad1u_x_j1b2(1,j,i,ipoint) = int2_u_grad1u_x_j1b2(1,i,j,ipoint) + int2_u_grad1u_x_j1b2(2,j,i,ipoint) = int2_u_grad1u_x_j1b2(2,i,j,ipoint) + int2_u_grad1u_x_j1b2(3,j,i,ipoint) = int2_u_grad1u_x_j1b2(3,i,j,ipoint) enddo enddo enddo call wall_time(wall1) - print*, ' wall time for int2_u_grad1u_x_j1b', wall1 - wall0 + print*, ' wall time for int2_u_grad1u_x_j1b2', wall1 - wall0 END_PROVIDER @@ -309,11 +277,10 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points implicit none integer :: i, j, ipoint, i_1s, i_fit - double precision :: r(3), int_fit, expo_fit, coef_fit - double precision :: coef, beta, B_center(3) - double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, coeff_1s + double precision :: r(3), int_fit, expo_fit, coef_fit, coef_tmp + double precision :: coef, beta, B_center(3), dist + double precision :: alpha_1s, alpha_1s_inv, centr_1s(3), expo_coef_1s, tmp double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:) double precision, external :: NAI_pol_mult_erf_ao_with1s provide mu_erf final_grid_points j1b_pen @@ -323,17 +290,13 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (ipoint, i, j, i_1s, i_fit, r, coef, beta, B_center, & - !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, & - !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coeff_1s) & + !$OMP coef_fit, expo_fit, int_fit, tmp, alpha_1s, dist, & + !$OMP alpha_1s_inv, centr_1s, expo_coef_1s, coef_tmp) & !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b3_size, & !$OMP final_grid_points, n_max_fit_slat, & !$OMP expo_gauss_j_mu_1_erf, coef_gauss_j_mu_1_erf, & !$OMP List_all_comb_b3_coef, List_all_comb_b3_expo, & !$OMP List_all_comb_b3_cent, int2_u_grad1u_j1b2) - - allocate( tmp(ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - !$OMP DO do ipoint = 1, n_points_final_grid do i = 1, ao_num @@ -342,6 +305,7 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) + tmp = 0.d0 do i_1s = 1, List_all_comb_b3_size coef = List_all_comb_b3_coef (i_1s) @@ -349,6 +313,9 @@ BEGIN_PROVIDER [ double precision, 
int2_u_grad1u_j1b2, (ao_num, ao_num, n_points B_center(1) = List_all_comb_b3_cent(1,i_1s) B_center(2) = List_all_comb_b3_cent(2,i_1s) B_center(3) = List_all_comb_b3_cent(3,i_1s) + dist = (B_center(1) - r(1)) * (B_center(1) - r(1)) & + + (B_center(2) - r(2)) * (B_center(2) - r(2)) & + + (B_center(3) - r(3)) * (B_center(3) - r(3)) do i_fit = 1, n_max_fit_slat @@ -360,39 +327,27 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points centr_1s(1) = alpha_1s_inv * (beta * B_center(1) + expo_fit * r(1)) centr_1s(2) = alpha_1s_inv * (beta * B_center(2) + expo_fit * r(2)) centr_1s(3) = alpha_1s_inv * (beta * B_center(3) + expo_fit * r(3)) - expo_coef_1s = -beta * expo_fit * alpha_1s_inv & - * ( (B_center(1) - r(1)) * (B_center(1) - r(1)) & - + (B_center(2) - r(2)) * (B_center(2) - r(2)) & - + (B_center(3) - r(3)) * (B_center(3) - r(3)) ) - if(expo_coef_1s .gt. 80.d0) cycle - coeff_1s = dexp(-expo_coef_1s) + + expo_coef_1s = beta * expo_fit * alpha_1s_inv * dist + !if(expo_coef_1s .gt. 80.d0) cycle + coef_tmp = coef * coef_fit * dexp(-expo_coef_1s) + !if(dabs(coef_tmp) .lt. 1d-10) cycle int_fit = NAI_pol_mult_erf_ao_with1s(i, j, alpha_1s, centr_1s, 1.d+9, r) - - tmp(j,i,ipoint) += coef * coef_fit * coeff_1s * int_fit + tmp += coef_tmp * int_fit enddo enddo + + int2_u_grad1u_j1b2(j,i,ipoint) = tmp enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - int2_u_grad1u_j1b2(j,i,ipoint) += tmp(j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 int2_u_grad1u_j1b2(j,i,ipoint) = int2_u_grad1u_j1b2(i,j,ipoint) enddo @@ -405,3 +360,4 @@ BEGIN_PROVIDER [ double precision, int2_u_grad1u_j1b2, (ao_num, ao_num, n_points END_PROVIDER ! 
--- + diff --git a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f index fab50805..552e7069 100644 --- a/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f +++ b/src/ao_many_one_e_ints/grad_lapl_jmu_modif.irp.f @@ -10,13 +10,12 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po END_DOC implicit none - integer :: i, j, ipoint, i_1s - double precision :: r(3), int_mu, int_coulomb - double precision :: coef, beta, B_center(3) - double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:) - - double precision, external :: NAI_pol_mult_erf_ao_with1s + integer :: i, j, ipoint, i_1s + double precision :: r(3), int_mu, int_coulomb + double precision :: coef, beta, B_center(3) + double precision :: tmp + double precision :: wall0, wall1 + double precision, external :: NAI_pol_mult_erf_ao_with1s provide mu_erf final_grid_points j1b_pen call wall_time(wall0) @@ -28,19 +27,17 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points, & !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & !$OMP v_ij_erf_rk_cst_mu_j1b, mu_erf) - - allocate( tmp(ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + tmp = 0.d0 do i_1s = 1, List_all_comb_b2_size coef = List_all_comb_b2_coef (i_1s) @@ -52,28 +49,18 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_j1b, (ao_num, ao_num, n_po int_mu = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r) int_coulomb = NAI_pol_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r) - tmp(j,i,ipoint) += coef * (int_mu - int_coulomb) + tmp += coef * (int_mu - int_coulomb) enddo + + v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) = tmp enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) += tmp(j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 v_ij_erf_rk_cst_mu_j1b(j,i,ipoint) = v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) enddo @@ -123,33 +110,34 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ END_DOC implicit none - integer :: i, j, ipoint, i_1s - double precision :: coef, beta, B_center(3), r(3), ints(3), ints_coulomb(3) - double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:,:) + integer :: i, j, ipoint, i_1s + double precision :: coef, beta, B_center(3), r(3), ints(3), ints_coulomb(3) + double precision :: tmp_x, tmp_y, tmp_z + double precision :: wall0, wall1 call wall_time(wall0) x_v_ij_erf_rk_cst_mu_tmp_j1b = 0.d0 !$OMP PARALLEL DEFAULT (NONE) & - !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, tmp) & + !$OMP PRIVATE (ipoint, i, j, i_1s, r, coef, beta, B_center, ints, ints_coulomb, & + !$OMP tmp_x, tmp_y, tmp_z) & !$OMP SHARED (n_points_final_grid, ao_num, List_all_comb_b2_size, final_grid_points,& !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, List_all_comb_b2_cent, & !$OMP x_v_ij_erf_rk_cst_mu_tmp_j1b, 
mu_erf) - - allocate( tmp(3,ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + tmp_x = 0.d0 + tmp_y = 0.d0 + tmp_z = 0.d0 do i_1s = 1, List_all_comb_b2_size coef = List_all_comb_b2_coef (i_1s) @@ -161,32 +149,22 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp_j1b, (3, ao_num, ao_ call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, mu_erf, r, ints ) call NAI_pol_x_mult_erf_ao_with1s(i, j, beta, B_center, 1.d+9, r, ints_coulomb) - tmp(1,j,i,ipoint) += coef * (ints(1) - ints_coulomb(1)) - tmp(2,j,i,ipoint) += coef * (ints(2) - ints_coulomb(2)) - tmp(3,j,i,ipoint) += coef * (ints(3) - ints_coulomb(3)) + tmp_x += coef * (ints(1) - ints_coulomb(1)) + tmp_y += coef * (ints(2) - ints_coulomb(2)) + tmp_z += coef * (ints(3) - ints_coulomb(3)) enddo + + x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) = tmp_x + x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) = tmp_y + x_v_ij_erf_rk_cst_mu_tmp_j1b(3,j,i,ipoint) = tmp_z enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) += tmp(1,j,i,ipoint) - x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) += tmp(2,j,i,ipoint) - x_v_ij_erf_rk_cst_mu_tmp_j1b(3,j,i,ipoint) += tmp(3,j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 x_v_ij_erf_rk_cst_mu_tmp_j1b(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(1,i,j,ipoint) x_v_ij_erf_rk_cst_mu_tmp_j1b(2,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp_j1b(2,i,j,ipoint) @@ -202,6 +180,7 @@ END_PROVIDER ! --- +! 
TODO analytically BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_final_grid)] BEGIN_DOC @@ -211,13 +190,13 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ END_DOC implicit none - integer :: i, j, ipoint, i_1s, i_fit - double precision :: r(3), int_fit, expo_fit, coef_fit - double precision :: coef, beta, B_center(3) - double precision :: wall0, wall1 - double precision, allocatable :: tmp(:,:,:) + integer :: i, j, ipoint, i_1s, i_fit + double precision :: r(3), int_fit, expo_fit, coef_fit + double precision :: coef, beta, B_center(3) + double precision :: tmp + double precision :: wall0, wall1 - double precision, external :: overlap_gauss_r12_ao_with1s + double precision, external :: overlap_gauss_r12_ao_with1s provide mu_erf final_grid_points j1b_pen call wall_time(wall0) @@ -232,19 +211,17 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ !$OMP expo_gauss_j_mu_x, coef_gauss_j_mu_x, & !$OMP List_all_comb_b2_coef, List_all_comb_b2_expo, & !$OMP List_all_comb_b2_cent, v_ij_u_cst_mu_j1b) - - allocate( tmp(ao_num,ao_num,n_points_final_grid) ) - tmp = 0.d0 - !$OMP DO !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) + tmp = 0.d0 do i_1s = 1, List_all_comb_b2_size coef = List_all_comb_b2_coef (i_1s) @@ -259,29 +236,19 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ coef_fit = coef_gauss_j_mu_x(i_fit) int_fit = overlap_gauss_r12_ao_with1s(B_center, beta, r, expo_fit, i, j) - tmp(j,i,ipoint) += coef * coef_fit * int_fit + tmp += coef * coef_fit * int_fit enddo enddo + + v_ij_u_cst_mu_j1b(j,i,ipoint) = tmp enddo enddo enddo !$OMP END DO - - !$OMP CRITICAL - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = i, ao_num - v_ij_u_cst_mu_j1b(j,i,ipoint) += tmp(j,i,ipoint) - enddo - enddo - enddo - !$OMP END CRITICAL - - deallocate( tmp ) !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 v_ij_u_cst_mu_j1b(j,i,ipoint) = v_ij_u_cst_mu_j1b(i,j,ipoint) enddo @@ -294,3 +261,4 @@ BEGIN_PROVIDER [ double precision, v_ij_u_cst_mu_j1b, (ao_num, ao_num, n_points_ END_PROVIDER ! 
--- + diff --git a/src/ao_many_one_e_ints/grad_related_ints.irp.f b/src/ao_many_one_e_ints/grad_related_ints.irp.f index 7b183d83..67fb0fe7 100644 --- a/src/ao_many_one_e_ints/grad_related_ints.irp.f +++ b/src/ao_many_one_e_ints/grad_related_ints.irp.f @@ -28,11 +28,12 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu, (ao_num, ao_num, n_points !$OMP SHARED (ao_num, n_points_final_grid, v_ij_erf_rk_cst_mu, final_grid_points, mu_erf) !$OMP DO SCHEDULE (dynamic) do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) int_mu = NAI_pol_mult_erf_ao(i, j, mu_erf, r) int_coulomb = NAI_pol_mult_erf_ao(i, j, 1.d+9, r) @@ -45,7 +46,7 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu, (ao_num, ao_num, n_points !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 v_ij_erf_rk_cst_mu(j,i,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) enddo @@ -53,54 +54,61 @@ BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu, (ao_num, ao_num, n_points enddo call wall_time(wall1) - print*, 'wall time for v_ij_erf_rk_cst_mu ', wall1 - wall0 + print*, ' wall time for v_ij_erf_rk_cst_mu ', wall1 - wall0 END_PROVIDER ! --- BEGIN_PROVIDER [ double precision, v_ij_erf_rk_cst_mu_transp, (n_points_final_grid, ao_num, ao_num)] - implicit none - BEGIN_DOC -! int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R| - 1)/|r - R| - END_DOC - integer :: i,j,ipoint - double precision :: mu,r(3),NAI_pol_mult_erf_ao - double precision :: int_mu, int_coulomb - provide mu_erf final_grid_points - double precision :: wall0, wall1 - call wall_time(wall0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,ipoint,mu,r,int_mu,int_coulomb) & + + BEGIN_DOC + ! 
int dr phi_i(r) phi_j(r) (erf(mu(R) |r - R| - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: r(3) + double precision :: int_mu, int_coulomb + double precision :: wall0, wall1 + double precision :: NAI_pol_mult_erf_ao + + provide mu_erf final_grid_points + call wall_time(wall0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,ipoint,r,int_mu,int_coulomb) & !$OMP SHARED (ao_num,n_points_final_grid,v_ij_erf_rk_cst_mu_transp,final_grid_points,mu_erf) !$OMP DO SCHEDULE (dynamic) - do i = 1, ao_num - do j = i, ao_num - do ipoint = 1, n_points_final_grid - mu = mu_erf - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - int_mu = NAI_pol_mult_erf_ao(i,j,mu,r) - int_coulomb = NAI_pol_mult_erf_ao(i,j,1.d+9,r) - v_ij_erf_rk_cst_mu_transp(ipoint,j,i)= (int_mu - int_coulomb ) - enddo + do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + do i = 1, ao_num + do j = i, ao_num + int_mu = NAI_pol_mult_erf_ao(i, j, mu_erf, r) + int_coulomb = NAI_pol_mult_erf_ao(i, j, 1.d+9, r) + + v_ij_erf_rk_cst_mu_transp(ipoint,j,i) = int_mu - int_coulomb + enddo + enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - do i = 1, ao_num - do j = 1, i-1 - do ipoint = 1, n_points_final_grid - v_ij_erf_rk_cst_mu_transp(ipoint,j,i)= v_ij_erf_rk_cst_mu_transp(ipoint,i,j) + do i = 2, ao_num + do j = 1, i-1 + do ipoint = 1, n_points_final_grid + v_ij_erf_rk_cst_mu_transp(ipoint,j,i) = v_ij_erf_rk_cst_mu_transp(ipoint,i,j) + enddo enddo - enddo enddo - call wall_time(wall1) - print*,'wall time for v_ij_erf_rk_cst_mu_transp ',wall1 - wall0 + call wall_time(wall1) + print *, ' wall time for v_ij_erf_rk_cst_mu_transp ', wall1 - wall0 + END_PROVIDER ! 
--- @@ -112,30 +120,31 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, END_DOC implicit none - integer :: i, j, ipoint, m + integer :: i, j, ipoint double precision :: r(3), ints(3), ints_coulomb(3) double precision :: wall0, wall1 call wall_time(wall0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,ipoint,r,ints,m,ints_coulomb) & + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,ipoint,r,ints,ints_coulomb) & !$OMP SHARED (ao_num,n_points_final_grid,x_v_ij_erf_rk_cst_mu_tmp,final_grid_points,mu_erf) !$OMP DO SCHEDULE (dynamic) do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do i = 1, ao_num do j = i, ao_num - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) call NAI_pol_x_mult_erf_ao(i, j, mu_erf, r, ints ) call NAI_pol_x_mult_erf_ao(i, j, 1.d+9 , r, ints_coulomb) - do m = 1, 3 - x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = (ints(m) - ints_coulomb(m)) - enddo + x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) = ints(1) - ints_coulomb(1) + x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) = ints(2) - ints_coulomb(2) + x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) = ints(3) - ints_coulomb(3) enddo enddo enddo @@ -143,11 +152,11 @@ BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_tmp, (3, ao_num, ao_num, !$OMP END PARALLEL do ipoint = 1, n_points_final_grid - do i = 1, ao_num + do i = 2, ao_num do j = 1, i-1 - do m = 1, 3 - x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(m,i,j,ipoint) - enddo + x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(1,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(2,i,j,ipoint) + x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(3,i,j,ipoint) enddo enddo enddo @@ -160,208 +169,249 @@ END_PROVIDER ! --- BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu, (ao_num, ao_num,n_points_final_grid,3)] - implicit none - BEGIN_DOC -! int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints,ints_coulomb - double precision :: wall0, wall1 - call wall_time(wall0) - do ipoint = 1, n_points_final_grid - do i = 1, ao_num - do j = 1, ao_num - do m = 1, 3 - x_v_ij_erf_rk_cst_mu(j,i,ipoint,m)= x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) - enddo - enddo - enddo - enddo - call wall_time(wall1) - print*,'wall time for x_v_ij_erf_rk_cst_mu',wall1 - wall0 + BEGIN_DOC + ! int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + x_v_ij_erf_rk_cst_mu(j,i,ipoint,1) = x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) + x_v_ij_erf_rk_cst_mu(j,i,ipoint,2) = x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) + x_v_ij_erf_rk_cst_mu(j,i,ipoint,3) = x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print *, ' wall time for x_v_ij_erf_rk_cst_mu', wall1 - wall0 END_PROVIDER +! --- BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_transp, (ao_num, ao_num,3,n_points_final_grid)] - implicit none - BEGIN_DOC -! 
int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints,ints_coulomb - double precision :: wall0, wall1 - call wall_time(wall0) - do ipoint = 1, n_points_final_grid - do m = 1, 3 - do i = 1, ao_num - do j = 1, ao_num - x_v_ij_erf_rk_cst_mu_transp(j,i,m,ipoint)= x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) - enddo - enddo - enddo - enddo - call wall_time(wall1) - print*,'wall time for x_v_ij_erf_rk_cst_mu_transp',wall1 - wall0 + BEGIN_DOC + ! int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + x_v_ij_erf_rk_cst_mu_transp(j,i,1,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_transp(j,i,2,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_transp(j,i,3,ipoint) = x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print *, ' wall time for x_v_ij_erf_rk_cst_mu_transp', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, x_v_ij_erf_rk_cst_mu_transp_bis, (n_points_final_grid,ao_num, ao_num,3)] - implicit none - BEGIN_DOC -! int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints,ints_coulomb - double precision :: wall0, wall1 - call wall_time(wall0) - do m = 1, 3 - do i = 1, ao_num - do j = 1, ao_num - do ipoint = 1, n_points_final_grid - x_v_ij_erf_rk_cst_mu_transp_bis(ipoint,j,i,m)= x_v_ij_erf_rk_cst_mu_tmp(m,j,i,ipoint) - enddo - enddo - enddo - enddo - call wall_time(wall1) - print*,'wall time for x_v_ij_erf_rk_cst_mu_transp',wall1 - wall0 + BEGIN_DOC + ! int dr x * phi_i(r) phi_j(r) (erf(mu(R) |r - R|) - 1)/|r - R| + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + + do i = 1, ao_num + do j = 1, ao_num + do ipoint = 1, n_points_final_grid + x_v_ij_erf_rk_cst_mu_transp_bis(ipoint,j,i,1) = x_v_ij_erf_rk_cst_mu_tmp(1,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_transp_bis(ipoint,j,i,2) = x_v_ij_erf_rk_cst_mu_tmp(2,j,i,ipoint) + x_v_ij_erf_rk_cst_mu_transp_bis(ipoint,j,i,3) = x_v_ij_erf_rk_cst_mu_tmp(3,j,i,ipoint) + enddo + enddo + enddo + + call wall_time(wall1) + print *, ' wall time for x_v_ij_erf_rk_cst_mu_transp_bis', wall1 - wall0 END_PROVIDER +! --- + +BEGIN_PROVIDER [ double precision, d_dx_v_ij_erf_rk_cst_mu_tmp, (3, n_points_final_grid, ao_num, ao_num)] + + BEGIN_DOC + ! d_dx_v_ij_erf_rk_cst_mu_tmp(m,R,j,i) = int dr phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) + ! + ! with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz + END_DOC -BEGIN_PROVIDER [ double precision, d_dx_v_ij_erf_rk_cst_mu_tmp, (3,n_points_final_grid,ao_num, ao_num)] implicit none - BEGIN_DOC -! d_dx_v_ij_erf_rk_cst_mu_tmp(m,R,j,i) = int dr phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) -! -! 
with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints(3),ints_coulomb(3) + integer :: i, j, ipoint + double precision :: r(3), ints(3), ints_coulomb(3) double precision :: wall0, wall1 + call wall_time(wall0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,ipoint,mu,r,ints,m,ints_coulomb) & + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,ipoint,r,ints,ints_coulomb) & !$OMP SHARED (ao_num,n_points_final_grid,d_dx_v_ij_erf_rk_cst_mu_tmp,final_grid_points,mu_erf) !$OMP DO SCHEDULE (dynamic) - do i = 1, ao_num - do j = 1, ao_num - do ipoint = 1, n_points_final_grid - mu = mu_erf + do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - call phi_j_erf_mu_r_dxyz_phi(j,i,mu, r, ints) - call phi_j_erf_mu_r_dxyz_phi(j,i,1.d+9, r, ints_coulomb) - do m = 1, 3 - d_dx_v_ij_erf_rk_cst_mu_tmp(m,ipoint,j,i) = ( ints(m) - ints_coulomb(m)) + + do i = 1, ao_num + do j = 1, ao_num + call phi_j_erf_mu_r_dxyz_phi(j, i, mu_erf, r, ints) + call phi_j_erf_mu_r_dxyz_phi(j, i, 1.d+9, r, ints_coulomb) + + d_dx_v_ij_erf_rk_cst_mu_tmp(1,ipoint,j,i) = ints(1) - ints_coulomb(1) + d_dx_v_ij_erf_rk_cst_mu_tmp(2,ipoint,j,i) = ints(2) - ints_coulomb(2) + d_dx_v_ij_erf_rk_cst_mu_tmp(3,ipoint,j,i) = ints(3) - ints_coulomb(3) + enddo enddo - enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for d_dx_v_ij_erf_rk_cst_mu_tmp',wall1 - wall0 - + call wall_time(wall1) + print *, ' wall time for d_dx_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 END_PROVIDER -BEGIN_PROVIDER [ double precision, d_dx_v_ij_erf_rk_cst_mu, (n_points_final_grid,ao_num, ao_num,3)] - implicit none - BEGIN_DOC -! d_dx_v_ij_erf_rk_cst_mu_tmp(j,i,R,m) = int dr phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) -! -! with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints,ints_coulomb - double precision :: wall0, wall1 - call wall_time(wall0) - do i = 1, ao_num - do j = 1, ao_num - do m = 1, 3 - do ipoint = 1, n_points_final_grid - d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,m)= d_dx_v_ij_erf_rk_cst_mu_tmp(m,ipoint,j,i) +! --- + +BEGIN_PROVIDER [ double precision, d_dx_v_ij_erf_rk_cst_mu, (n_points_final_grid, ao_num, ao_num, 3)] + + BEGIN_DOC + ! + ! d_dx_v_ij_erf_rk_cst_mu_tmp(j,i,R,m) = int dr phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) + ! + ! with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz + ! + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + do i = 1, ao_num + do j = 1, ao_num + do ipoint = 1, n_points_final_grid + d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,1) = d_dx_v_ij_erf_rk_cst_mu_tmp(1,ipoint,j,i) + d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,2) = d_dx_v_ij_erf_rk_cst_mu_tmp(2,ipoint,j,i) + d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,3) = d_dx_v_ij_erf_rk_cst_mu_tmp(3,ipoint,j,i) + enddo enddo - enddo enddo - enddo - call wall_time(wall1) - print*,'wall time for d_dx_v_ij_erf_rk_cst_mu',wall1 - wall0 + call wall_time(wall1) + print *, ' wall time for d_dx_v_ij_erf_rk_cst_mu', wall1 - wall0 END_PROVIDER -BEGIN_PROVIDER [ double precision, x_d_dx_v_ij_erf_rk_cst_mu_tmp, (3,n_points_final_grid,ao_num, ao_num)] - implicit none - BEGIN_DOC -! x_d_dx_v_ij_erf_rk_cst_mu_tmp(m,j,i,R) = int dr x phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) -! -! 
with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints(3),ints_coulomb(3) - double precision :: wall0, wall1 - call wall_time(wall0) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,ipoint,mu,r,ints,m,ints_coulomb) & +! --- + +BEGIN_PROVIDER [ double precision, x_d_dx_v_ij_erf_rk_cst_mu_tmp, (3, n_points_final_grid, ao_num, ao_num)] + + BEGIN_DOC + ! + ! x_d_dx_v_ij_erf_rk_cst_mu_tmp(m,j,i,R) = int dr x phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) + ! + ! with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz + ! + END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: r(3), ints(3), ints_coulomb(3) + double precision :: wall0, wall1 + + call wall_time(wall0) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,ipoint,r,ints,ints_coulomb) & !$OMP SHARED (ao_num,n_points_final_grid,x_d_dx_v_ij_erf_rk_cst_mu_tmp,final_grid_points,mu_erf) !$OMP DO SCHEDULE (dynamic) - do i = 1, ao_num - do j = 1, ao_num - do ipoint = 1, n_points_final_grid - mu = mu_erf + do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) r(2) = final_grid_points(2,ipoint) r(3) = final_grid_points(3,ipoint) - call phi_j_erf_mu_r_xyz_dxyz_phi(j,i,mu, r, ints) - call phi_j_erf_mu_r_xyz_dxyz_phi(j,i,1.d+9, r, ints_coulomb) - do m = 1, 3 - x_d_dx_v_ij_erf_rk_cst_mu_tmp(m,ipoint,j,i) = ( ints(m) - ints_coulomb(m)) + + do i = 1, ao_num + do j = 1, ao_num + call phi_j_erf_mu_r_xyz_dxyz_phi(j, i, mu_erf, r, ints) + call phi_j_erf_mu_r_xyz_dxyz_phi(j, i, 1.d+9, r, ints_coulomb) + + x_d_dx_v_ij_erf_rk_cst_mu_tmp(1,ipoint,j,i) = ints(1) - ints_coulomb(1) + x_d_dx_v_ij_erf_rk_cst_mu_tmp(2,ipoint,j,i) = ints(2) - ints_coulomb(2) + x_d_dx_v_ij_erf_rk_cst_mu_tmp(3,ipoint,j,i) = ints(3) - ints_coulomb(3) + enddo enddo - enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for x_d_dx_v_ij_erf_rk_cst_mu_tmp',wall1 - wall0 - + call wall_time(wall1) + print *, ' wall time for x_d_dx_v_ij_erf_rk_cst_mu_tmp', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, x_d_dx_v_ij_erf_rk_cst_mu, (n_points_final_grid,ao_num, ao_num,3)] - implicit none - BEGIN_DOC -! x_d_dx_v_ij_erf_rk_cst_mu_tmp(j,i,R,m) = int dr x phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) -! -! with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz - END_DOC - integer :: i,j,ipoint,m - double precision :: mu,r(3),ints,ints_coulomb - double precision :: wall0, wall1 - call wall_time(wall0) - do i = 1, ao_num - do j = 1, ao_num - do ipoint = 1, n_points_final_grid - do m = 1, 3 - x_d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,m)= x_d_dx_v_ij_erf_rk_cst_mu_tmp(m,ipoint,j,i) - enddo - enddo - enddo - enddo - call wall_time(wall1) - print*,'wall time for x_d_dx_v_ij_erf_rk_cst_mu',wall1 - wall0 + BEGIN_DOC + ! + ! x_d_dx_v_ij_erf_rk_cst_mu_tmp(j,i,R,m) = int dr x phi_j(r)) (erf(mu(R) |r - R|) - 1)/|r - R| d/dx (phi_i(r) + ! + ! with m == 1 -> d/dx , m == 2 -> d/dy , m == 3 -> d/dz + ! 
+ END_DOC + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + + do i = 1, ao_num + do j = 1, ao_num + do ipoint = 1, n_points_final_grid + x_d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,1) = x_d_dx_v_ij_erf_rk_cst_mu_tmp(1,ipoint,j,i) + x_d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,2) = x_d_dx_v_ij_erf_rk_cst_mu_tmp(2,ipoint,j,i) + x_d_dx_v_ij_erf_rk_cst_mu(ipoint,j,i,3) = x_d_dx_v_ij_erf_rk_cst_mu_tmp(3,ipoint,j,i) + enddo + enddo + enddo + + call wall_time(wall1) + print *, ' wall time for x_d_dx_v_ij_erf_rk_cst_mu', wall1 - wall0 END_PROVIDER +! --- + + diff --git a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f index 1d2d8faf..263e9845 100644 --- a/src/ao_one_e_ints/pot_ao_erf_ints.irp.f +++ b/src/ao_one_e_ints/pot_ao_erf_ints.irp.f @@ -78,7 +78,7 @@ double precision function NAI_pol_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, double precision, intent(in) :: mu_in, C_center(3) integer :: i, j, power_A1(3), power_A2(3), n_pt_in - double precision :: A1_center(3), A2_center(3), alpha1, alpha2, coef12, integral + double precision :: A1_center(3), A2_center(3), alpha1, alpha2, coef12, coef1, integral double precision, external :: NAI_pol_mult_erf_with1s, NAI_pol_mult_erf_ao @@ -98,11 +98,12 @@ double precision function NAI_pol_mult_erf_ao_with1s(i_ao, j_ao, beta, B_center, NAI_pol_mult_erf_ao_with1s = 0.d0 do i = 1, ao_prim_num(i_ao) - alpha1 = ao_expo_ordered_transp(i,i_ao) + alpha1 = ao_expo_ordered_transp (i,i_ao) + coef1 = ao_coef_normalized_ordered_transp(i,i_ao) + do j = 1, ao_prim_num(j_ao) alpha2 = ao_expo_ordered_transp(j,j_ao) - - coef12 = ao_coef_normalized_ordered_transp(j,j_ao) * ao_coef_normalized_ordered_transp(i,i_ao) + coef12 = coef1 * ao_coef_normalized_ordered_transp(j,j_ao) if(dabs(coef12) .lt. 
1d-14) cycle integral = NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A1, power_A2, alpha1, alpha2 & @@ -242,9 +243,9 @@ double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A A12_center(1) = (alpha1 * A1_center(1) + alpha2 * A2_center(1)) * alpha12_inv A12_center(2) = (alpha1 * A1_center(2) + alpha2 * A2_center(2)) * alpha12_inv A12_center(3) = (alpha1 * A1_center(3) + alpha2 * A2_center(3)) * alpha12_inv - dist12 = ( (A1_center(1) - A2_center(1)) * (A1_center(1) - A2_center(1)) & - + (A1_center(2) - A2_center(2)) * (A1_center(2) - A2_center(2)) & - + (A1_center(3) - A2_center(3)) * (A1_center(3) - A2_center(3)) ) + dist12 = (A1_center(1) - A2_center(1)) * (A1_center(1) - A2_center(1)) & + + (A1_center(2) - A2_center(2)) * (A1_center(2) - A2_center(2)) & + + (A1_center(3) - A2_center(3)) * (A1_center(3) - A2_center(3)) const_factor12 = dist12 * rho12 if(const_factor12 > 80.d0) then @@ -262,9 +263,9 @@ double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A P_center(1) = (alpha12 * A12_center(1) + beta * B_center(1)) * p_inv P_center(2) = (alpha12 * A12_center(2) + beta * B_center(2)) * p_inv P_center(3) = (alpha12 * A12_center(3) + beta * B_center(3)) * p_inv - dist = ( (A12_center(1) - B_center(1)) * (A12_center(1) - B_center(1)) & - + (A12_center(2) - B_center(2)) * (A12_center(2) - B_center(2)) & - + (A12_center(3) - B_center(3)) * (A12_center(3) - B_center(3)) ) + dist = (A12_center(1) - B_center(1)) * (A12_center(1) - B_center(1)) & + + (A12_center(2) - B_center(2)) * (A12_center(2) - B_center(2)) & + + (A12_center(3) - B_center(3)) * (A12_center(3) - B_center(3)) const_factor = const_factor12 + dist * rho if(const_factor > 80.d0) then @@ -272,11 +273,9 @@ double precision function NAI_pol_mult_erf_with1s( A1_center, A2_center, power_A return endif - dist_integral = 0.d0 - do i = 1, 3 - dist_integral += (P_center(i) - C_center(i)) * (P_center(i) - C_center(i)) - enddo - + dist_integral = (P_center(1) - C_center(1)) * (P_center(1) - C_center(1)) & + + (P_center(2) - C_center(2)) * (P_center(2) - C_center(2)) & + + (P_center(3) - C_center(3)) * (P_center(3) - C_center(3)) ! --- diff --git a/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f b/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f index 2e7e21c0..7a567979 100644 --- a/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f +++ b/src/ao_tc_eff_map/compute_ints_eff_pot.irp.f @@ -61,7 +61,6 @@ subroutine compute_ao_tc_sym_two_e_pot_jl(j, l, n_integrals, buffer_i, buffer_va integral = integral + j1b_gauss_2e_j2(i, k, j, l) endif - if(abs(integral) < thr) then cycle endif diff --git a/src/non_h_ints_mu/fit_j.irp.f b/src/ao_tc_eff_map/fit_j.irp.f similarity index 100% rename from src/non_h_ints_mu/fit_j.irp.f rename to src/ao_tc_eff_map/fit_j.irp.f diff --git a/src/ao_tc_eff_map/two_e_ints_gauss.irp.f b/src/ao_tc_eff_map/two_e_ints_gauss.irp.f index 988b0b58..51ef73a0 100644 --- a/src/ao_tc_eff_map/two_e_ints_gauss.irp.f +++ b/src/ao_tc_eff_map/two_e_ints_gauss.irp.f @@ -254,6 +254,7 @@ double precision function general_primitive_integral_gauss(dim, & rho_old = (p*q)/(p+q) prefactor = pi_3 * inv_pq_3_2 * fact_p * fact_q do i = 1, n_gauss_eff_pot ! 
browse the gaussians with different expo/coef + !do i = 1, n_gauss_eff_pot-1 aa = expo_gauss_eff_pot(i) c_a = coef_gauss_eff_pot(i) t_a = dsqrt( aa /(rho_old + aa) ) diff --git a/src/ao_two_e_ints/map_integrals.irp.f b/src/ao_two_e_ints/map_integrals.irp.f index de4195ba..0d34d95e 100644 --- a/src/ao_two_e_ints/map_integrals.irp.f +++ b/src/ao_two_e_ints/map_integrals.irp.f @@ -321,8 +321,9 @@ BEGIN_PROVIDER [ double precision, ao_integrals_cache, (0:64*64*64*64) ] !$OMP END PARALLEL DO END_PROVIDER +! --- -double precision function get_ao_two_e_integral(i,j,k,l,map) result(result) +double precision function get_ao_two_e_integral(i, j, k, l, map) result(result) use map_module implicit none BEGIN_DOC diff --git a/src/bi_ort_ints/biorthog_mo_for_h.irp.f b/src/bi_ort_ints/biorthog_mo_for_h.irp.f index a8e7630b..452c13f1 100644 --- a/src/bi_ort_ints/biorthog_mo_for_h.irp.f +++ b/src/bi_ort_ints/biorthog_mo_for_h.irp.f @@ -1,37 +1,6 @@ ! --- -BEGIN_PROVIDER [double precision, ao_two_e_coul, (ao_num, ao_num, ao_num, ao_num) ] - - BEGIN_DOC - ! - ! ao_two_e_coul(k,i,l,j) = ( k i | 1/r12 | l j ) = < l k | 1/r12 | j i > - ! - END_DOC - - integer :: i, j, k, l - double precision :: integral - double precision, external :: get_ao_two_e_integral - - PROVIDE ao_integrals_map - - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - - integral = get_ao_two_e_integral(i, j, k, l, ao_integrals_map) - - ao_two_e_coul(k,i,l,j) = integral - enddo - enddo - enddo - enddo - -END_PROVIDER - -! --- - double precision function bi_ortho_mo_coul_ints(l, k, j, i) BEGIN_DOC @@ -155,7 +124,7 @@ BEGIN_PROVIDER [double precision, mo_bi_ortho_coul_e, (mo_num, mo_num, mo_num, m do i = 1, mo_num do l = 1, mo_num do k = 1, mo_num - ! < k l | V12 | i j > (k i|l j) + ! < k l | V12 | i j > (k i|l j) mo_bi_ortho_coul_e(k,l,i,j) = mo_bi_ortho_coul_e_chemist(k,i,l,j) enddo enddo @@ -169,13 +138,14 @@ END_PROVIDER BEGIN_PROVIDER [ double precision, mo_bi_ortho_one_e, (mo_num, mo_num)] BEGIN_DOC - ! mo_bi_ortho_one_e(k,i) = + ! + ! mo_bi_ortho_one_e(k,i) = < MO^L_k | h_c | MO^R_i > + ! END_DOC implicit none - call ao_to_mo_bi_ortho( ao_one_e_integrals, ao_num & - , mo_bi_ortho_one_e , mo_num ) + call ao_to_mo_bi_ortho(ao_one_e_integrals, ao_num, mo_bi_ortho_one_e , mo_num) END_PROVIDER diff --git a/src/bi_ort_ints/one_e_bi_ort.irp.f b/src/bi_ort_ints/one_e_bi_ort.irp.f index 5efcb637..8997991d 100644 --- a/src/bi_ort_ints/one_e_bi_ort.irp.f +++ b/src/bi_ort_ints/one_e_bi_ort.irp.f @@ -10,7 +10,7 @@ BEGIN_PROVIDER [double precision, ao_one_e_integrals_tc_tot, (ao_num,ao_num)] provide j1b_type - if(j1b_type .ne. 0) then + if( (j1b_type .eq. 1) .or. (j1b_type .eq. 
2) ) then do i = 1, ao_num do j = 1, ao_num diff --git a/src/bi_ort_ints/semi_num_ints_mo.irp.f b/src/bi_ort_ints/semi_num_ints_mo.irp.f index 27fcb7de..e7c1fdd1 100644 --- a/src/bi_ort_ints/semi_num_ints_mo.irp.f +++ b/src/bi_ort_ints/semi_num_ints_mo.irp.f @@ -86,10 +86,8 @@ BEGIN_PROVIDER [ double precision, mo_x_v_ki_bi_ortho_erf_rk_cst_mu, (mo_num, mo call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,1,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,1,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) - call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,2,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,2,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) - call ao_to_mo_bi_ortho( x_v_ij_erf_rk_cst_mu_transp (1,1,3,ipoint), size(x_v_ij_erf_rk_cst_mu_transp, 1) & , mo_x_v_ki_bi_ortho_erf_rk_cst_mu(1,1,3,ipoint), size(mo_x_v_ki_bi_ortho_erf_rk_cst_mu, 1) ) @@ -103,7 +101,55 @@ END_PROVIDER ! --- -BEGIN_PROVIDER [ double precision, int2_grad1_u12_bimo, (3, ao_num, ao_num, n_points_final_grid)] +BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao_transp, (ao_num, ao_num, 3, n_points_final_grid)] + + implicit none + integer :: i, j, ipoint + double precision :: wall0, wall1 + + call wall_time(wall0) + do ipoint = 1, n_points_final_grid + do i = 1, ao_num + do j = 1, ao_num + int2_grad1_u12_ao_transp(j,i,1,ipoint) = int2_grad1_u12_ao(1,j,i,ipoint) + int2_grad1_u12_ao_transp(j,i,2,ipoint) = int2_grad1_u12_ao(2,j,i,ipoint) + int2_grad1_u12_ao_transp(j,i,3,ipoint) = int2_grad1_u12_ao(3,j,i,ipoint) + enddo + enddo + enddo + call wall_time(wall1) + print *, ' wall time for int2_grad1_u12_ao_transp ', wall1 - wall0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, int2_grad1_u12_bimo_transp, (mo_num, mo_num, 3, n_points_final_grid)] + + implicit none + integer :: ipoint + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (ipoint) & + !$OMP SHARED (n_points_final_grid,int2_grad1_u12_ao_transp,int2_grad1_u12_bimo_transp) + !$OMP DO SCHEDULE (dynamic) + do ipoint = 1, n_points_final_grid + call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,1,ipoint), size(int2_grad1_u12_ao_transp , 1) & + , int2_grad1_u12_bimo_transp(1,1,1,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,2,ipoint), size(int2_grad1_u12_ao_transp , 1) & + , int2_grad1_u12_bimo_transp(1,1,2,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao_transp (1,1,3,ipoint), size(int2_grad1_u12_ao_transp , 1) & + , int2_grad1_u12_bimo_transp(1,1,3,ipoint), size(int2_grad1_u12_bimo_transp, 1) ) + enddo + !$OMP END DO + !$OMP END PARALLEL + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, int2_grad1_u12_bimo, (3, mo_num, mo_num, n_points_final_grid)] BEGIN_DOC ! 
@@ -121,14 +167,12 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_bimo, (3, ao_num, ao_num, n_po !$OMP DO SCHEDULE (dynamic) do ipoint = 1, n_points_final_grid - call ao_to_mo_bi_ortho( int2_grad1_u12_ao (1,1,1,ipoint), size(int2_grad1_u12_ao , 1) & - , int2_grad1_u12_bimo(1,1,1,ipoint), size(int2_grad1_u12_bimo, 1) ) - - call ao_to_mo_bi_ortho( int2_grad1_u12_ao (2,1,1,ipoint), size(int2_grad1_u12_ao , 1) & - , int2_grad1_u12_bimo(2,1,1,ipoint), size(int2_grad1_u12_bimo, 1) ) - - call ao_to_mo_bi_ortho( int2_grad1_u12_ao (3,1,1,ipoint), size(int2_grad1_u12_ao , 1) & - , int2_grad1_u12_bimo(3,1,1,ipoint), size(int2_grad1_u12_bimo, 1) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao (1,1,1,ipoint), size(int2_grad1_u12_ao , 2) & + , int2_grad1_u12_bimo(1,1,1,ipoint), size(int2_grad1_u12_bimo, 2) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao (2,1,1,ipoint), size(int2_grad1_u12_ao , 2) & + , int2_grad1_u12_bimo(2,1,1,ipoint), size(int2_grad1_u12_bimo, 2) ) + call ao_to_mo_bi_ortho( int2_grad1_u12_ao (3,1,1,ipoint), size(int2_grad1_u12_ao , 2) & + , int2_grad1_u12_bimo(3,1,1,ipoint), size(int2_grad1_u12_bimo, 2) ) enddo !$OMP END DO diff --git a/src/bi_ort_ints/three_body_ijm.irp.f b/src/bi_ort_ints/three_body_ijm.irp.f index 4fd85756..0e42264b 100644 --- a/src/bi_ort_ints/three_body_ijm.irp.f +++ b/src/bi_ort_ints/three_body_ijm.irp.f @@ -1,304 +1,366 @@ + +! --- + BEGIN_PROVIDER [ double precision, three_e_3_idx_direct_bi_ort, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the direct terms -! -! three_e_3_idx_direct_bi_ort(m,j,i) = -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_direct_bi_ort = 0.d0 - print*,'Providing the three_e_3_idx_direct_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the direct terms + ! + ! three_e_3_idx_direct_bi_ort(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, m + double precision :: integral, wall1, wall0 + + three_e_3_idx_direct_bi_ort = 0.d0 + print *, ' Providing the three_e_3_idx_direct_bi_ort ...' 
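+
+  ! only the m >= j triangle is computed in the parallel region below;
+  ! the m < j elements are then filled using the (m,j) <-> (j,m) symmetry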
+ call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_direct_bi_ort) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = j, mo_num - call give_integrals_3_body_bi_ort(m,j,i,m,j,i,integral) - three_e_3_idx_direct_bi_ort(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = j, mo_num + call give_integrals_3_body_bi_ort(m, j, i, m, j, i, integral) + three_e_3_idx_direct_bi_ort(m,j,i) = -1.d0 * integral + enddo enddo - enddo enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_3_idx_direct_bi_ort',wall1 - wall0 - + do i = 1, mo_num - do j = 1, mo_num - do m = 1, j - three_e_3_idx_direct_bi_ort(m,j,i) = three_e_3_idx_direct_bi_ort(j,m,i) + do j = 1, mo_num + do m = 1, j + three_e_3_idx_direct_bi_ort(m,j,i) = three_e_3_idx_direct_bi_ort(j,m,i) + enddo enddo - enddo enddo + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_direct_bi_ort', wall1 - wall0 + END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_3_idx_cycle_1_bi_ort, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the first cyclic permutation -! -! three_e_3_idx_direct_bi_ort(m,j,i) = -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_cycle_1_bi_ort = 0.d0 - print*,'Providing the three_e_3_idx_cycle_1_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the first cyclic permutation + ! + ! three_e_3_idx_direct_bi_ort(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, m + double precision :: integral, wall1, wall0 + + three_e_3_idx_cycle_1_bi_ort = 0.d0 + print *, ' Providing the three_e_3_idx_cycle_1_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_cycle_1_bi_ort) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = j, mo_num - call give_integrals_3_body_bi_ort(m,j,i,j,i,m,integral) - three_e_3_idx_cycle_1_bi_ort(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = j, mo_num + call give_integrals_3_body_bi_ort(m, j, i, j, i, m, integral) + three_e_3_idx_cycle_1_bi_ort(m,j,i) = -1.d0 * integral + enddo enddo - enddo enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) do i = 1, mo_num - do j = 1, mo_num - do m = 1, j - three_e_3_idx_cycle_1_bi_ort(m,j,i) = three_e_3_idx_cycle_1_bi_ort(j,m,i) + do j = 1, mo_num + do m = 1, j + three_e_3_idx_cycle_1_bi_ort(m,j,i) = three_e_3_idx_cycle_1_bi_ort(j,m,i) + enddo enddo - enddo enddo - print*,'wall time for three_e_3_idx_cycle_1_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_cycle_1_bi_ort', wall1 - wall0 END_PROVIDER +! 
--- + BEGIN_PROVIDER [ double precision, three_e_3_idx_cycle_2_bi_ort, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the second cyclic permutation -! -! three_e_3_idx_direct_bi_ort(m,j,i) = -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_cycle_2_bi_ort = 0.d0 - print*,'Providing the three_e_3_idx_cycle_2_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the second cyclic permutation + ! + ! three_e_3_idx_direct_bi_ort(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, m + double precision :: integral, wall1, wall0 + + three_e_3_idx_cycle_2_bi_ort = 0.d0 + print *, ' Providing the three_e_3_idx_cycle_2_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_cycle_2_bi_ort) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = j, mo_num - call give_integrals_3_body_bi_ort(m,j,i,i,m,j,integral) - three_e_3_idx_cycle_2_bi_ort(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = j, mo_num + call give_integrals_3_body_bi_ort(m, j, i, i, m, j, integral) + three_e_3_idx_cycle_2_bi_ort(m,j,i) = -1.d0 * integral + enddo enddo - enddo enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) + do i = 1, mo_num - do j = 1, mo_num - do m = 1, j - three_e_3_idx_cycle_2_bi_ort(m,j,i) = three_e_3_idx_cycle_2_bi_ort(j,m,i) + do j = 1, mo_num + do m = 1, j + three_e_3_idx_cycle_2_bi_ort(m,j,i) = three_e_3_idx_cycle_2_bi_ort(j,m,i) + enddo enddo - enddo enddo - print*,'wall time for three_e_3_idx_cycle_2_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_cycle_2_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_3_idx_exch23_bi_ort, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 2 and 3 -! -! three_e_3_idx_exch23_bi_ort(m,j,i) = -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_exch23_bi_ort = 0.d0 - print*,'Providing the three_e_3_idx_exch23_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 2 and 3 + ! + ! three_e_3_idx_exch23_bi_ort(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! 
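+  ! (left orbitals (m,j,i) and right orbitals (j,m,i) in give_integrals_3_body_bi_ort)
+  !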
+ END_DOC + + implicit none + integer :: i, j, m + double precision :: integral, wall1, wall0 + + three_e_3_idx_exch23_bi_ort = 0.d0 + print*,'Providing the three_e_3_idx_exch23_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_exch23_bi_ort) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = j, mo_num - call give_integrals_3_body_bi_ort(m,j,i,j,m,i,integral) - three_e_3_idx_exch23_bi_ort(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = j, mo_num + call give_integrals_3_body_bi_ort(m, j, i, j, m, i, integral) + three_e_3_idx_exch23_bi_ort(m,j,i) = -1.d0 * integral + enddo enddo - enddo enddo !$OMP END DO !$OMP END PARALLEL + do i = 1, mo_num - do j = 1, mo_num - do m = 1, j - three_e_3_idx_exch23_bi_ort(m,j,i) = three_e_3_idx_exch23_bi_ort(j,m,i) + do j = 1, mo_num + do m = 1, j + three_e_3_idx_exch23_bi_ort(m,j,i) = three_e_3_idx_exch23_bi_ort(j,m,i) + enddo enddo - enddo enddo - call wall_time(wall1) - print*,'wall time for three_e_3_idx_exch23_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_exch23_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_3_idx_exch13_bi_ort, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 1 and 3 -! -! three_e_3_idx_exch13_bi_ort(m,j,i) = -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_exch13_bi_ort = 0.d0 - print*,'Providing the three_e_3_idx_exch13_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 1 and 3 + ! + ! three_e_3_idx_exch13_bi_ort(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i,j,m + double precision :: integral, wall1, wall0 + + three_e_3_idx_exch13_bi_ort = 0.d0 + print *, ' Providing the three_e_3_idx_exch13_bi_ort ...' 
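+
+  ! each element below is -1 * give_integrals_3_body_bi_ort with left orbitals (m,j,i) and right orbitals (i,j,m)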
+ call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_exch13_bi_ort) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = j, mo_num - call give_integrals_3_body_bi_ort(m,j,i,i,j,m,integral) - three_e_3_idx_exch13_bi_ort(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = j, mo_num + call give_integrals_3_body_bi_ort(m, j, i, i, j, m,integral) + three_e_3_idx_exch13_bi_ort(m,j,i) = -1.d0 * integral + enddo enddo - enddo enddo !$OMP END DO !$OMP END PARALLEL + do i = 1, mo_num - do j = 1, mo_num - do m = 1, j - three_e_3_idx_exch13_bi_ort(m,j,i) = three_e_3_idx_exch13_bi_ort(j,m,i) + do j = 1, mo_num + do m = 1, j + three_e_3_idx_exch13_bi_ort(m,j,i) = three_e_3_idx_exch13_bi_ort(j,m,i) + enddo enddo - enddo enddo - call wall_time(wall1) - print*,'wall time for three_e_3_idx_exch13_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_exch13_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_3_idx_exch12_bi_ort, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 1 and 2 -! -! three_e_3_idx_exch12_bi_ort(m,j,i) = -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_exch12_bi_ort = 0.d0 - print*,'Providing the three_e_3_idx_exch12_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 1 and 2 + ! + ! three_e_3_idx_exch12_bi_ort(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, m + double precision :: integral, wall1, wall0 + + three_e_3_idx_exch12_bi_ort = 0.d0 + print *, ' Providing the three_e_3_idx_exch12_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_exch12_bi_ort) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,i,m,i,j,integral) - three_e_3_idx_exch12_bi_ort(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, i, m, i, j, integral) + three_e_3_idx_exch12_bi_ort(m,j,i) = -1.d0 * integral + enddo enddo - enddo enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_3_idx_exch12_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_exch12_bi_ort', wall1 - wall0 END_PROVIDER +! --- BEGIN_PROVIDER [ double precision, three_e_3_idx_exch12_bi_ort_new, (mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 1 and 2 -! -! three_e_3_idx_exch12_bi_ort_new(m,j,i) = -! -! 
notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_3_idx_exch12_bi_ort_new = 0.d0 - print*,'Providing the three_e_3_idx_exch12_bi_ort_new ...' - call wall_time(wall0) - name_file = 'six_index_tensor' - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator ON A BI ORTHONORMAL BASIS for the permutations of particle 1 and 2 + ! + ! three_e_3_idx_exch12_bi_ort_new(m,j,i) = + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, m + double precision :: integral, wall1, wall0 + + three_e_3_idx_exch12_bi_ort_new = 0.d0 + print *, ' Providing the three_e_3_idx_exch12_bi_ort_new ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,j,m,integral) & !$OMP SHARED (mo_num,three_e_3_idx_exch12_bi_ort_new) !$OMP DO SCHEDULE (dynamic) do i = 1, mo_num - do j = 1, mo_num - do m = j, mo_num - call give_integrals_3_body_bi_ort(m,j,i,m,i,j,integral) - three_e_3_idx_exch12_bi_ort_new(m,j,i) = -1.d0 * integral + do j = 1, mo_num + do m = j, mo_num + call give_integrals_3_body_bi_ort(m, j, i, m, i, j, integral) + three_e_3_idx_exch12_bi_ort_new(m,j,i) = -1.d0 * integral enddo enddo enddo !$OMP END DO !$OMP END PARALLEL + do i = 1, mo_num - do j = 1, mo_num - do m = 1, j - three_e_3_idx_exch12_bi_ort_new(m,j,i) = three_e_3_idx_exch12_bi_ort_new(j,m,i) + do j = 1, mo_num + do m = 1, j + three_e_3_idx_exch12_bi_ort_new(m,j,i) = three_e_3_idx_exch12_bi_ort_new(j,m,i) + enddo enddo - enddo enddo - call wall_time(wall1) - print*,'wall time for three_e_3_idx_exch12_bi_ort_new',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_3_idx_exch12_bi_ort_new', wall1 - wall0 END_PROVIDER +! --- + diff --git a/src/bi_ort_ints/three_body_ijmk.irp.f b/src/bi_ort_ints/three_body_ijmk.irp.f index 40c34ddf..0d5016ce 100644 --- a/src/bi_ort_ints/three_body_ijmk.irp.f +++ b/src/bi_ort_ints/three_body_ijmk.irp.f @@ -1,228 +1,284 @@ + +! --- + BEGIN_PROVIDER [ double precision, three_e_4_idx_direct_bi_ort, (mo_num, mo_num, mo_num, mo_num)] + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_4_idx_direct_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_4_idx_direct_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m + integer :: i, j, k, m double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_4_idx_direct_bi_ort = 0.d0 - print*,'Providing the three_e_4_idx_direct_bi_ort ...' 
- call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,integral) & - !$OMP SHARED (mo_num,three_e_4_idx_direct_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + three_e_4_idx_direct_bi_ort = 0.d0 + print *, ' Providing the three_e_4_idx_direct_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,integral) & + !$OMP SHARED (mo_num,three_e_4_idx_direct_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,k,m,j,i,integral) - three_e_4_idx_direct_bi_ort(m,j,k,i) = -1.d0 * integral + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, k, m, j, i, integral) + three_e_4_idx_direct_bi_ort(m,j,k,i) = -1.d0 * integral + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_4_idx_direct_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_4_idx_direct_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_4_idx_cycle_1_bi_ort, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_4_idx_cycle_1_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_4_idx_cycle_1_bi_ort = 0.d0 - print*,'Providing the three_e_4_idx_cycle_1_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,integral) & - !$OMP SHARED (mo_num,three_e_4_idx_cycle_1_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_4_idx_cycle_1_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m + double precision :: integral, wall1, wall0 + + three_e_4_idx_cycle_1_bi_ort = 0.d0 + print *, ' Providing the three_e_4_idx_cycle_1_bi_ort ...' 
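+
+  ! left orbitals (m,j,k), right orbitals (j,i,m); all four indices run over the full MO range, no triangular symmetry is used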
+ call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,integral) & + !$OMP SHARED (mo_num,three_e_4_idx_cycle_1_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,k,j,i,m,integral) - three_e_4_idx_cycle_1_bi_ort(m,j,k,i) = -1.d0 * integral + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, k, j, i, m, integral) + three_e_4_idx_cycle_1_bi_ort(m,j,k,i) = -1.d0 * integral + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_4_idx_cycle_1_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_4_idx_cycle_1_bi_ort', wall1 - wall0 END_PROVIDER +! -- BEGIN_PROVIDER [ double precision, three_e_4_idx_cycle_2_bi_ort, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_4_idx_cycle_2_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_4_idx_cycle_2_bi_ort = 0.d0 - print*,'Providing the three_e_4_idx_cycle_2_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,integral) & - !$OMP SHARED (mo_num,three_e_4_idx_cycle_2_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_4_idx_cycle_2_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m + double precision :: integral, wall1, wall0 + + three_e_4_idx_cycle_2_bi_ort = 0.d0 + print *, ' Providing the three_e_4_idx_cycle_2_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,integral) & + !$OMP SHARED (mo_num,three_e_4_idx_cycle_2_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,k,i,m,j,integral) - three_e_4_idx_cycle_2_bi_ort(m,j,k,i) = -1.d0 * integral + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, k, i, m, j, integral) + three_e_4_idx_cycle_2_bi_ort(m,j,k,i) = -1.d0 * integral + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_4_idx_cycle_2_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_4_idx_cycle_2_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_4_idx_exch23_bi_ort, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! 
matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_4_idx_exch23_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_4_idx_exch23_bi_ort = 0.d0 - print*,'Providing the three_e_4_idx_exch23_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,integral) & - !$OMP SHARED (mo_num,three_e_4_idx_exch23_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num - do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,k,j,m,i,integral) - three_e_4_idx_exch23_bi_ort(m,j,k,i) = -1.d0 * integral - enddo - enddo - enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_4_idx_exch23_bi_ort',wall1 - wall0 -END_PROVIDER + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_4_idx_exch23_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m + double precision :: integral, wall1, wall0 + + three_e_4_idx_exch23_bi_ort = 0.d0 + print *, ' Providing the three_e_4_idx_exch23_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,integral) & + !$OMP SHARED (mo_num,three_e_4_idx_exch23_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num + do k = 1, mo_num + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, k, j, m, i, integral) + three_e_4_idx_exch23_bi_ort(m,j,k,i) = -1.d0 * integral + enddo + enddo + enddo + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_4_idx_exch23_bi_ort', wall1 - wall0 + +END_PROVIDER + +! --- BEGIN_PROVIDER [ double precision, three_e_4_idx_exch13_bi_ort, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_4_idx_exch13_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_4_idx_exch13_bi_ort = 0.d0 - print*,'Providing the three_e_4_idx_exch13_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,integral) & - !$OMP SHARED (mo_num,three_e_4_idx_exch13_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_4_idx_exch13_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! 
notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + END_DOC + + implicit none + integer :: i, j, k, m + double precision :: integral, wall1, wall0 + + three_e_4_idx_exch13_bi_ort = 0.d0 + print *, ' Providing the three_e_4_idx_exch13_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,integral) & + !$OMP SHARED (mo_num,three_e_4_idx_exch13_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,k,i,j,m,integral) - three_e_4_idx_exch13_bi_ort(m,j,k,i) = -1.d0 * integral + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, k, i, j, m, integral) + three_e_4_idx_exch13_bi_ort(m,j,k,i) = -1.d0 * integral + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_4_idx_exch13_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_4_idx_exch13_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_4_idx_exch12_bi_ort, (mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_4_idx_exch12_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_4_idx_exch12_bi_ort = 0.d0 - print*,'Providing the three_e_4_idx_exch12_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,integral) & - !$OMP SHARED (mo_num,three_e_4_idx_exch12_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF SINGLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_4_idx_exch12_bi_ort(m,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m + double precision :: integral, wall1, wall0 + + three_e_4_idx_exch12_bi_ort = 0.d0 + print *, ' Providing the three_e_4_idx_exch12_bi_ort ...' 
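+
+  ! left orbitals (m,j,k), right orbitals (m,i,j) in give_integrals_3_body_bi_ort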
+ call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,integral) & + !$OMP SHARED (mo_num,three_e_4_idx_exch12_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,j,k,m,i,j,integral) - three_e_4_idx_exch12_bi_ort(m,j,k,i) = -1.d0 * integral + do j = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, j, k, m, i, j, integral) + three_e_4_idx_exch12_bi_ort(m,j,k,i) = -1.d0 * integral + enddo enddo - enddo enddo - enddo + enddo !$OMP END DO !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_4_idx_exch12_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_e_4_idx_exch12_bi_ort', wall1 - wall0 END_PROVIDER + +! --- + diff --git a/src/bi_ort_ints/three_body_ijmkl.irp.f b/src/bi_ort_ints/three_body_ijmkl.irp.f index 72e93955..6287c5a3 100644 --- a/src/bi_ort_ints/three_body_ijmkl.irp.f +++ b/src/bi_ort_ints/three_body_ijmkl.irp.f @@ -1,240 +1,296 @@ + +! --- + BEGIN_PROVIDER [ double precision, three_e_5_idx_direct_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_5_idx_direct_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m,l - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_5_idx_direct_bi_ort = 0.d0 - print*,'Providing the three_e_5_idx_direct_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,l,integral) & - !$OMP SHARED (mo_num,three_e_5_idx_direct_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_5_idx_direct_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + END_DOC + + implicit none + integer :: i, j, k, m, l + double precision :: integral, wall1, wall0 + + three_e_5_idx_direct_bi_ort = 0.d0 + print *, ' Providing the three_e_5_idx_direct_bi_ort ...' 
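+
+  ! left orbitals (m,l,k), right orbitals (m,j,i); the array holds mo_num^5 double precision elements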
+ call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,l,integral) & + !$OMP SHARED (mo_num,three_e_5_idx_direct_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do l = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,l,k,m,j,i,integral) - three_e_5_idx_direct_bi_ort(m,l,j,k,i) = -1.d0 * integral - enddo + do j = 1, mo_num + do l = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, l, k, m, j, i, integral) + three_e_5_idx_direct_bi_ort(m,l,j,k,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_5_idx_direct_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_5_idx_direct_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_5_idx_cycle_1_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_5_idx_cycle_1_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m,l - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_5_idx_cycle_1_bi_ort = 0.d0 - print*,'Providing the three_e_5_idx_cycle_1_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,l,integral) & - !$OMP SHARED (mo_num,three_e_5_idx_cycle_1_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_5_idx_cycle_1_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m, l + double precision :: integral, wall1, wall0 + + three_e_5_idx_cycle_1_bi_ort = 0.d0 + print *, ' Providing the three_e_5_idx_cycle_1_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,l,integral) & + !$OMP SHARED (mo_num,three_e_5_idx_cycle_1_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do l = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,l,k,j,i,m,integral) - three_e_5_idx_cycle_1_bi_ort(m,l,j,k,i) = -1.d0 * integral - enddo + do j = 1, mo_num + do l = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, l, k, j, i, m, integral) + three_e_5_idx_cycle_1_bi_ort(m,l,j,k,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_5_idx_cycle_1_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_5_idx_cycle_1_bi_ort', wall1 - wall0 END_PROVIDER +! 
--- BEGIN_PROVIDER [ double precision, three_e_5_idx_cycle_2_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_5_idx_cycle_2_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m,l - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_5_idx_cycle_2_bi_ort = 0.d0 - print*,'Providing the three_e_5_idx_cycle_2_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,l,integral) & - !$OMP SHARED (mo_num,three_e_5_idx_cycle_2_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE FIRST CYCLIC PERMUTATION TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_5_idx_cycle_2_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m, l + double precision :: integral, wall1, wall0 + + three_e_5_idx_cycle_2_bi_ort = 0.d0 + print *, ' Providing the three_e_5_idx_cycle_2_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,l,integral) & + !$OMP SHARED (mo_num,three_e_5_idx_cycle_2_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do m = 1, mo_num - do l = 1, mo_num - call give_integrals_3_body_bi_ort(m,l,k,i,m,j,integral) - three_e_5_idx_cycle_2_bi_ort(m,l,j,k,i) = -1.d0 * integral - enddo + do j = 1, mo_num + do m = 1, mo_num + do l = 1, mo_num + call give_integrals_3_body_bi_ort(m, l, k, i, m, j, integral) + three_e_5_idx_cycle_2_bi_ort(m,l,j,k,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_5_idx_cycle_2_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_5_idx_cycle_2_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_5_idx_exch23_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_5_idx_exch23_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m,l - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_5_idx_exch23_bi_ort = 0.d0 - print*,'Providing the three_e_5_idx_exch23_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,l,integral) & - !$OMP SHARED (mo_num,three_e_5_idx_exch23_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! 
matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_5_idx_exch23_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m, l + double precision :: integral, wall1, wall0 + + three_e_5_idx_exch23_bi_ort = 0.d0 + print *, ' Providing the three_e_5_idx_exch23_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,l,integral) & + !$OMP SHARED (mo_num,three_e_5_idx_exch23_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do l = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,l,k,j,m,i,integral) - three_e_5_idx_exch23_bi_ort(m,l,j,k,i) = -1.d0 * integral - enddo + do j = 1, mo_num + do l = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, l, k, j, m, i, integral) + three_e_5_idx_exch23_bi_ort(m,l,j,k,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_5_idx_exch23_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_5_idx_exch23_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_5_idx_exch13_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_5_idx_exch13_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m,l - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_5_idx_exch13_bi_ort = 0.d0 - print*,'Providing the three_e_5_idx_exch13_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,l,integral) & - !$OMP SHARED (mo_num,three_e_5_idx_exch13_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_5_idx_exch13_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m, l + double precision :: integral, wall1, wall0 + + three_e_5_idx_exch13_bi_ort = 0.d0 + print *, ' Providing the three_e_5_idx_exch13_bi_ort ...' 
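+
+  ! left orbitals (m,l,k), right orbitals (i,j,m) in give_integrals_3_body_bi_ort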
+ call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,l,integral) & + !$OMP SHARED (mo_num,three_e_5_idx_exch13_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do l = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,l,k,i,j,m,integral) - three_e_5_idx_exch13_bi_ort(m,l,j,k,i) = -1.d0 * integral - enddo + do j = 1, mo_num + do l = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, l, k, i, j, m, integral) + three_e_5_idx_exch13_bi_ort(m,l,j,k,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_5_idx_exch13_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_5_idx_exch13_bi_ort', wall1 - wall0 END_PROVIDER +! --- + BEGIN_PROVIDER [ double precision, three_e_5_idx_exch12_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none - BEGIN_DOC -! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs -! -!three_e_5_idx_exch12_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO -! -! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign - END_DOC - integer :: i,j,k,m,l - double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_e_5_idx_exch12_bi_ort = 0.d0 - print*,'Providing the three_e_5_idx_exch12_bi_ort ...' - call wall_time(wall0) - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,m,l,integral) & - !$OMP SHARED (mo_num,three_e_5_idx_exch12_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + BEGIN_DOC + ! + ! matrix element of the -L three-body operator FOR THE DIRECT TERMS OF DOUBLE EXCITATIONS AND BI ORTHO MOs + ! + ! three_e_5_idx_exch12_bi_ort(m,l,j,k,i) = ::: notice that i is the RIGHT MO and k is the LEFT MO + ! + ! notice the -1 sign: in this way three_e_3_idx_direct_bi_ort can be directly used to compute Slater rules with a + sign + ! + END_DOC + + implicit none + integer :: i, j, k, m, l + double precision :: integral, wall1, wall0 + + three_e_5_idx_exch12_bi_ort = 0.d0 + print *, ' Providing the three_e_5_idx_exch12_bi_ort ...' + call wall_time(wall0) + + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,m,l,integral) & + !$OMP SHARED (mo_num,three_e_5_idx_exch12_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do k = 1, mo_num - do j = 1, mo_num - do l = 1, mo_num - do m = 1, mo_num - call give_integrals_3_body_bi_ort(m,l,k,m,i,j,integral) - three_e_5_idx_exch12_bi_ort(m,l,j,k,i) = -1.d0 * integral - enddo + do j = 1, mo_num + do l = 1, mo_num + do m = 1, mo_num + call give_integrals_3_body_bi_ort(m, l, k, m, i, j, integral) + three_e_5_idx_exch12_bi_ort(m,l,j,k,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL - call wall_time(wall1) - print*,'wall time for three_e_5_idx_exch12_bi_ort',wall1 - wall0 + enddo + !$OMP END DO + !$OMP END PARALLEL + + call wall_time(wall1) + print *, ' wall time for three_e_5_idx_exch12_bi_ort', wall1 - wall0 END_PROVIDER + +! 
--- + diff --git a/src/bi_ort_ints/three_body_ints_bi_ort.irp.f b/src/bi_ort_ints/three_body_ints_bi_ort.irp.f index 12361ace..2cca84a2 100644 --- a/src/bi_ort_ints/three_body_ints_bi_ort.irp.f +++ b/src/bi_ort_ints/three_body_ints_bi_ort.irp.f @@ -1,17 +1,24 @@ + +! --- + BEGIN_PROVIDER [ double precision, three_body_ints_bi_ort, (mo_num, mo_num, mo_num, mo_num, mo_num, mo_num)] - implicit none + BEGIN_DOC ! matrix element of the -L three-body operator ! ! notice the -1 sign: in this way three_body_ints_bi_ort can be directly used to compute Slater rules :) END_DOC - integer :: i,j,k,l,m,n + + implicit none + integer :: i, j, k, l, m, n double precision :: integral, wall1, wall0 - character*(128) :: name_file - three_body_ints_bi_ort = 0.d0 - print*,'Providing the three_body_ints_bi_ort ...' - call wall_time(wall0) - name_file = 'six_index_tensor' + character*(128) :: name_file + + three_body_ints_bi_ort = 0.d0 + print*,'Providing the three_body_ints_bi_ort ...' + call wall_time(wall0) + name_file = 'six_index_tensor' + ! if(read_three_body_ints_bi_ort)then ! call read_fcidump_3_tc(three_body_ints_bi_ort) ! else @@ -19,32 +26,37 @@ BEGIN_PROVIDER [ double precision, three_body_ints_bi_ort, (mo_num, mo_num, mo_n ! print*,'Reading three_body_ints_bi_ort from disk ...' ! call read_array_6_index_tensor(mo_num,three_body_ints_bi_ort,name_file) ! else - provide x_W_ki_bi_ortho_erf_rk mos_r_in_r_array_transp mos_l_in_r_array_transp - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & - !$OMP PRIVATE (i,j,k,l,m,n,integral) & - !$OMP SHARED (mo_num,three_body_ints_bi_ort) - !$OMP DO SCHEDULE (dynamic) - do i = 1, mo_num + + !provide x_W_ki_bi_ortho_erf_rk + provide mos_r_in_r_array_transp mos_l_in_r_array_transp + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & + !$OMP PRIVATE (i,j,k,l,m,n,integral) & + !$OMP SHARED (mo_num,three_body_ints_bi_ort) + !$OMP DO SCHEDULE (dynamic) + do i = 1, mo_num do j = 1, mo_num - do m = 1, mo_num - do k = 1, mo_num - do l = 1, mo_num - do n = 1, mo_num - call give_integrals_3_body_bi_ort(n,l,k,m,j,i,integral) - three_body_ints_bi_ort(n,l,k,m,j,i) = -1.d0 * integral + do m = 1, mo_num + do k = 1, mo_num + do l = 1, mo_num + do n = 1, mo_num + call give_integrals_3_body_bi_ort(n, l, k, m, j, i, integral) + + three_body_ints_bi_ort(n,l,k,m,j,i) = -1.d0 * integral + enddo + enddo enddo - enddo enddo - enddo enddo - enddo - !$OMP END DO - !$OMP END PARALLEL + enddo + !$OMP END DO + !$OMP END PARALLEL ! endif ! endif - call wall_time(wall1) - print*,'wall time for three_body_ints_bi_ort',wall1 - wall0 + + call wall_time(wall1) + print *, ' wall time for three_body_ints_bi_ort', wall1 - wall0 ! if(write_three_body_ints_bi_ort)then ! print*,'Writing three_body_ints_bi_ort on disk ...' ! call write_array_6_index_tensor(mo_num,three_body_ints_bi_ort,name_file) @@ -64,7 +76,7 @@ subroutine give_integrals_3_body_bi_ort(n, l, k, m, j, i, integral) END_DOC implicit none - integer, intent(in) :: n,l,k,m,j,i + integer, intent(in) :: n, l, k, m, j, i double precision, intent(out) :: integral integer :: ipoint double precision :: weight @@ -86,18 +98,31 @@ subroutine give_integrals_3_body_bi_ort(n, l, k, m, j, i, integral) ! + x_W_ki_bi_ortho_erf_rk(ipoint,2,l,j) * x_W_ki_bi_ortho_erf_rk(ipoint,2,k,i) & ! 
+ x_W_ki_bi_ortho_erf_rk(ipoint,3,l,j) * x_W_ki_bi_ortho_erf_rk(ipoint,3,k,i) ) - integral += weight * mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & - * ( int2_grad1_u12_bimo(1,ipoint,n,m) * int2_grad1_u12_bimo(1,ipoint,l,j) & - + int2_grad1_u12_bimo(2,ipoint,n,m) * int2_grad1_u12_bimo(2,ipoint,l,j) & - + int2_grad1_u12_bimo(3,ipoint,n,m) * int2_grad1_u12_bimo(3,ipoint,l,j) ) - integral += weight * mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & - * ( int2_grad1_u12_bimo(1,ipoint,n,m) * int2_grad1_u12_bimo(1,ipoint,k,i) & - + int2_grad1_u12_bimo(2,ipoint,n,m) * int2_grad1_u12_bimo(2,ipoint,k,i) & - + int2_grad1_u12_bimo(3,ipoint,n,m) * int2_grad1_u12_bimo(3,ipoint,k,i) ) - integral += weight * mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & - * ( int2_grad1_u12_bimo(1,ipoint,l,j) * int2_grad1_u12_bimo(1,ipoint,k,i) & - + int2_grad1_u12_bimo(2,ipoint,l,j) * int2_grad1_u12_bimo(2,ipoint,k,i) & - + int2_grad1_u12_bimo(3,ipoint,l,j) * int2_grad1_u12_bimo(3,ipoint,k,i) ) +! integral += weight * mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & +! * ( int2_grad1_u12_bimo(1,n,m,ipoint) * int2_grad1_u12_bimo(1,l,j,ipoint) & +! + int2_grad1_u12_bimo(2,n,m,ipoint) * int2_grad1_u12_bimo(2,l,j,ipoint) & +! + int2_grad1_u12_bimo(3,n,m,ipoint) * int2_grad1_u12_bimo(3,l,j,ipoint) ) +! integral += weight * mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & +! * ( int2_grad1_u12_bimo(1,n,m,ipoint) * int2_grad1_u12_bimo(1,k,i,ipoint) & +! + int2_grad1_u12_bimo(2,n,m,ipoint) * int2_grad1_u12_bimo(2,k,i,ipoint) & +! + int2_grad1_u12_bimo(3,n,m,ipoint) * int2_grad1_u12_bimo(3,k,i,ipoint) ) +! integral += weight * mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & +! * ( int2_grad1_u12_bimo(1,l,j,ipoint) * int2_grad1_u12_bimo(1,k,i,ipoint) & +! + int2_grad1_u12_bimo(2,l,j,ipoint) * int2_grad1_u12_bimo(2,k,i,ipoint) & +! 
+ int2_grad1_u12_bimo(3,l,j,ipoint) * int2_grad1_u12_bimo(3,k,i,ipoint) ) + + integral += weight * mos_l_in_r_array_transp(ipoint,k) * mos_r_in_r_array_transp(ipoint,i) & + * ( int2_grad1_u12_bimo_transp(n,m,1,ipoint) * int2_grad1_u12_bimo_transp(l,j,1,ipoint) & + + int2_grad1_u12_bimo_transp(n,m,2,ipoint) * int2_grad1_u12_bimo_transp(l,j,2,ipoint) & + + int2_grad1_u12_bimo_transp(n,m,3,ipoint) * int2_grad1_u12_bimo_transp(l,j,3,ipoint) ) + integral += weight * mos_l_in_r_array_transp(ipoint,l) * mos_r_in_r_array_transp(ipoint,j) & + * ( int2_grad1_u12_bimo_transp(n,m,1,ipoint) * int2_grad1_u12_bimo_transp(k,i,1,ipoint) & + + int2_grad1_u12_bimo_transp(n,m,2,ipoint) * int2_grad1_u12_bimo_transp(k,i,2,ipoint) & + + int2_grad1_u12_bimo_transp(n,m,3,ipoint) * int2_grad1_u12_bimo_transp(k,i,3,ipoint) ) + integral += weight * mos_l_in_r_array_transp(ipoint,n) * mos_r_in_r_array_transp(ipoint,m) & + * ( int2_grad1_u12_bimo_transp(l,j,1,ipoint) * int2_grad1_u12_bimo_transp(k,i,1,ipoint) & + + int2_grad1_u12_bimo_transp(l,j,2,ipoint) * int2_grad1_u12_bimo_transp(k,i,2,ipoint) & + + int2_grad1_u12_bimo_transp(l,j,3,ipoint) * int2_grad1_u12_bimo_transp(k,i,3,ipoint) ) enddo diff --git a/src/bi_ort_ints/total_twoe_pot.irp.f b/src/bi_ort_ints/total_twoe_pot.irp.f index 72ded7cf..89f46a05 100644 --- a/src/bi_ort_ints/total_twoe_pot.irp.f +++ b/src/bi_ort_ints/total_twoe_pot.irp.f @@ -17,23 +17,42 @@ BEGIN_PROVIDER [double precision, ao_two_e_tc_tot, (ao_num, ao_num, ao_num, ao_n double precision :: integral_sym, integral_nsym double precision, external :: get_ao_tc_sym_two_e_pot - PROVIDE ao_tc_sym_two_e_pot_in_map + provide j1b_type - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num + if(j1b_type .eq. 3) then - integral_sym = get_ao_tc_sym_two_e_pot(i,j,k,l,ao_tc_sym_two_e_pot_map) - - ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis - integral_nsym = ao_non_hermit_term_chemist(k,i,l,j) - - ao_two_e_tc_tot(k,i,l,j) = integral_sym + integral_nsym + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ao_two_e_tc_tot(k,i,l,j) = ao_tc_int_chemist(k,i,l,j) + !write(222,*) ao_two_e_tc_tot(k,i,l,j) + enddo enddo enddo enddo - enddo + + else + + PROVIDE ao_tc_sym_two_e_pot_in_map + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + + integral_sym = get_ao_tc_sym_two_e_pot(i, j, k, l, ao_tc_sym_two_e_pot_map) + ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis + integral_nsym = ao_non_hermit_term_chemist(k,i,l,j) + + ao_two_e_tc_tot(k,i,l,j) = integral_sym + integral_nsym + !write(111,*) ao_two_e_tc_tot(k,i,l,j) + enddo + enddo + enddo + enddo + + endif END_PROVIDER @@ -42,9 +61,11 @@ END_PROVIDER double precision function bi_ortho_mo_ints(l, k, j, i) BEGIN_DOC + ! ! ! ! WARNING :: very naive, super slow, only used to DEBUG. + ! END_DOC implicit none diff --git a/src/bi_ortho_mos/mos_rl.irp.f b/src/bi_ortho_mos/mos_rl.irp.f index b6e93c17..034a436e 100644 --- a/src/bi_ortho_mos/mos_rl.irp.f +++ b/src/bi_ortho_mos/mos_rl.irp.f @@ -1,33 +1,37 @@ + +! --- + subroutine ao_to_mo_bi_ortho(A_ao, LDA_ao, A_mo, LDA_mo) BEGIN_DOC + ! ! Transform A from the |AO| basis to the BI ORTHONORMAL MOS ! ! $C_L^\dagger.A_{ao}.C_R$ where C_L and C_R are the LEFT and RIGHT MO coefs + ! 
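+  ! The transformation is carried out in two dgemm steps (see the code below):
+  !    T    = A_ao . C_R        (ao_num x mo_num)
+  !    A_mo = C_L^T . T         (mo_num x mo_num)
+  !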
END_DOC implicit none - integer, intent(in) :: LDA_ao,LDA_mo + integer, intent(in) :: LDA_ao, LDA_mo double precision, intent(in) :: A_ao(LDA_ao,ao_num) double precision, intent(out) :: A_mo(LDA_mo,mo_num) double precision, allocatable :: T(:,:) allocate ( T(ao_num,mo_num) ) !DIR$ ATTRIBUTES ALIGN : $IRP_ALIGN :: T - integer :: i,j,p,q - call dgemm('N', 'N', ao_num, mo_num, ao_num, & - 1.d0, A_ao, LDA_ao, & - mo_r_coef, size(mo_r_coef, 1), & - 0.d0, T, size(T, 1)) + ! T = A_ao x mo_r_coef + call dgemm( 'N', 'N', ao_num, mo_num, ao_num, 1.d0 & + , A_ao, LDA_ao, mo_r_coef, size(mo_r_coef, 1) & + , 0.d0, T, size(T, 1) ) - call dgemm('T', 'N', mo_num, mo_num, ao_num, & - 1.d0, mo_l_coef, size(mo_l_coef, 1), & - T, ao_num, & - 0.d0, A_mo, size(A_mo, 1)) + ! A_mo = mo_l_coef.T x T + call dgemm( 'T', 'N', mo_num, mo_num, ao_num, 1.d0 & + , mo_l_coef, size(mo_l_coef, 1), T, size(T, 1) & + , 0.d0, A_mo, LDA_mo ) ! call restore_symmetry(mo_num,mo_num,A_mo,size(A_mo,1),1.d-12) - deallocate(T) + deallocate(T) end subroutine ao_to_mo_bi_ortho @@ -131,7 +135,7 @@ BEGIN_PROVIDER [ double precision, mo_l_coef, (ao_num, mo_num) ] IRP_ENDIF else - print*, 'mo_r_coef are mo_coef' + print*, 'mo_l_coef are mo_coef' do i = 1, mo_num do j = 1, ao_num mo_l_coef(j,i) = mo_coef(j,i) diff --git a/src/non_h_ints_mu/debug_fit.irp.f b/src/non_h_ints_mu/debug_fit.irp.f new file mode 100644 index 00000000..af441335 --- /dev/null +++ b/src/non_h_ints_mu/debug_fit.irp.f @@ -0,0 +1,512 @@ + +! -- + +program debug_fit + + implicit none + + my_grid_becke = .True. + + my_n_pt_r_grid = 30 + my_n_pt_a_grid = 50 + !my_n_pt_r_grid = 100 + !my_n_pt_a_grid = 170 + !my_n_pt_r_grid = 150 + !my_n_pt_a_grid = 194 + touch my_grid_becke my_n_pt_r_grid my_n_pt_a_grid + + PROVIDE mu_erf j1b_pen + + !call test_j1b_nucl() + call test_grad_j1b_nucl() + !call test_lapl_j1b_nucl() + + !call test_list_b2() + !call test_list_b3() + + call test_fit_u() + !call test_fit_u2() + !call test_fit_ugradu() + +end + +! --- + +subroutine test_j1b_nucl() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: j1b_nucl + + print*, ' test_j1b_nucl ...' + + PROVIDE v_1b + + eps_ij = 1d-7 + acc_tot = 0.d0 + normalz = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b(ipoint) + i_num = j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in v_1b on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_j1b_nucl + +! --- + +subroutine test_grad_j1b_nucl() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + print*, ' test_grad_j1b_nucl ...' 
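+
+  ! Each Cartesian component of the analytical gradient v_1b_grad(1:3,ipoint)
+  ! is compared to the numerical functions grad_x_j1b_nucl, grad_y_j1b_nucl
+  ! and grad_z_j1b_nucl evaluated at the same grid point; any deviation larger
+  ! than eps_ij is reported together with the grid-point index.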
+ + PROVIDE v_1b_grad + + eps_ij = 1d-7 + acc_tot = 0.d0 + normalz = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_grad(1,ipoint) + i_num = grad_x_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in x of v_1b_grad on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + i_exc = v_1b_grad(2,ipoint) + i_num = grad_y_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in y of v_1b_grad on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + i_exc = v_1b_grad(3,ipoint) + i_num = grad_z_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in z of v_1b_grad on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_grad_j1b_nucl + +! --- + +subroutine test_lapl_j1b_nucl() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: lapl_j1b_nucl + + print*, ' test_lapl_j1b_nucl ...' + + PROVIDE v_1b_lapl + + eps_ij = 1d-5 + acc_tot = 0.d0 + normalz = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_lapl(ipoint) + i_num = lapl_j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in v_1b_lapl on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_lapl_j1b_nucl + +! --- + +subroutine test_list_b2() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: r(3) + double precision, external :: j1b_nucl + + print*, ' test_list_b2 ...' + + PROVIDE v_1b_list_b2 + + eps_ij = 1d-7 + acc_tot = 0.d0 + normalz = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_list_b2(ipoint) + i_num = j1b_nucl(r) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in list_b2 on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_list_b2 + +! --- + +subroutine test_list_b3() + + implicit none + integer :: ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_tmp, i_num, normalz + double precision :: r(3) + double precision, external :: j1b_nucl + + print*, ' test_list_b3 ...' 
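+
+  ! v_1b_list_b3(ipoint) is expected to reproduce the square of the one-body
+  ! Jastrow factor, i.e. [j1b_nucl(r)]^2, hence the i_tmp * i_tmp reference below.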
+ + PROVIDE v_1b_list_b3 + + eps_ij = 1d-7 + acc_tot = 0.d0 + normalz = 0.d0 + + do ipoint = 1, n_points_final_grid + + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + + i_exc = v_1b_list_b3(ipoint) + i_tmp = j1b_nucl(r) + i_num = i_tmp * i_tmp + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in list_b3 on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo + + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_list_b3 + +! --- + +subroutine test_fit_ugradu() + + implicit none + + integer :: jpoint, ipoint, i + double precision :: i_exc, i_fit, i_num, x2, tmp, dx, dy, dz + double precision :: r1(3), r2(3), grad(3) + double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + + double precision, external :: j12_mu + + print*, ' test_fit_ugradu ...' + + eps_ij = 1d-3 + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + acc_tot = 0.d0 + normalz = 0.d0 + do ipoint = 1, n_points_final_grid + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + x2 = dx * dx + dy * dy + dz * dz + if(x2 .lt. 1d-10) cycle + + i_fit = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_1_erf(i) + coef = coef_gauss_j_mu_1_erf(i) + i_fit += coef * dexp(-expo*x2) + enddo + i_fit = i_fit / dsqrt(x2) + + tmp = j12_mu(r1, r2) + call grad1_j12_mu_exc(r1, r2, grad) + + ! --- + + i_exc = tmp * grad(1) + i_num = i_fit * dx + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem on x in test_fit_ugradu on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_exc) + + ! --- + + i_exc = tmp * grad(2) + i_num = i_fit * dy + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem on y in test_fit_ugradu on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_exc) + + ! --- + + i_exc = tmp * grad(3) + i_num = i_fit * dz + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem on z in test_fit_ugradu on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_exc) + + ! --- + + enddo + + if( (acc_tot/normalz) .gt. 1d-3 ) then + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + endif + enddo + + return +end subroutine test_fit_ugradu + +! --- + +subroutine test_fit_u() + + implicit none + + integer :: jpoint, ipoint, i + double precision :: i_exc, i_fit, i_num, x2 + double precision :: r1(3), r2(3), dx, dy, dz + double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + + double precision, external :: j12_mu + + print*, ' test_fit_u ...' 
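+
+  ! The Gaussian expansion sum_i coef_gauss_j_mu_x(i) * exp(-expo_gauss_j_mu_x(i) * r12^2)
+  ! is compared to the exact Jastrow factor j12_mu(r1,r2) on all pairs of grid
+  ! points; pairs with r12^2 < 1d-10 (nearly coincident points) are skipped.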
+ + eps_ij = 1d-3 + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + acc_tot = 0.d0 + normalz = 0.d0 + do ipoint = 1, n_points_final_grid + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + x2 = dx * dx + dy * dy + dz * dz + if(x2 .lt. 1d-10) cycle + + i_fit = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_x(i) + coef = coef_gauss_j_mu_x(i) + i_fit += coef * dexp(-expo*x2) + enddo + + i_exc = j12_mu(r1, r2) + i_num = i_fit + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in test_fit_u on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_exc) + enddo + + if( (acc_tot/normalz) .gt. 1d-3 ) then + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + endif + enddo + + return +end subroutine test_fit_u + +! --- + +subroutine test_fit_u2() + + implicit none + + integer :: jpoint, ipoint, i + double precision :: i_exc, i_fit, i_num, x2 + double precision :: r1(3), r2(3), dx, dy, dz, tmp + double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + + double precision, external :: j12_mu + + print*, ' test_fit_u2 ...' + + eps_ij = 1d-3 + + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + acc_tot = 0.d0 + normalz = 0.d0 + do ipoint = 1, n_points_final_grid + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + x2 = dx * dx + dy * dy + dz * dz + if(x2 .lt. 1d-10) cycle + + i_fit = 0.d0 + do i = 1, n_max_fit_slat + expo = expo_gauss_j_mu_x_2(i) + coef = coef_gauss_j_mu_x_2(i) + i_fit += coef * dexp(-expo*x2) + enddo + + tmp = j12_mu(r1, r2) + i_exc = tmp * tmp + i_num = i_fit + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in test_fit_u2 on', ipoint + print *, ' analyt = ', i_exc + print *, ' numeri = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_exc) + enddo + + if( (acc_tot/normalz) .gt. 1d-3 ) then + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + endif + enddo + + return +end subroutine test_fit_u2 + +! --- + + diff --git a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f index 7b99cc91..bb585f63 100644 --- a/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f +++ b/src/non_h_ints_mu/debug_integ_jmu_modif.irp.f @@ -17,25 +17,19 @@ program debug_integ_jmu_modif PROVIDE mu_erf j1b_pen - !call test_j1b_nucl() - !call test_grad_j1b_nucl() - !call test_lapl_j1b_nucl() - - !call test_list_b2() - !call test_list_b3() - - !call test_fit_u() - call test_fit_u2() - !call test_fit_ugradu() - - !call test_v_ij_u_cst_mu_j1b() - !call test_v_ij_erf_rk_cst_mu_j1b() - !call test_x_v_ij_erf_rk_cst_mu_j1b() - !call test_int2_u2_j1b2() - !call test_int2_grad1u2_grad2u2_j1b2() - - !call test_int2_grad1_u12_ao() - !call test_gradu_squared_u_ij_mu() + call test_v_ij_u_cst_mu_j1b() +! call test_v_ij_erf_rk_cst_mu_j1b() +! call test_x_v_ij_erf_rk_cst_mu_j1b() +! call test_int2_u2_j1b2() +! call test_int2_grad1u2_grad2u2_j1b2() +! call test_int2_u_grad1u_total_j1b2() +! 
+! call test_int2_grad1_u12_ao() +! +! call test_grad12_j12() +! call test_u12sq_j1bsq() +! call test_u12_grad1_u12_j1b_grad1_j1b() +! !call test_gradu_squared_u_ij_mu() end @@ -52,8 +46,9 @@ subroutine test_v_ij_u_cst_mu_j1b() PROVIDE v_ij_u_cst_mu_j1b - eps_ij = 1d-8 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -76,9 +71,8 @@ subroutine test_v_ij_u_cst_mu_j1b() enddo enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_v_ij_u_cst_mu_j1b @@ -96,8 +90,9 @@ subroutine test_v_ij_erf_rk_cst_mu_j1b() PROVIDE v_ij_erf_rk_cst_mu_j1b - eps_ij = 1d-8 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -120,9 +115,8 @@ subroutine test_v_ij_erf_rk_cst_mu_j1b() enddo enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_v_ij_erf_rk_cst_mu_j1b @@ -140,8 +134,9 @@ subroutine test_x_v_ij_erf_rk_cst_mu_j1b() PROVIDE x_v_ij_erf_rk_cst_mu_j1b - eps_ij = 1d-8 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -190,9 +185,8 @@ subroutine test_x_v_ij_erf_rk_cst_mu_j1b() enddo enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_x_v_ij_erf_rk_cst_mu_j1b @@ -210,8 +204,9 @@ subroutine test_int2_u2_j1b2() PROVIDE int2_u2_j1b2 - eps_ij = 1d-8 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -235,8 +230,8 @@ subroutine test_int2_u2_j1b2() enddo acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_int2_u2_j1b2 @@ -254,8 +249,9 @@ subroutine test_int2_grad1u2_grad2u2_j1b2() PROVIDE int2_grad1u2_grad2u2_j1b2 - eps_ij = 1d-8 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 !do ipoint = 1, 10 do ipoint = 1, n_points_final_grid @@ -278,9 +274,8 @@ subroutine test_int2_grad1u2_grad2u2_j1b2() enddo enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_int2_grad1u2_grad2u2_j1b2 @@ -298,8 +293,9 @@ subroutine test_int2_grad1_u12_ao() PROVIDE int2_grad1_u12_ao - eps_ij = 1d-6 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 do ipoint = 1, n_points_final_grid do j = 1, ao_num @@ -347,15 +343,90 @@ subroutine test_int2_grad1_u12_ao() enddo enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_int2_grad1_u12_ao ! --- +subroutine test_int2_u_grad1u_total_j1b2() + + implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision :: x, y, z + double precision :: integ(3) + + print*, ' test_int2_u_grad1u_total_j1b2 ...' 
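+
+  ! For each (i,j,ipoint), the analytical combination
+  !   x * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(1,i,j,ipoint)
+  ! (and its y and z counterparts) is checked against the numerical integral
+  ! returned by num_int2_u_grad1u_total_j1b2.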
+ + PROVIDE int2_u_grad1u_j1b2 + PROVIDE int2_u_grad1u_x_j1b2 + + eps_ij = 1d-3 + acc_tot = 0.d0 + normalz = 0.d0 + + !do ipoint = 1, 10 + do ipoint = 1, n_points_final_grid + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + + do j = 1, ao_num + do i = 1, ao_num + + call num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) + + i_exc = x * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(1,i,j,ipoint) + i_num = integ(1) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in x part of int2_u_grad1u_total_j1b2 on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + i_exc = y * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(2,i,j,ipoint) + i_num = integ(2) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in y part of int2_u_grad1u_total_j1b2 on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + i_exc = z * int2_u_grad1u_j1b2(i,j,ipoint) - int2_u_grad1u_x_j1b2(3,i,j,ipoint) + i_num = integ(3) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in z part of int2_u_grad1u_total_j1b2 on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + acc_tot += acc_ij + normalz += dabs(i_num) + + enddo + enddo + enddo + + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz + + return +end subroutine test_int2_u_grad1u_total_j1b2 + +! --- + subroutine test_gradu_squared_u_ij_mu() implicit none @@ -367,8 +438,9 @@ subroutine test_gradu_squared_u_ij_mu() PROVIDE gradu_squared_u_ij_mu - eps_ij = 1d-6 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 do ipoint = 1, n_points_final_grid do j = 1, ao_num @@ -390,458 +462,140 @@ subroutine test_gradu_squared_u_ij_mu() enddo enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return end subroutine test_gradu_squared_u_ij_mu ! --- -subroutine test_j1b_nucl() +subroutine test_grad12_j12() implicit none - integer :: ipoint + integer :: i, j, ipoint double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: r(3) - double precision, external :: j1b_nucl + double precision, external :: num_grad12_j12 - print*, ' test_j1b_nucl ...' + print*, ' test_grad12_j12 ...' - PROVIDE v_1b + PROVIDE grad12_j12 - eps_ij = 1d-7 + eps_ij = 1d-3 acc_tot = 0.d0 + normalz = 0.d0 do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b(ipoint) - i_num = j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in v_1b on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_j1b_nucl - -! 
--- - -subroutine test_grad_j1b_nucl() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: r(3) - double precision, external :: grad_x_j1b_nucl - double precision, external :: grad_y_j1b_nucl - double precision, external :: grad_z_j1b_nucl - - print*, ' test_grad_j1b_nucl ...' - - PROVIDE v_1b_grad - - eps_ij = 1d-6 - acc_tot = 0.d0 - - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_grad(1,ipoint) - i_num = grad_x_j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in x of v_1b_grad on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - i_exc = v_1b_grad(2,ipoint) - i_num = grad_y_j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in y of v_1b_grad on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - i_exc = v_1b_grad(3,ipoint) - i_num = grad_z_j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in z of v_1b_grad on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_grad_j1b_nucl - -! --- - -subroutine test_lapl_j1b_nucl() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: r(3) - double precision, external :: lapl_j1b_nucl - - print*, ' test_lapl_j1b_nucl ...' - - PROVIDE v_1b_lapl - - eps_ij = 1d-5 - acc_tot = 0.d0 - - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_lapl(ipoint) - i_num = lapl_j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in v_1b_lapl on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_lapl_j1b_nucl - -! --- - -subroutine test_list_b2() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz - double precision :: r(3) - double precision, external :: j1b_nucl - - print*, ' test_list_b2 ...' - - PROVIDE v_1b_list_b2 - - eps_ij = 1d-7 - acc_tot = 0.d0 - - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_list_b2(ipoint) - i_num = j1b_nucl(r) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in list_b2 on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_list_b2 - -! 
--- - -subroutine test_list_b3() - - implicit none - integer :: ipoint - double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_tmp, i_num, normalz - double precision :: r(3) - double precision, external :: j1b_nucl - - print*, ' test_list_b3 ...' - - PROVIDE v_1b_list_b3 - - eps_ij = 1d-7 - acc_tot = 0.d0 - - do ipoint = 1, n_points_final_grid - - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - - i_exc = v_1b_list_b3(ipoint) - i_tmp = j1b_nucl(r) - i_num = i_tmp * i_tmp - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in list_b3 on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_num) - enddo - - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz - - return -end subroutine test_list_b3 - -! --- - -subroutine test_fit_ugradu() - - implicit none - - integer :: ipoint, i - double precision :: i_exc, i_fit, i_num, x2 - double precision :: r1(3), r2(3), grad(3) - double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo - - double precision, external :: j12_mu - - print*, ' test_fit_ugradu ...' - - eps_ij = 1d-7 - acc_tot = 0.d0 - - r2 = 0.d0 - - do ipoint = 1, n_points_final_grid - - r1(1) = final_grid_points(1,ipoint) - r1(2) = final_grid_points(2,ipoint) - r1(3) = final_grid_points(3,ipoint) - x2 = r1(1) * r1(1) + r1(2) * r1(2) + r1(3) * r1(3) - if(x2 .lt. 1d-10) cycle - - i_fit = 0.d0 - do i = 1, n_max_fit_slat - expo = expo_gauss_j_mu_1_erf(i) - coef = coef_gauss_j_mu_1_erf(i) - i_fit += coef * dexp(-expo*x2) + do j = 1, ao_num + do i = 1, ao_num + + i_exc = grad12_j12(i,j,ipoint) + i_num = num_grad12_j12(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in grad12_j12 on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif + + acc_tot += acc_ij + normalz += dabs(i_num) + enddo enddo - i_fit = i_fit / dsqrt(x2) - - call grad1_j12_mu_exc(r1, r2, grad) - - ! --- - - i_exc = j12_mu(r1, r2) * grad(1) - i_num = i_fit * r1(1) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem on x in test_fit_ugradu on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_exc) - - ! --- - - i_exc = j12_mu(r1, r2) * grad(2) - i_num = i_fit * r1(2) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem on y in test_fit_ugradu on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_exc) - - ! --- - - i_exc = j12_mu(r1, r2) * grad(3) - i_num = i_fit * r1(3) - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem on z in test_fit_ugradu on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - acc_tot += acc_ij - normalz += dabs(i_exc) - - ! --- - enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return -end subroutine test_fit_ugradu +end subroutine test_grad12_j12 ! 
--- -subroutine test_fit_u() +subroutine test_u12sq_j1bsq() implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_u12sq_j1bsq - integer :: ipoint, i - double precision :: i_exc, i_fit, i_num, x2 - double precision :: r1(3), r2(3) - double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + print*, ' test_u12sq_j1bsq ...' - double precision, external :: j12_mu + PROVIDE u12sq_j1bsq - print*, ' test_fit_u ...' - - eps_ij = 1d-7 + eps_ij = 1d-3 acc_tot = 0.d0 - - r2 = 0.d0 + normalz = 0.d0 do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num - r1(1) = final_grid_points(1,ipoint) - r1(2) = final_grid_points(2,ipoint) - r1(3) = final_grid_points(3,ipoint) - x2 = r1(1) * r1(1) + r1(2) * r1(2) + r1(3) * r1(3) - if(x2 .lt. 1d-10) cycle + i_exc = u12sq_j1bsq(i,j,ipoint) + i_num = num_u12sq_j1bsq(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in u12sq_j1bsq on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif - i_fit = 0.d0 - do i = 1, n_max_fit_slat - expo = expo_gauss_j_mu_x(i) - coef = coef_gauss_j_mu_x(i) - i_fit += coef * dexp(-expo*x2) + acc_tot += acc_ij + normalz += dabs(i_num) + enddo enddo - - i_exc = j12_mu(r1, r2) - i_num = i_fit - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. eps_ij) then - print *, ' problem in test_fit_u on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_exc) enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return -end subroutine test_fit_u +end subroutine test_u12sq_j1bsq ! --- -subroutine test_fit_u2() +subroutine test_u12_grad1_u12_j1b_grad1_j1b() implicit none + integer :: i, j, ipoint + double precision :: acc_ij, acc_tot, eps_ij, i_exc, i_num, normalz + double precision, external :: num_u12_grad1_u12_j1b_grad1_j1b - integer :: ipoint, i - double precision :: i_exc, i_fit, i_num, x2 - double precision :: r1(3), r2(3) - double precision :: eps_ij, acc_tot, acc_ij, normalz, coef, expo + print*, ' test_u12_grad1_u12_j1b_grad1_j1b ...' - double precision, external :: j12_mu + PROVIDE u12_grad1_u12_j1b_grad1_j1b - print*, ' test_fit_u2 ...' - - eps_ij = 1d-7 + eps_ij = 1d-3 acc_tot = 0.d0 - - r2 = 0.d0 + normalz = 0.d0 do ipoint = 1, n_points_final_grid + do j = 1, ao_num + do i = 1, ao_num - r1(1) = final_grid_points(1,ipoint) - r1(2) = final_grid_points(2,ipoint) - r1(3) = final_grid_points(3,ipoint) - x2 = r1(1) * r1(1) + r1(2) * r1(2) + r1(3) * r1(3) - if(x2 .lt. 1d-10) cycle + i_exc = u12_grad1_u12_j1b_grad1_j1b(i,j,ipoint) + i_num = num_u12_grad1_u12_j1b_grad1_j1b(i, j, ipoint) + acc_ij = dabs(i_exc - i_num) + if(acc_ij .gt. eps_ij) then + print *, ' problem in u12_grad1_u12_j1b_grad1_j1b on', i, j, ipoint + print *, ' analyt integ = ', i_exc + print *, ' numeri integ = ', i_num + print *, ' diff = ', acc_ij + endif - i_fit = 0.d0 - do i = 1, n_max_fit_slat - expo = expo_gauss_j_mu_x_2(i) - coef = coef_gauss_j_mu_x_2(i) - i_fit += coef * dexp(-expo*x2) + acc_tot += acc_ij + normalz += dabs(i_num) + enddo enddo - - i_exc = j12_mu(r1, r2) * j12_mu(r1, r2) - i_num = i_fit - acc_ij = dabs(i_exc - i_num) - if(acc_ij .gt. 
eps_ij) then - print *, ' problem in test_fit_u2 on', ipoint - print *, ' analyt = ', i_exc - print *, ' numeri = ', i_num - print *, ' diff = ', acc_ij - endif - - acc_tot += acc_ij - normalz += dabs(i_exc) enddo - acc_tot = acc_tot / normalz - print*, ' normalized acc = ', acc_tot - print*, ' normalz = ', normalz + print*, ' acc_tot = ', acc_tot + print*, ' normalz = ', normalz return -end subroutine test_fit_u2 +end subroutine test_u12_grad1_u12_j1b_grad1_j1b, ! --- diff --git a/src/non_h_ints_mu/grad_squared.irp.f b/src/non_h_ints_mu/grad_squared.irp.f index bf37c551..4e70bc5c 100644 --- a/src/non_h_ints_mu/grad_squared.irp.f +++ b/src/non_h_ints_mu/grad_squared.irp.f @@ -23,52 +23,63 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num, n_poi ! + -1.00 x v1 (grad_1 v1) \int r2 \phi_i(2) \phi_j(2) (grad_1 u12) v2^2 ! = v1^2 x int2_grad1u2_grad2u2_j1b2 ! + -0.5 x (grad_1 v1)^2 x int2_u2_j1b2 - ! + -1.0 X V1 x (grad_1 v1) \cdot int2_u_grad1u_x_j1b + ! + -1.0 X V1 x (grad_1 v1) \cdot [ int2_u_grad1u_j1b2 x r - int2_u_grad1u_x_j1b ] ! ! END_DOC implicit none integer :: ipoint, i, j, m, igauss - double precision :: r(3), delta, coef - double precision :: tmp_v, tmp_x, tmp_y, tmp_z, tmp1, tmp2, tmp3, tmp4, tmp5 + double precision :: x, y, z, r(3), delta, coef + double precision :: tmp_v, tmp_x, tmp_y, tmp_z + double precision :: tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9 double precision :: time0, time1 double precision, external :: overlap_gauss_r12_ao print*, ' providing gradu_squared_u_ij_mu ...' call wall_time(time0) - PROVIDE j1b_type j1b_pen + PROVIDE j1b_type if(j1b_type .eq. 3) then do ipoint = 1, n_points_final_grid + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) tmp_v = v_1b (ipoint) tmp_x = v_1b_grad(1,ipoint) tmp_y = v_1b_grad(2,ipoint) tmp_z = v_1b_grad(3,ipoint) tmp1 = tmp_v * tmp_v - tmp2 = 0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) + tmp2 = -0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) tmp3 = tmp_v * tmp_x tmp4 = tmp_v * tmp_y tmp5 = tmp_v * tmp_z + tmp6 = -x * tmp3 + tmp7 = -y * tmp4 + tmp8 = -z * tmp5 + do j = 1, ao_num do i = 1, ao_num - gradu_squared_u_ij_mu(j,i,ipoint) += tmp1 * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) & - - tmp2 * int2_u2_j1b2 (i,j,ipoint) & - - tmp3 * int2_u_grad1u_x_j1b (1,i,j,ipoint) & - - tmp4 * int2_u_grad1u_x_j1b (2,i,j,ipoint) & - - tmp5 * int2_u_grad1u_x_j1b (3,i,j,ipoint) + tmp9 = int2_u_grad1u_j1b2(i,j,ipoint) + + gradu_squared_u_ij_mu(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) & + + tmp2 * int2_u2_j1b2 (i,j,ipoint) & + + tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_j1b2(1,i,j,ipoint) & + + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_j1b2(2,i,j,ipoint) & + + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_j1b2(3,i,j,ipoint) enddo enddo enddo else + gradu_squared_u_ij_mu = 0.d0 do ipoint = 1, n_points_final_grid r(1) = final_grid_points(1,ipoint) r(2) = final_grid_points(2,ipoint) @@ -78,7 +89,7 @@ BEGIN_PROVIDER [ double precision, gradu_squared_u_ij_mu, (ao_num, ao_num, n_poi do igauss = 1, n_max_fit_slat delta = expo_gauss_1_erf_x_2(igauss) coef = coef_gauss_1_erf_x_2(igauss) - gradu_squared_u_ij_mu(j,i,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) + gradu_squared_u_ij_mu(i,j,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) enddo enddo enddo @@ -93,6 +104,57 @@ END_PROVIDER ! --- +!BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)] +! +! BEGIN_DOC +! ! +! ! 
tc_grad_square_ao(k,i,l,j) = -1/2 +! ! +! END_DOC +! +! implicit none +! integer :: ipoint, i, j, k, l +! double precision :: weight1, ao_ik_r, ao_i_r +! double precision, allocatable :: ac_mat(:,:,:,:) +! +! allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) +! ac_mat = 0.d0 +! +! do ipoint = 1, n_points_final_grid +! weight1 = final_weight_at_r_vector(ipoint) +! +! do i = 1, ao_num +! ao_i_r = weight1 * aos_in_r_array_transp(ipoint,i) +! +! do k = 1, ao_num +! ao_ik_r = ao_i_r * aos_in_r_array_transp(ipoint,k) +! +! do j = 1, ao_num +! do l = 1, ao_num +! ac_mat(k,i,l,j) += ao_ik_r * gradu_squared_u_ij_mu(l,j,ipoint) +! enddo +! enddo +! enddo +! enddo +! enddo +! +! do j = 1, ao_num +! do l = 1, ao_num +! do i = 1, ao_num +! do k = 1, ao_num +! tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) +! !write(11,*) tc_grad_square_ao(k,i,l,j) +! enddo +! enddo +! enddo +! enddo +! +! deallocate(ac_mat) +! +!END_PROVIDER + +! --- + BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao_num)] BEGIN_DOC @@ -103,27 +165,27 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao implicit none integer :: ipoint, i, j, k, l - double precision :: contrib, weight1, ao_k_r, ao_i_r - double precision, allocatable :: ac_mat(:,:,:,:) + double precision :: weight1, ao_ik_r, ao_i_r + double precision, allocatable :: ac_mat(:,:,:,:), bc_mat(:,:,:,:) allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) ac_mat = 0.d0 + allocate(bc_mat(ao_num,ao_num,ao_num,ao_num)) + bc_mat = 0.d0 do ipoint = 1, n_points_final_grid - weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) + weight1 = final_weight_at_r_vector(ipoint) do i = 1, ao_num - ao_i_r = aos_in_r_array_transp(ipoint,i) + ao_i_r = weight1 * aos_in_r_array_transp(ipoint,i) do k = 1, ao_num - ao_k_r = aos_in_r_array_transp(ipoint,k) + ao_ik_r = ao_i_r * aos_in_r_array_transp(ipoint,k) do j = 1, ao_num do l = 1, ao_num - - contrib = gradu_squared_u_ij_mu(l,j,ipoint) * ao_k_r * ao_i_r - - ac_mat(k,i,l,j) += weight1 * contrib + ac_mat(k,i,l,j) += ao_ik_r * ( u12sq_j1bsq(l,j,ipoint) + u12_grad1_u12_j1b_grad1_j1b(l,j,ipoint) ) + bc_mat(k,i,l,j) += ao_ik_r * grad12_j12(l,j,ipoint) enddo enddo enddo @@ -134,13 +196,147 @@ BEGIN_PROVIDER [double precision, tc_grad_square_ao, (ao_num, ao_num, ao_num, ao do l = 1, ao_num do i = 1, ao_num do k = 1, ao_num - tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + tc_grad_square_ao(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + bc_mat(k,i,l,j) enddo enddo enddo enddo deallocate(ac_mat) + deallocate(bc_mat) + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, grad12_j12, (ao_num, ao_num, n_points_final_grid) ] + + implicit none + integer :: ipoint, i, j, m, igauss + double precision :: r(3), delta, coef + double precision :: tmp1 + double precision :: time0, time1 + double precision, external :: overlap_gauss_r12_ao + + print*, ' providing grad12_j12 ...' + call wall_time(time0) + + PROVIDE j1b_type + + if(j1b_type .eq. 
3) then + + do ipoint = 1, n_points_final_grid + tmp1 = v_1b(ipoint) + tmp1 = tmp1 * tmp1 + do j = 1, ao_num + do i = 1, ao_num + grad12_j12(i,j,ipoint) = tmp1 * int2_grad1u2_grad2u2_j1b2(i,j,ipoint) + enddo + enddo + enddo + + else + + grad12_j12 = 0.d0 + do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + do j = 1, ao_num + do i = 1, ao_num + do igauss = 1, n_max_fit_slat + delta = expo_gauss_1_erf_x_2(igauss) + coef = coef_gauss_1_erf_x_2(igauss) + grad12_j12(i,j,ipoint) += -0.25d0 * coef * overlap_gauss_r12_ao(r, delta, i, j) + enddo + enddo + enddo + enddo + + endif + + call wall_time(time1) + print*, ' Wall time for grad12_j12 = ', time1 - time0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, u12sq_j1bsq, (ao_num, ao_num, n_points_final_grid) ] + + implicit none + integer :: ipoint, i, j + double precision :: tmp_x, tmp_y, tmp_z + double precision :: tmp1 + double precision :: time0, time1 + + print*, ' providing u12sq_j1bsq ...' + call wall_time(time0) + + do ipoint = 1, n_points_final_grid + tmp_x = v_1b_grad(1,ipoint) + tmp_y = v_1b_grad(2,ipoint) + tmp_z = v_1b_grad(3,ipoint) + tmp1 = -0.5d0 * (tmp_x * tmp_x + tmp_y * tmp_y + tmp_z * tmp_z) + do j = 1, ao_num + do i = 1, ao_num + u12sq_j1bsq(i,j,ipoint) = tmp1 * int2_u2_j1b2(i,j,ipoint) + enddo + enddo + enddo + + call wall_time(time1) + print*, ' Wall time for u12sq_j1bsq = ', time1 - time0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [ double precision, u12_grad1_u12_j1b_grad1_j1b, (ao_num, ao_num, n_points_final_grid) ] + + implicit none + integer :: ipoint, i, j, m, igauss + double precision :: x, y, z + double precision :: tmp_v, tmp_x, tmp_y, tmp_z + double precision :: tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9 + double precision :: time0, time1 + double precision, external :: overlap_gauss_r12_ao + + print*, ' providing u12_grad1_u12_j1b_grad1_j1b ...' + call wall_time(time0) + + do ipoint = 1, n_points_final_grid + + x = final_grid_points(1,ipoint) + y = final_grid_points(2,ipoint) + z = final_grid_points(3,ipoint) + tmp_v = v_1b (ipoint) + tmp_x = v_1b_grad(1,ipoint) + tmp_y = v_1b_grad(2,ipoint) + tmp_z = v_1b_grad(3,ipoint) + + tmp3 = tmp_v * tmp_x + tmp4 = tmp_v * tmp_y + tmp5 = tmp_v * tmp_z + + tmp6 = -x * tmp3 + tmp7 = -y * tmp4 + tmp8 = -z * tmp5 + + do j = 1, ao_num + do i = 1, ao_num + + tmp9 = int2_u_grad1u_j1b2(i,j,ipoint) + + u12_grad1_u12_j1b_grad1_j1b(i,j,ipoint) = tmp6 * tmp9 + tmp3 * int2_u_grad1u_x_j1b2(1,i,j,ipoint) & + + tmp7 * tmp9 + tmp4 * int2_u_grad1u_x_j1b2(2,i,j,ipoint) & + + tmp8 * tmp9 + tmp5 * int2_u_grad1u_x_j1b2(3,i,j,ipoint) + enddo + enddo + enddo + + call wall_time(time1) + print*, ' Wall time for u12_grad1_u12_j1b_grad1_j1b = ', time1 - time0 END_PROVIDER diff --git a/src/non_h_ints_mu/grad_tc_int.irp.f b/src/non_h_ints_mu/grad_tc_int.irp.f index 40600335..cb3b71a3 100644 --- a/src/non_h_ints_mu/grad_tc_int.irp.f +++ b/src/non_h_ints_mu/grad_tc_int.irp.f @@ -1,67 +1,75 @@ + +! --- + BEGIN_PROVIDER [double precision, ao_non_hermit_term_chemist, (ao_num, ao_num, ao_num, ao_num)] - implicit none -BEGIN_DOC -! 1 1 2 2 1 2 1 2 -! -! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis -END_DOC - integer :: i,j,k,l,ipoint,m - double precision :: weight1,thr,r(3) - thr = 1.d-8 - double precision, allocatable :: b_mat(:,:,:,:),ac_mat(:,:,:,:) -! provide v_ij_erf_rk_cst_mu - provide v_ij_erf_rk_cst_mu x_v_ij_erf_rk_cst_mu -! 
ao_non_hermit_term_chemist = non_h_ints -! return + + BEGIN_DOC + ! 1 1 2 2 1 2 1 2 + ! + ! ao_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the AO basis + ! + END_DOC + + implicit none + integer :: i, j, k, l, ipoint, m + double precision :: weight1, r(3) + double precision :: wall1, wall0 + double precision, allocatable :: b_mat(:,:,:,:), ac_mat(:,:,:,:) + + provide v_ij_erf_rk_cst_mu x_v_ij_erf_rk_cst_mu + call wall_time(wall0) - allocate(b_mat(n_points_final_grid,ao_num,ao_num,3),ac_mat(ao_num, ao_num, ao_num, ao_num)) - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + allocate(b_mat(n_points_final_grid,ao_num,ao_num,3), ac_mat(ao_num,ao_num,ao_num,ao_num)) + + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,k,m,ipoint,r,weight1) & !$OMP SHARED (aos_in_r_array_transp,aos_grad_in_r_array_transp_bis,b_mat)& !$OMP SHARED (ao_num,n_points_final_grid,final_grid_points,final_weight_at_r_vector) !$OMP DO SCHEDULE (static) - do m = 1, 3 - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - r(1) = final_grid_points(1,ipoint) - r(2) = final_grid_points(2,ipoint) - r(3) = final_grid_points(3,ipoint) - weight1 = final_weight_at_r_vector(ipoint) - b_mat(ipoint,k,i,m) = 0.5d0 * aos_in_r_array_transp(ipoint,k) * r(m) * weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,m) + do m = 1, 3 + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + r(1) = final_grid_points(1,ipoint) + r(2) = final_grid_points(2,ipoint) + r(3) = final_grid_points(3,ipoint) + weight1 = final_weight_at_r_vector(ipoint) + b_mat(ipoint,k,i,m) = 0.5d0 * aos_in_r_array_transp(ipoint,k) * r(m) * weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,m) + enddo + enddo enddo - enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - - ! (A) b_mat(ipoint,k,i,m) X v_ij_erf_rk_cst_mu(j,l,r1) - ! 1/2 \int dr1 x1 phi_k(1) d/dx1 phi_i(1) \int dr2 (1 - erf(mu_r12))/r12 phi_j(2) phi_l(2) + ! (A) b_mat(ipoint,k,i,m) X v_ij_erf_rk_cst_mu(j,l,r1) + ! 1/2 \int dr1 x1 phi_k(1) d/dx1 phi_i(1) \int dr2 (1 - erf(mu_r12))/r12 phi_j(2) phi_l(2) ac_mat = 0.d0 do m = 1, 3 - ! A B^T dim(A,1) dim(B,2) dim(A,2) alpha * A LDA - call dgemm("N","N",ao_num*ao_num,ao_num*ao_num,n_points_final_grid,1.d0,v_ij_erf_rk_cst_mu(1,1,1),ao_num*ao_num & - ,b_mat(1,1,1,m),n_points_final_grid,1.d0,ac_mat,ao_num*ao_num) + ! A B^T dim(A,1) dim(B,2) dim(A,2) alpha * A LDA + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, 1.d0 & + , v_ij_erf_rk_cst_mu(1,1,1), ao_num*ao_num, b_mat(1,1,1,m), n_points_final_grid & + , 1.d0, ac_mat, ao_num*ao_num) + enddo - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,k,m,ipoint,weight1) & !$OMP SHARED (aos_in_r_array_transp,aos_grad_in_r_array_transp_bis,b_mat,ao_num,n_points_final_grid,final_weight_at_r_vector) !$OMP DO SCHEDULE (static) - do m = 1, 3 - do i = 1, ao_num - do k = 1, ao_num - do ipoint = 1, n_points_final_grid - weight1 = final_weight_at_r_vector(ipoint) - b_mat(ipoint,k,i,m) = 0.5d0 * aos_in_r_array_transp(ipoint,k) * weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,m) + do m = 1, 3 + do i = 1, ao_num + do k = 1, ao_num + do ipoint = 1, n_points_final_grid + weight1 = final_weight_at_r_vector(ipoint) + b_mat(ipoint,k,i,m) = 0.5d0 * aos_in_r_array_transp(ipoint,k) * weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,m) + enddo + enddo enddo - enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL @@ -69,117 +77,141 @@ END_DOC ! 
1/2 \int dr1 phi_k(1) d/dx1 phi_i(1) \int dr2 x2(1 - erf(mu_r12))/r12 phi_j(2) phi_l(2) do m = 1, 3 ! A B^T dim(A,1) dim(B,2) dim(A,2) alpha * A LDA - call dgemm("N","N",ao_num*ao_num,ao_num*ao_num,n_points_final_grid,-1.d0,x_v_ij_erf_rk_cst_mu(1,1,1,m),ao_num*ao_num & - ,b_mat(1,1,1,m),n_points_final_grid,1.d0,ac_mat,ao_num*ao_num) + + call dgemm( "N", "N", ao_num*ao_num, ao_num*ao_num, n_points_final_grid, -1.d0 & + , x_v_ij_erf_rk_cst_mu(1,1,1,m), ao_num*ao_num, b_mat(1,1,1,m), n_points_final_grid & + , 1.d0, ac_mat, ao_num*ao_num) enddo - !$OMP PARALLEL & - !$OMP DEFAULT (NONE) & + !$OMP PARALLEL & + !$OMP DEFAULT (NONE) & !$OMP PRIVATE (i,k,j,l) & !$OMP SHARED (ac_mat,ao_non_hermit_term_chemist,ao_num) !$OMP DO SCHEDULE (static) - do j = 1, ao_num - do l = 1, ao_num - do i = 1, ao_num - do k = 1, ao_num - ! (ki|lj) (ki|lj) (lj|ki) - ao_non_hermit_term_chemist(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ! (ki|lj) (ki|lj) (lj|ki) + ao_non_hermit_term_chemist(k,i,l,j) = ac_mat(k,i,l,j) + ac_mat(l,j,k,i) + enddo + enddo enddo - enddo enddo - enddo !$OMP END DO !$OMP END PARALLEL - double precision :: wall1, wall0 + call wall_time(wall1) - print*,'wall time dgemm ',wall1 - wall0 + print *, ' wall time dgemm ', wall1 - wall0 + END_PROVIDER +! --- + +! TODO :: optimization :: transform into DGEM + BEGIN_PROVIDER [double precision, mo_non_hermit_term_chemist, (mo_num, mo_num, mo_num, mo_num)] - implicit none -BEGIN_DOC -! 1 1 2 2 1 2 1 2 -! -! mo_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the MO basis -END_DOC - integer :: i,j,k,l,m,n,p,q - double precision, allocatable :: mo_tmp_1(:,:,:,:),mo_tmp_2(:,:,:,:),mo_tmp_3(:,:,:,:) + + BEGIN_DOC + ! 1 1 2 2 1 2 1 2 + ! + ! mo_non_hermit_term_chemist(k,i,l,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the MO basis + END_DOC + + implicit none + integer :: i, j, k, l, m, n, p, q + double precision, allocatable :: mo_tmp_1(:,:,:,:), mo_tmp_2(:,:,:,:) - allocate(mo_tmp_1(mo_num,ao_num,ao_num,ao_num)) - ! TODO :: optimization :: transform into DGEM - mo_tmp_1 = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do n = 1, ao_num - do q = 1, ao_num - do k = 1, mo_num - ! (k n|p m) = sum_q c_qk * (q n|p m) - mo_tmp_1(k,n,p,m) += mo_coef_transp(k,q) * ao_non_hermit_term_chemist(q,n,p,m) - enddo + allocate(mo_tmp_1(mo_num,ao_num,ao_num,ao_num)) + mo_tmp_1 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do q = 1, ao_num + do k = 1, mo_num + ! (k n|p m) = sum_q c_qk * (q n|p m) + mo_tmp_1(k,n,p,m) += mo_coef_transp(k,q) * ao_non_hermit_term_chemist(q,n,p,m) + enddo + enddo + enddo enddo - enddo enddo - enddo - free ao_non_hermit_term_chemist - allocate(mo_tmp_2(mo_num,mo_num,ao_num,ao_num)) - mo_tmp_2 = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do n = 1, ao_num - do i = 1, mo_num - do k = 1, mo_num - ! (k i|p m) = sum_n c_ni * (k n|p m) - mo_tmp_2(k,i,p,m) += mo_coef_transp(i,n) * mo_tmp_1(k,n,p,m) - enddo + free ao_non_hermit_term_chemist + + allocate(mo_tmp_2(mo_num,mo_num,ao_num,ao_num)) + mo_tmp_2 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do n = 1, ao_num + do i = 1, mo_num + do k = 1, mo_num + ! 
(k i|p m) = sum_n c_ni * (k n|p m) + mo_tmp_2(k,i,p,m) += mo_coef_transp(i,n) * mo_tmp_1(k,n,p,m) + enddo + enddo + enddo enddo - enddo enddo - enddo - deallocate(mo_tmp_1) - allocate(mo_tmp_1(mo_num,mo_num,mo_num,ao_num)) - mo_tmp_1 = 0.d0 - do m = 1, ao_num - do p = 1, ao_num - do l = 1, mo_num - do i = 1, mo_num - do k = 1, mo_num - mo_tmp_1(k,i,l,m) += mo_coef_transp(l,p) * mo_tmp_2(k,i,p,m) - enddo + deallocate(mo_tmp_1) + + allocate(mo_tmp_1(mo_num,mo_num,mo_num,ao_num)) + mo_tmp_1 = 0.d0 + + do m = 1, ao_num + do p = 1, ao_num + do l = 1, mo_num + do i = 1, mo_num + do k = 1, mo_num + mo_tmp_1(k,i,l,m) += mo_coef_transp(l,p) * mo_tmp_2(k,i,p,m) + enddo + enddo + enddo enddo - enddo enddo - enddo - deallocate(mo_tmp_2) - mo_non_hermit_term_chemist = 0.d0 - do m = 1, ao_num - do j = 1, mo_num - do l = 1, mo_num - do i = 1, mo_num - do k = 1, mo_num - mo_non_hermit_term_chemist(k,i,l,j) += mo_coef_transp(j,m) * mo_tmp_1(k,i,l,m) - enddo + deallocate(mo_tmp_2) + + mo_non_hermit_term_chemist = 0.d0 + do m = 1, ao_num + do j = 1, mo_num + do l = 1, mo_num + do i = 1, mo_num + do k = 1, mo_num + mo_non_hermit_term_chemist(k,i,l,j) += mo_coef_transp(j,m) * mo_tmp_1(k,i,l,m) + enddo + enddo + enddo enddo - enddo enddo - enddo + deallocate(mo_tmp_1) END_PROVIDER +! --- + BEGIN_PROVIDER [double precision, mo_non_hermit_term, (mo_num, mo_num, mo_num, mo_num)] - implicit none -BEGIN_DOC -! 1 2 1 2 1 2 1 2 -! -! mo_non_hermit_term(k,l,i,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the MO basis -END_DOC - integer :: i,j,k,l - do j = 1, mo_num - do i = 1, mo_num - do l = 1, mo_num - do k = 1, mo_num - mo_non_hermit_term(k,l,i,j) = mo_non_hermit_term_chemist(k,i,l,j) + + BEGIN_DOC + ! 1 2 1 2 1 2 1 2 + ! + ! mo_non_hermit_term(k,l,i,j) = < k l | [erf( mu r12) - 1] d/d_r12 | i j > on the MO basis + END_DOC + + implicit none + integer :: i, j, k, l + + do j = 1, mo_num + do i = 1, mo_num + do l = 1, mo_num + do k = 1, mo_num + mo_non_hermit_term(k,l,i,j) = mo_non_hermit_term_chemist(k,i,l,j) + enddo + enddo enddo - enddo enddo - enddo + END_PROVIDER + +! --- + diff --git a/src/non_h_ints_mu/j12_nucl_utils.irp.f b/src/non_h_ints_mu/j12_nucl_utils.irp.f index a6dd0939..f3b68f43 100644 --- a/src/non_h_ints_mu/j12_nucl_utils.irp.f +++ b/src/non_h_ints_mu/j12_nucl_utils.irp.f @@ -586,4 +586,38 @@ end subroutine grad1_j12_mu_exc ! --- +subroutine grad1_jmu_modif_num(r1, r2, grad) + + implicit none + + double precision, intent(in) :: r1(3), r2(3) + double precision, intent(out) :: grad(3) + + double precision :: tmp0, tmp1, tmp2, tmp3, tmp4, grad_u12(3) + + double precision, external :: j12_mu + double precision, external :: j1b_nucl + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + call grad1_j12_mu_exc(r1, r2, grad_u12) + + tmp0 = j1b_nucl(r1) + tmp1 = j1b_nucl(r2) + tmp2 = j12_mu(r1, r2) + tmp3 = tmp0 * tmp1 + tmp4 = tmp2 * tmp1 + + grad(1) = tmp3 * grad_u12(1) + tmp4 * grad_x_j1b_nucl(r1) + grad(2) = tmp3 * grad_u12(2) + tmp4 * grad_y_j1b_nucl(r1) + grad(3) = tmp3 * grad_u12(3) + tmp4 * grad_z_j1b_nucl(r1) + + return +end subroutine grad1_jmu_modif_num + +! --- + + + diff --git a/src/non_h_ints_mu/new_grad_tc.irp.f b/src/non_h_ints_mu/new_grad_tc.irp.f index db659520..d34e629c 100644 --- a/src/non_h_ints_mu/new_grad_tc.irp.f +++ b/src/non_h_ints_mu/new_grad_tc.irp.f @@ -17,10 +17,10 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao, (3, ao_num, ao_num, n_poin ! if J(r1,r2) = u12 x v1 x v2 ! ! 
int2_grad1_u12_ao(:,i,j,ipoint) = v1 x [ 0.5 x \int dr2 [(r1 - r2) (erf(mu * r12)-1)r_12] v2 \phi_i(r2) \phi_j(r2) ] - ! + \grad_1 v1 x [ \int dr2 u12 v2 \phi_i(r2) \phi_j(r2) ] + ! - \grad_1 v1 x [ \int dr2 u12 v2 \phi_i(r2) \phi_j(r2) ] ! = 0.5 v_1b(ipoint) * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) * r(:) ! - 0.5 v_1b(ipoint) * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,:) - ! + v_1b_grad[:,ipoint] * v_ij_u_cst_mu_j1b(i,j,ipoint) + ! - v_1b_grad[:,ipoint] * v_ij_u_cst_mu_j1b(i,j,ipoint) ! ! END_DOC @@ -29,7 +29,7 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao, (3, ao_num, ao_num, n_poin integer :: ipoint, i, j double precision :: x, y, z, tmp_x, tmp_y, tmp_z, tmp0, tmp1, tmp2 - PROVIDE j1b_type j1b_pen + PROVIDE j1b_type if(j1b_type .eq. 3) then @@ -46,12 +46,12 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao, (3, ao_num, ao_num, n_poin do j = 1, ao_num do i = 1, ao_num - tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) - tmp2 = v_ij_u_cst_mu_j1b(i,j,ipoint) + tmp1 = tmp0 * v_ij_erf_rk_cst_mu_j1b(i,j,ipoint) + tmp2 = v_ij_u_cst_mu_j1b(i,j,ipoint) - int2_grad1_u12_ao(1,i,j,ipoint) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,1) + tmp_x * tmp2 - int2_grad1_u12_ao(2,i,j,ipoint) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,2) + tmp_y * tmp2 - int2_grad1_u12_ao(3,i,j,ipoint) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_j1b(i,j,ipoint,3) + tmp_z * tmp2 + int2_grad1_u12_ao(1,i,j,ipoint) = tmp1 * x - tmp0 * x_v_ij_erf_rk_cst_mu_tmp_j1b(1,i,j,ipoint) - tmp2 * tmp_x + int2_grad1_u12_ao(2,i,j,ipoint) = tmp1 * y - tmp0 * x_v_ij_erf_rk_cst_mu_tmp_j1b(2,i,j,ipoint) - tmp2 * tmp_y + int2_grad1_u12_ao(3,i,j,ipoint) = tmp1 * z - tmp0 * x_v_ij_erf_rk_cst_mu_tmp_j1b(3,i,j,ipoint) - tmp2 * tmp_z enddo enddo enddo @@ -62,11 +62,14 @@ BEGIN_PROVIDER [ double precision, int2_grad1_u12_ao, (3, ao_num, ao_num, n_poin x = final_grid_points(1,ipoint) y = final_grid_points(2,ipoint) z = final_grid_points(3,ipoint) + do j = 1, ao_num do i = 1, ao_num - int2_grad1_u12_ao(1,i,j,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) * x - x_v_ij_erf_rk_cst_mu(i,j,ipoint,1) - int2_grad1_u12_ao(2,i,j,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) * y - x_v_ij_erf_rk_cst_mu(i,j,ipoint,2) - int2_grad1_u12_ao(3,i,j,ipoint) = v_ij_erf_rk_cst_mu(i,j,ipoint) * z - x_v_ij_erf_rk_cst_mu(i,j,ipoint,3) + tmp1 = v_ij_erf_rk_cst_mu(i,j,ipoint) + + int2_grad1_u12_ao(1,i,j,ipoint) = tmp1 * x - x_v_ij_erf_rk_cst_mu_tmp(1,i,j,ipoint) + int2_grad1_u12_ao(2,i,j,ipoint) = tmp1 * y - x_v_ij_erf_rk_cst_mu_tmp(2,i,j,ipoint) + int2_grad1_u12_ao(3,i,j,ipoint) = tmp1 * z - x_v_ij_erf_rk_cst_mu_tmp(3,i,j,ipoint) enddo enddo enddo @@ -93,9 +96,8 @@ BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao, (ao_num, ao_num, ao_num, implicit none integer :: ipoint, i, j, k, l - double precision :: contrib, weight1, contrib_x, contrib_y, contrib_z - double precision :: ao_k_r, ao_k_dx, ao_k_dy, ao_k_dz - double precision :: ao_i_r, ao_i_dx, ao_i_dy, ao_i_dz + double precision :: weight1, contrib_x, contrib_y, contrib_z, tmp_x, tmp_y, tmp_z + double precision :: ao_k_r, ao_i_r, ao_i_dx, ao_i_dy, ao_i_dz double precision, allocatable :: ac_mat(:,:,:,:) allocate(ac_mat(ao_num,ao_num,ao_num,ao_num)) @@ -105,27 +107,26 @@ BEGIN_PROVIDER [double precision, tc_grad_and_lapl_ao, (ao_num, ao_num, ao_num, weight1 = 0.5d0 * final_weight_at_r_vector(ipoint) do i = 1, ao_num - ao_i_r = aos_in_r_array_transp (ipoint,i) - ao_i_dx = aos_grad_in_r_array_transp_bis(ipoint,i,1) - ao_i_dy = aos_grad_in_r_array_transp_bis(ipoint,i,2) - ao_i_dz = 
aos_grad_in_r_array_transp_bis(ipoint,i,3) + ao_i_r = weight1 * aos_in_r_array_transp (ipoint,i) + ao_i_dx = weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,1) + ao_i_dy = weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,2) + ao_i_dz = weight1 * aos_grad_in_r_array_transp_bis(ipoint,i,3) do k = 1, ao_num - ao_k_r = aos_in_r_array_transp (ipoint,k) - ao_k_dx = aos_grad_in_r_array_transp_bis(ipoint,k,1) - ao_k_dy = aos_grad_in_r_array_transp_bis(ipoint,k,2) - ao_k_dz = aos_grad_in_r_array_transp_bis(ipoint,k,3) + ao_k_r = aos_in_r_array_transp(ipoint,k) + + tmp_x = ao_k_r * ao_i_dx - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,1) + tmp_y = ao_k_r * ao_i_dy - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,2) + tmp_z = ao_k_r * ao_i_dz - ao_i_r * aos_grad_in_r_array_transp_bis(ipoint,k,3) do j = 1, ao_num do l = 1, ao_num - contrib_x = int2_grad1_u12_ao(1,l,j,ipoint) * ( ao_k_r * ao_i_dx - ao_i_r * ao_k_dx ) - contrib_y = int2_grad1_u12_ao(2,l,j,ipoint) * ( ao_k_r * ao_i_dy - ao_i_r * ao_k_dy ) - contrib_z = int2_grad1_u12_ao(3,l,j,ipoint) * ( ao_k_r * ao_i_dz - ao_i_r * ao_k_dz ) + contrib_x = int2_grad1_u12_ao(1,l,j,ipoint) * tmp_x + contrib_y = int2_grad1_u12_ao(2,l,j,ipoint) * tmp_y + contrib_z = int2_grad1_u12_ao(3,l,j,ipoint) * tmp_z - contrib = weight1 * ( contrib_x + contrib_y + contrib_z ) - - ac_mat(k,i,l,j) += contrib + ac_mat(k,i,l,j) += contrib_x + contrib_y + contrib_z enddo enddo enddo diff --git a/src/non_h_ints_mu/numerical_integ.irp.f b/src/non_h_ints_mu/numerical_integ.irp.f index dae68649..dcd7a52a 100644 --- a/src/non_h_ints_mu/numerical_integ.irp.f +++ b/src/non_h_ints_mu/numerical_integ.irp.f @@ -55,6 +55,7 @@ double precision function num_int2_u2_j1b2(i, j, ipoint) double precision, external :: ao_value double precision, external :: j1b_nucl + double precision, external :: j12_mu r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) @@ -74,13 +75,14 @@ double precision function num_int2_u2_j1b2(i, j, ipoint) tmp1 = j1b_nucl(r2) tmp2 = tmp1 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) - tmp3 = 0.d0 - do i_fit = 1, n_max_fit_slat - expo = expo_gauss_j_mu_x_2(i_fit) - coef = coef_gauss_j_mu_x_2(i_fit) - - tmp3 += coef * dexp(-expo*x2) - enddo + !tmp3 = 0.d0 + !do i_fit = 1, n_max_fit_slat + ! expo = expo_gauss_j_mu_x_2(i_fit) + ! coef = coef_gauss_j_mu_x_2(i_fit) + ! tmp3 += coef * dexp(-expo*x2) + !enddo + tmp3 = j12_mu(r1, r2) + tmp3 = tmp3 * tmp3 num_int2_u2_j1b2 += tmp2 * tmp3 enddo @@ -127,13 +129,15 @@ double precision function num_int2_grad1u2_grad2u2_j1b2(i, j, ipoint) tmp1 = j1b_nucl(r2) tmp2 = tmp1 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) - tmp3 = 0.d0 - do i_fit = 1, n_max_fit_slat - expo = expo_gauss_1_erf_x_2(i_fit) - coef = coef_gauss_1_erf_x_2(i_fit) + !tmp3 = 0.d0 + !do i_fit = 1, n_max_fit_slat + ! expo = expo_gauss_1_erf_x_2(i_fit) + ! coef = coef_gauss_1_erf_x_2(i_fit) + ! tmp3 += coef * dexp(-expo*x2) + !enddo + tmp3 = derf(mu_erf*r12) - 1.d0 + tmp3 = tmp3 * tmp3 - tmp3 += coef * dexp(-expo*x2) - enddo tmp3 = -0.25d0 * tmp3 num_int2_grad1u2_grad2u2_j1b2 += tmp2 * tmp3 @@ -246,6 +250,12 @@ end subroutine num_x_v_ij_erf_rk_cst_mu_j1b subroutine num_int2_grad1_u12_ao(i, j, ipoint, integ) + BEGIN_DOC + ! + ! \int dr2 [-grad_1 u12] \phi_i(r2) \phi_j(r2) x v12_1b(r1, r2) + ! 
+ END_DOC + implicit none integer, intent(in) :: i, j, ipoint @@ -256,7 +266,6 @@ subroutine num_int2_grad1_u12_ao(i, j, ipoint, integ) double precision :: tmp_x, tmp_y, tmp_z double precision, external :: ao_value - double precision, external :: j12_nucl r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) @@ -269,9 +278,9 @@ subroutine num_int2_grad1_u12_ao(i, j, ipoint, integ) r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) r2(3) = final_grid_points(3,jpoint) - tmp = ao_value(i, r2) * ao_value(j, r2) * j12_nucl(r1, r2) * final_weight_at_r_vector(jpoint) + tmp = ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) - call grad1_j12_mu_exc(r1, r2, grad) + call grad1_jmu_modif_num(r1, r2, grad) tmp_x += tmp * (-1.d0 * grad(1)) tmp_y += tmp * (-1.d0 * grad(2)) @@ -289,16 +298,33 @@ end subroutine num_int2_grad1_u12_ao double precision function num_gradu_squared_u_ij_mu(i, j, ipoint) + BEGIN_DOC + ! + ! -0.50 x \int r2 \phi_i(2) \phi_j(2) x v2^2 + ! [ v1^2 ((grad_1 u12)^2 + (grad_2 u12^2)]) + ! + u12^2 (grad_1 v1)^2 + ! + 2 u12 v1 (grad_1 u12) . (grad_1 v1) + ! + END_DOC + + implicit none integer, intent(in) :: i, j, ipoint integer :: jpoint - double precision :: tmp, r1(3), r2(3), r12 - double precision :: tmp_x, tmp_y, tmp_z, tmp1, tmp2 + double precision :: r1(3), r2(3) + double precision :: tmp_x, tmp_y, tmp_z, r12 + double precision :: dx1_v1, dy1_v1, dz1_v1, grad_u12(3) + double precision :: tmp1, v1_tmp, v2_tmp, u12_tmp + double precision :: fst_term, scd_term, thd_term, tmp double precision, external :: ao_value - double precision, external :: j12_nucl + double precision, external :: j1b_nucl + double precision, external :: j12_mu + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl r1(1) = final_grid_points(1,ipoint) r1(2) = final_grid_points(2,ipoint) @@ -306,16 +332,32 @@ double precision function num_gradu_squared_u_ij_mu(i, j, ipoint) num_gradu_squared_u_ij_mu = 0.d0 do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) r2(2) = final_grid_points(2,jpoint) r2(3) = final_grid_points(3,jpoint) + tmp_x = r1(1) - r2(1) tmp_y = r1(2) - r2(2) tmp_z = r1(3) - r2(3) - r12 = dsqrt( tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z ) - tmp1 = 1.d0 - derf(mu_erf * r12) - tmp2 = j12_nucl(r1, r2) - tmp = -0.25d0 * tmp1 * tmp1 * tmp2 * tmp2 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) + r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) + + dx1_v1 = grad_x_j1b_nucl(r1) + dy1_v1 = grad_y_j1b_nucl(r1) + dz1_v1 = grad_z_j1b_nucl(r1) + + call grad1_j12_mu_exc(r1, r2, grad_u12) + + tmp1 = 1.d0 - derf(mu_erf * r12) + v1_tmp = j1b_nucl(r1) + v2_tmp = j1b_nucl(r2) + u12_tmp = j12_mu(r1, r2) + + fst_term = 0.5d0 * tmp1 * tmp1 * v1_tmp * v1_tmp + scd_term = u12_tmp * u12_tmp * (dx1_v1*dx1_v1 + dy1_v1*dy1_v1 + dz1_v1*dz1_v1) + thd_term = 2.d0 * v1_tmp * u12_tmp * (dx1_v1*grad_u12(1) + dy1_v1*grad_u12(2) + dz1_v1*grad_u12(3)) + + tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * (fst_term + scd_term + thd_term) * v2_tmp * v2_tmp num_gradu_squared_u_ij_mu += tmp enddo @@ -325,4 +367,257 @@ end function num_gradu_squared_u_ij_mu ! --- +double precision function num_grad12_j12(i, j, ipoint) + BEGIN_DOC + ! + ! -0.50 x \int r2 \phi_i(2) \phi_j(2) x v2^2 [v1^2 ((grad_1 u12)^2 + (grad_2 u12^2)]) ] + ! 
+ END_DOC + + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint + double precision :: r1(3), r2(3) + double precision :: tmp_x, tmp_y, tmp_z, r12 + double precision :: dx1_v1, dy1_v1, dz1_v1, grad_u12(3) + double precision :: tmp1, v1_tmp, v2_tmp, u12_tmp + double precision :: fst_term, scd_term, thd_term, tmp + + double precision, external :: ao_value + double precision, external :: j1b_nucl + double precision, external :: j12_mu + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_grad12_j12 = 0.d0 + do jpoint = 1, n_points_final_grid + + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + tmp_x = r1(1) - r2(1) + tmp_y = r1(2) - r2(2) + tmp_z = r1(3) - r2(3) + r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) + + dx1_v1 = grad_x_j1b_nucl(r1) + dy1_v1 = grad_y_j1b_nucl(r1) + dz1_v1 = grad_z_j1b_nucl(r1) + + call grad1_j12_mu_exc(r1, r2, grad_u12) + + tmp1 = 1.d0 - derf(mu_erf * r12) + v1_tmp = j1b_nucl(r1) + v2_tmp = j1b_nucl(r2) + u12_tmp = j12_mu(r1, r2) + + fst_term = 0.5d0 * tmp1 * tmp1 * v1_tmp * v1_tmp + + tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * fst_term * v2_tmp * v2_tmp + + num_grad12_j12 += tmp + enddo + + return +end function num_grad12_j12 + +! --- + +double precision function num_u12sq_j1bsq(i, j, ipoint) + + BEGIN_DOC + ! + ! -0.50 x \int r2 \phi_i(2) \phi_j(2) x v2^2 [ u12^2 (grad_1 v1)^2 ] + ! + END_DOC + + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint + double precision :: r1(3), r2(3) + double precision :: tmp_x, tmp_y, tmp_z, r12 + double precision :: dx1_v1, dy1_v1, dz1_v1, grad_u12(3) + double precision :: tmp1, v1_tmp, v2_tmp, u12_tmp + double precision :: fst_term, scd_term, thd_term, tmp + + double precision, external :: ao_value + double precision, external :: j1b_nucl + double precision, external :: j12_mu + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_u12sq_j1bsq = 0.d0 + do jpoint = 1, n_points_final_grid + + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + tmp_x = r1(1) - r2(1) + tmp_y = r1(2) - r2(2) + tmp_z = r1(3) - r2(3) + r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) + + dx1_v1 = grad_x_j1b_nucl(r1) + dy1_v1 = grad_y_j1b_nucl(r1) + dz1_v1 = grad_z_j1b_nucl(r1) + + call grad1_j12_mu_exc(r1, r2, grad_u12) + + tmp1 = 1.d0 - derf(mu_erf * r12) + v1_tmp = j1b_nucl(r1) + v2_tmp = j1b_nucl(r2) + u12_tmp = j12_mu(r1, r2) + + scd_term = u12_tmp * u12_tmp * (dx1_v1*dx1_v1 + dy1_v1*dy1_v1 + dz1_v1*dz1_v1) + + tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * scd_term * v2_tmp * v2_tmp + + num_u12sq_j1bsq += tmp + enddo + + return +end function num_u12sq_j1bsq + +! --- + +double precision function num_u12_grad1_u12_j1b_grad1_j1b(i, j, ipoint) + + BEGIN_DOC + ! + ! -0.50 x \int r2 \phi_i(2) \phi_j(2) x v2^2 [ 2 u12 v1 (grad_1 u12) . (grad_1 v1) ] + ! 
+ END_DOC + + + implicit none + + integer, intent(in) :: i, j, ipoint + + integer :: jpoint + double precision :: r1(3), r2(3) + double precision :: tmp_x, tmp_y, tmp_z, r12 + double precision :: dx1_v1, dy1_v1, dz1_v1, grad_u12(3) + double precision :: tmp1, v1_tmp, v2_tmp, u12_tmp + double precision :: fst_term, scd_term, thd_term, tmp + + double precision, external :: ao_value + double precision, external :: j1b_nucl + double precision, external :: j12_mu + double precision, external :: grad_x_j1b_nucl + double precision, external :: grad_y_j1b_nucl + double precision, external :: grad_z_j1b_nucl + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + num_u12_grad1_u12_j1b_grad1_j1b = 0.d0 + do jpoint = 1, n_points_final_grid + + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + + tmp_x = r1(1) - r2(1) + tmp_y = r1(2) - r2(2) + tmp_z = r1(3) - r2(3) + r12 = dsqrt(tmp_x*tmp_x + tmp_y*tmp_y + tmp_z*tmp_z) + + dx1_v1 = grad_x_j1b_nucl(r1) + dy1_v1 = grad_y_j1b_nucl(r1) + dz1_v1 = grad_z_j1b_nucl(r1) + + call grad1_j12_mu_exc(r1, r2, grad_u12) + + tmp1 = 1.d0 - derf(mu_erf * r12) + v1_tmp = j1b_nucl(r1) + v2_tmp = j1b_nucl(r2) + u12_tmp = j12_mu(r1, r2) + + thd_term = 2.d0 * v1_tmp * u12_tmp * (dx1_v1*grad_u12(1) + dy1_v1*grad_u12(2) + dz1_v1*grad_u12(3)) + + tmp = -0.5d0 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) * thd_term * v2_tmp * v2_tmp + + num_u12_grad1_u12_j1b_grad1_j1b += tmp + enddo + + return +end function num_u12_grad1_u12_j1b_grad1_j1b + +! --- + +subroutine num_int2_u_grad1u_total_j1b2(i, j, ipoint, integ) + + BEGIN_DOC + ! + ! \int dr2 u12 (grad_1 u12) \phi_i(r2) \phi_j(r2) x v_1b(r2)^2 + ! + END_DOC + + implicit none + + integer, intent(in) :: i, j, ipoint + double precision, intent(out) :: integ(3) + + integer :: jpoint + double precision :: r1(3), r2(3), grad(3) + double precision :: dx, dy, dz, r12, tmp0, tmp1, tmp2 + double precision :: tmp_x, tmp_y, tmp_z + + double precision, external :: ao_value + double precision, external :: j1b_nucl + double precision, external :: j12_mu + + r1(1) = final_grid_points(1,ipoint) + r1(2) = final_grid_points(2,ipoint) + r1(3) = final_grid_points(3,ipoint) + + tmp_x = 0.d0 + tmp_y = 0.d0 + tmp_z = 0.d0 + do jpoint = 1, n_points_final_grid + r2(1) = final_grid_points(1,jpoint) + r2(2) = final_grid_points(2,jpoint) + r2(3) = final_grid_points(3,jpoint) + dx = r1(1) - r2(1) + dy = r1(2) - r2(2) + dz = r1(3) - r2(3) + r12 = dsqrt( dx * dx + dy * dy + dz * dz ) + if(r12 .lt. 1d-10) cycle + + tmp0 = j1b_nucl(r2) + tmp1 = 0.5d0 * j12_mu(r1, r2) * (1.d0 - derf(mu_erf * r12)) / r12 + tmp2 = tmp0 * tmp0 * tmp1 * ao_value(i, r2) * ao_value(j, r2) * final_weight_at_r_vector(jpoint) + + tmp_x += tmp2 * dx + tmp_y += tmp2 * dy + tmp_z += tmp2 * dz + enddo + + integ(1) = tmp_x + integ(2) = tmp_y + integ(3) = tmp_z + + return +end subroutine num_int2_u_grad1u_total_j1b2 + +! --- diff --git a/src/non_h_ints_mu/total_tc_int.irp.f b/src/non_h_ints_mu/total_tc_int.irp.f new file mode 100644 index 00000000..979296d1 --- /dev/null +++ b/src/non_h_ints_mu/total_tc_int.irp.f @@ -0,0 +1,60 @@ + +! 
--- + +BEGIN_PROVIDER [double precision, ao_tc_int_chemist, (ao_num, ao_num, ao_num, ao_num)] + + implicit none + integer :: i, j, k, l + double precision :: wall1, wall0 + + call wall_time(wall0) + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + ao_tc_int_chemist(k,i,l,j) = tc_grad_square_ao(k,i,l,j) + tc_grad_and_lapl_ao(k,i,l,j) + ao_two_e_coul(k,i,l,j) + enddo + enddo + enddo + enddo + + call wall_time(wall1) + print *, ' wall time for ao_tc_int_chemist ', wall1 - wall0 + +END_PROVIDER + +! --- + +BEGIN_PROVIDER [double precision, ao_two_e_coul, (ao_num, ao_num, ao_num, ao_num) ] + + BEGIN_DOC + ! + ! ao_two_e_coul(k,i,l,j) = ( k i | 1/r12 | l j ) = < l k | 1/r12 | j i > + ! + END_DOC + + integer :: i, j, k, l + double precision :: integral + double precision, external :: get_ao_two_e_integral + + PROVIDE ao_integrals_map + + do j = 1, ao_num + do l = 1, ao_num + do i = 1, ao_num + do k = 1, ao_num + + ! < 1:k, 2:l | 1:i, 2:j > + integral = get_ao_two_e_integral(i, j, k, l, ao_integrals_map) + + ao_two_e_coul(k,i,l,j) = integral + enddo + enddo + enddo + enddo + +END_PROVIDER + +! --- + diff --git a/src/tc_bi_ortho/compute_deltamu_right.irp.f b/src/tc_bi_ortho/compute_deltamu_right.irp.f index 32566cc8..6464796e 100644 --- a/src/tc_bi_ortho/compute_deltamu_right.irp.f +++ b/src/tc_bi_ortho/compute_deltamu_right.irp.f @@ -34,6 +34,7 @@ subroutine delta_right() !do k = 1, 1 ! get < I_left | H_mu - H | psi_right > + !call get_h_bitc_right(psi_det, psi_r_coef_bi_ortho(:,k), N_det, N_int, delta(:,k)) call get_delta_bitc_right(psi_det, psi_r_coef_bi_ortho(:,k), N_det, N_int, delta(:,k)) ! order as QMCCHEM diff --git a/src/tc_bi_ortho/dressing_vectors_lr.irp.f b/src/tc_bi_ortho/dressing_vectors_lr.irp.f index e69a970b..08913bab 100644 --- a/src/tc_bi_ortho/dressing_vectors_lr.irp.f +++ b/src/tc_bi_ortho/dressing_vectors_lr.irp.f @@ -23,10 +23,12 @@ subroutine get_delta_bitc_right(psidet, psicoef, ndet, Nint, delta) double precision :: htc_mono, htc_twoe, htc_three, htc_tot double precision :: delta_mat + print *, ' get_delta_bitc_right ...' + i = 1 j = 1 call htilde_mu_mat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, htc_mono, htc_twoe, htc_three, htc_tot) - call hmat_bi_ortho (psidet(1,1,i), psidet(1,1,j), Nint, h_mono , h_twoe , h_tot) + call hmat_bi_ortho (psidet(1,1,i), psidet(1,1,j), Nint, h_mono, h_twoe, h_tot) delta = 0.d0 !$OMP PARALLEL DO DEFAULT(NONE) SCHEDULE(dynamic,8) & @@ -39,7 +41,7 @@ subroutine get_delta_bitc_right(psidet, psicoef, ndet, Nint, delta) ! < I | Htilde | J > call htilde_mu_mat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, htc_mono, htc_twoe, htc_three, htc_tot) ! < I | H | J > - call hmat_bi_ortho (psidet(1,1,i), psidet(1,1,j), Nint, h_mono , h_twoe , h_tot) + call hmat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, h_mono, h_twoe, h_tot) delta_mat = htc_tot - h_tot @@ -52,3 +54,102 @@ end subroutine get_delta_bitc_right ! --- +subroutine get_htc_bitc_right(psidet, psicoef, ndet, Nint, delta) + + BEGIN_DOC + ! + ! delta(I) = < I_left | H_TC | Psi_right > + ! + END_DOC + + use bitmasks + + implicit none + + integer, intent(in) :: ndet, Nint + double precision, intent(in) :: psicoef(ndet) + integer(bit_kind), intent(in) :: psidet(Nint,2,ndet) + double precision, intent(out) :: delta(ndet) + + integer :: i, j + double precision :: htc_mono, htc_twoe, htc_three, htc_tot + + print *, ' get_htc_bitc_right ...' 
+ + i = 1 + j = 1 + call htilde_mu_mat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, htc_mono, htc_twoe, htc_three, htc_tot) + + delta = 0.d0 + !$OMP PARALLEL DO DEFAULT(NONE) SCHEDULE(dynamic,8) & + !$OMP SHARED(delta, ndet, psidet, psicoef, Nint) & + !$OMP PRIVATE(i, j, htc_mono, htc_twoe, htc_three, htc_tot) + do i = 1, ndet + do j = 1, ndet + + ! < I | Htilde | J > + call htilde_mu_mat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, htc_mono, htc_twoe, htc_three, htc_tot) + + delta(i) = delta(i) + psicoef(j) * htc_tot + enddo + enddo + !$OMP END PARALLEL DO + +end subroutine get_htc_bitc_right + +! --- + +subroutine get_h_bitc_right(psidet, psicoef, ndet, Nint, delta) + + BEGIN_DOC + ! + ! delta(I) = < I_left | H | Psi_right > + ! + END_DOC + + use bitmasks + + implicit none + + integer, intent(in) :: ndet, Nint + double precision, intent(in) :: psicoef(ndet) + integer(bit_kind), intent(in) :: psidet(Nint,2,ndet) + double precision, intent(out) :: delta(ndet) + + integer :: i, j + double precision :: h_mono, h_twoe, h_tot + + print *, ' get_h_bitc_right ...' + + i = 1 + j = 1 + call hmat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, h_mono, h_twoe, h_tot) + + !double precision :: norm + !norm = 0.d0 + !do i = 1, ndet + ! norm += psicoef(i) * psicoef(i) + !enddo + !print*, ' norm = ', norm + + call hmat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, h_mono, h_twoe, h_tot) + + delta = 0.d0 +! !$OMP PARALLEL DO DEFAULT(NONE) SCHEDULE(dynamic,8) & +! !$OMP SHARED(delta, ndet, psidet, psicoef, Nint) & +! !$OMP PRIVATE(i, j, h_mono, h_twoe, h_tot) + do i = 1, ndet + do j = 1, ndet + + ! < I | H | J > + call hmat_bi_ortho(psidet(1,1,i), psidet(1,1,j), Nint, h_mono, h_twoe, h_tot) + + delta(i) = delta(i) + psicoef(j) * h_tot + enddo + enddo +! !$OMP END PARALLEL DO + +end subroutine get_h_bitc_right + +! --- + diff --git a/src/tc_bi_ortho/h_biortho.irp.f b/src/tc_bi_ortho/h_biortho.irp.f index 0494399f..492e1282 100644 --- a/src/tc_bi_ortho/h_biortho.irp.f +++ b/src/tc_bi_ortho/h_biortho.irp.f @@ -5,7 +5,7 @@ subroutine hmat_bi_ortho(key_j, key_i, Nint, hmono, htwoe, htot) BEGIN_DOC ! - ! where | key_j > is developed on the LEFT basis and | key_i > is developed on the RIGHT basis + ! < key_j | H | key_i > where | key_j > is developed on the LEFT basis and | key_i > is developed on the RIGHT basis ! 
END_DOC @@ -13,11 +13,11 @@ subroutine hmat_bi_ortho(key_j, key_i, Nint, hmono, htwoe, htot) implicit none - integer, intent(in) :: Nint - integer(bit_kind), intent(in) :: key_i(Nint,2), key_j(Nint,2) - double precision, intent(out) :: hmono, htwoe, htot + integer, intent(in) :: Nint + integer(bit_kind), intent(in) :: key_i(Nint,2), key_j(Nint,2) + double precision, intent(out) :: hmono, htwoe, htot - integer :: degree + integer :: degree hmono = 0.d0 htwoe = 0.d0 @@ -31,11 +31,11 @@ subroutine hmat_bi_ortho(key_j, key_i, Nint, hmono, htwoe, htot) call diag_hmat_bi_ortho(Nint, key_i, hmono, htwoe) htot = htot + nuclear_repulsion - else if (degree == 1)then + else if (degree == 1) then call single_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) - else if(degree == 2)then + else if(degree == 2) then call double_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) @@ -59,8 +59,7 @@ subroutine diag_hmat_bi_ortho(Nint, key_i, hmono, htwoe) double precision, intent(out) :: hmono, htwoe integer :: occ(Nint*bit_kind_size,2) - integer :: Ne(2), i, j, ii, jj, ispin, jspin, k, kk - integer(bit_kind) :: key_i_core(Nint,2) + integer :: Ne(2), i, j, ii, jj, ispin, jspin hmono = 0.d0 htwoe = 0.d0 @@ -125,13 +124,11 @@ subroutine single_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) double precision, intent(out) :: hmono, htwoe integer :: occ(Nint*bit_kind_size,2) - integer :: Ne(2), i, j, ii, jj, ispin, jspin, k, kk + integer :: Ne(2), i, j, ii, ispin, jspin integer :: degree,exc(0:2,2,2) integer :: h1, p1, h2, p2, s1, s2 integer :: other_spin(2) - integer(bit_kind) :: key_j_core(Nint,2), key_i_core(Nint,2) double precision :: phase - double precision :: direct_int, exchange_int_12, exchange_int_23, exchange_int_13 other_spin(1) = 2 other_spin(2) = 1 @@ -201,11 +198,10 @@ subroutine double_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) double precision, intent(out) :: hmono, htwoe integer :: occ(Nint*bit_kind_size,2) - integer :: Ne(2), i, j, ii, jj, ispin, jspin, k, kk + integer :: Ne(2), i, j, ii, ispin, jspin integer :: degree,exc(0:2,2,2) integer :: h1, p1, h2, p2, s1, s2 integer :: other_spin(2) - integer(bit_kind) :: key_i_core(Nint,2) double precision :: phase other_spin(1) = 2 @@ -225,7 +221,7 @@ subroutine double_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) call get_double_excitation(key_i, key_j, exc, phase, Nint) call decode_exc(exc, 2, h1, p1, h2, p2, s1, s2) - if(s1.ne.s2) then + if(s1 .ne. s2) then htwoe = mo_bi_ortho_coul_e(p2,p1,h2,h1) @@ -233,10 +229,8 @@ subroutine double_hmat_bi_ortho(Nint, key_j, key_i, hmono, htwoe) ! same spin two-body - ! direct terms - htwoe = mo_bi_ortho_coul_e(p2,p1,h2,h1) - ! exchange terms - htwoe -= mo_bi_ortho_coul_e(p1,p2,h2,h1) + ! 
direct terms exchange terms + htwoe = mo_bi_ortho_coul_e(p2,p1,h2,h1) - mo_bi_ortho_coul_e(p1,p2,h2,h1) endif diff --git a/src/tc_bi_ortho/psi_r_l_prov.irp.f b/src/tc_bi_ortho/psi_r_l_prov.irp.f index 551da858..2a5887d5 100644 --- a/src/tc_bi_ortho/psi_r_l_prov.irp.f +++ b/src/tc_bi_ortho/psi_r_l_prov.irp.f @@ -41,6 +41,15 @@ BEGIN_PROVIDER [ double precision, psi_l_coef_bi_ortho, (psi_det_size,N_states) enddo deallocate(psi_l_coef_bi_ortho_read) + else + + print*, 'psi_l_coef_bi_ortho are psi_coef' + do k=1,N_states + do i=1,N_det + psi_l_coef_bi_ortho(i,k) = psi_coef(i,k) + enddo + enddo + endif endif endif @@ -100,6 +109,15 @@ BEGIN_PROVIDER [ double precision, psi_r_coef_bi_ortho, (psi_det_size,N_states) enddo deallocate(psi_r_coef_bi_ortho_read) + else + + print*, 'psi_r_coef_bi_ortho are psi_coef' + do k=1,N_states + do i=1,N_det + psi_r_coef_bi_ortho(i,k) = psi_coef(i,k) + enddo + enddo + endif endif endif diff --git a/src/tc_bi_ortho/slater_tc.irp.f b/src/tc_bi_ortho/slater_tc.irp.f index e0a52741..33b738ba 100644 --- a/src/tc_bi_ortho/slater_tc.irp.f +++ b/src/tc_bi_ortho/slater_tc.irp.f @@ -1,4 +1,6 @@ -!!!!!! + +! --- + subroutine htilde_mu_mat_bi_ortho_tot(key_j, key_i, Nint, htot) BEGIN_DOC @@ -15,13 +17,14 @@ subroutine htilde_mu_mat_bi_ortho_tot(key_j, key_i, Nint, htot) integer, intent(in) :: Nint integer(bit_kind), intent(in) :: key_j(Nint,2),key_i(Nint,2) double precision, intent(out) :: htot - double precision :: hmono,htwoe,hthree + double precision :: hmono, htwoe, hthree integer :: degree + call get_excitation_degree(key_j, key_i, degree, Nint) if(degree.gt.2)then - htot = 0.d0 + htot = 0.d0 else - call htilde_mu_mat_bi_ortho(key_j,key_i, Nint, hmono,htwoe,hthree,htot) + call htilde_mu_mat_bi_ortho(key_j, key_i, Nint, hmono, htwoe, hthree, htot) endif end subroutine htilde_mu_mat_bi_ortho_tot @@ -29,52 +32,63 @@ end subroutine htilde_mu_mat_bi_ortho_tot ! -- subroutine htilde_mu_mat_bi_ortho(key_j, key_i, Nint, hmono, htwoe, hthree, htot) - implicit none - use bitmasks + BEGIN_DOC + ! ! where |key_j> is developed on the LEFT basis and |key_i> is developed on the RIGHT basis !! ! Returns the detail of the matrix element in terms of single, two and three electron contribution. !! WARNING !! ! ! Non hermitian !! + ! 
END_DOC - integer, intent(in) :: Nint - integer(bit_kind), intent(in) :: key_i(Nint,2),key_j(Nint,2) - double precision, intent(out) :: hmono,htwoe,hthree,htot - integer :: degree - hmono = 0.d0 - htwoe= 0.d0 - htot = 0.d0 + use bitmasks + + implicit none + integer, intent(in) :: Nint + integer(bit_kind), intent(in) :: key_i(Nint,2), key_j(Nint,2) + double precision, intent(out) :: hmono, htwoe, hthree, htot + integer :: degree + + hmono = 0.d0 + htwoe = 0.d0 + htot = 0.d0 hthree = 0.D0 + call get_excitation_degree(key_i, key_j, degree, Nint) - if(degree.gt.2)return + if(degree.gt.2) return + if(degree == 0)then - call diag_htilde_mu_mat_bi_ortho(Nint, key_i, hmono, htwoe, htot) + call diag_htilde_mu_mat_bi_ortho(Nint, key_i, hmono, htwoe, htot) else if (degree == 1)then - call single_htilde_mu_mat_bi_ortho(Nint, key_j, key_i, hmono, htwoe, htot) + call single_htilde_mu_mat_bi_ortho(Nint, key_j, key_i, hmono, htwoe, htot) else if(degree == 2)then - call double_htilde_mu_mat_bi_ortho(Nint, key_j, key_i, hmono, htwoe, htot) - endif - if(three_body_h_tc) then - if(degree == 2) then - if(.not.double_normal_ord) then - call double_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) - endif - else if(degree == 1)then - call single_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) - else if(degree == 0)then - call diag_htilde_three_body_ints_bi_ort(Nint, key_i, hthree) - endif - endif - htot = hmono + htwoe + hthree - if(degree==0)then - htot += nuclear_repulsion - endif + call double_htilde_mu_mat_bi_ortho(Nint, key_j, key_i, hmono, htwoe, htot) + endif + + if(three_body_h_tc) then + if(degree == 2) then + if(.not.double_normal_ord) then + call double_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) + endif + else if(degree == 1) then + call single_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) + else if(degree == 0) then + call diag_htilde_three_body_ints_bi_ort(Nint, key_i, hthree) + endif + endif + + htot = hmono + htwoe + hthree + if(degree==0) then + htot += nuclear_repulsion + endif end +! --- + subroutine diag_htilde_mu_mat_bi_ortho(Nint, key_i, hmono, htwoe, htot) BEGIN_DOC diff --git a/src/tc_bi_ortho/slater_tc_3e.irp.f b/src/tc_bi_ortho/slater_tc_3e.irp.f index 7c2c9c86..f4899a80 100644 --- a/src/tc_bi_ortho/slater_tc_3e.irp.f +++ b/src/tc_bi_ortho/slater_tc_3e.irp.f @@ -207,6 +207,8 @@ subroutine single_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) end +! --- + subroutine double_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) BEGIN_DOC @@ -244,7 +246,7 @@ subroutine double_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) return endif - if(core_tc_op)then + if(core_tc_op) then do i = 1, Nint key_i_core(i,1) = xor(key_i(i,1),core_bitmask(i,1)) key_i_core(i,2) = xor(key_i(i,2),core_bitmask(i,2)) @@ -291,3 +293,6 @@ subroutine double_htilde_three_body_ints_bi_ort(Nint, key_j, key_i, hthree) endif hthree *= phase end + +! --- +
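
Most of the quantities introduced above (int2_grad1_u12_ao, grad1_jmu_modif_num, num_int2_grad1_u12_ao, num_int2_u_grad1u_total_j1b2) rest on the closed-form gradient of the range-separated two-body Jastrow, grad_1 u12 = 0.5 * (1 - erf(mu*r12)) * (r1 - r2) / r12, which is exactly the factor tmp1 = 0.5d0 * (1.d0 - derf(mu_erf*r12)) / r12 multiplying (dx, dy, dz) in num_int2_u_grad1u_total_j1b2. The standalone sketch below is not part of the patch: it checks that closed form against a central finite difference, assuming the usual mu-Jastrow form u(r12) = 0.5*r12*(1 - erf(mu*r12)) - exp(-(mu*r12)^2) / (2*sqrt(pi)*mu); the local names mu, u12 and grad1_u12 are stand-ins for the provider mu_erf and the repository functions j12_mu and grad1_j12_mu_exc, whose exact definitions should be taken from the source.

program check_grad_u12

  implicit none

  ! mu is a local stand-in for the mu_erf provider
  double precision, parameter :: mu = 0.87d0
  double precision            :: r1(3), r2(3), g_ana(3), g_num(3), rp(3), rm(3), h
  integer                     :: k

  r1 = (/ 0.3d0, -0.1d0,  0.4d0 /)
  r2 = (/-0.2d0,  0.5d0, -0.7d0 /)
  h  = 1.d-5

  ! closed-form gradient with respect to r1
  call grad1_u12(r1, r2, g_ana)

  ! central finite-difference gradient of u12 with respect to r1
  do k = 1, 3
    rp = r1 ; rp(k) = rp(k) + h
    rm = r1 ; rm(k) = rm(k) - h
    g_num(k) = ( u12(rp, r2) - u12(rm, r2) ) / (2.d0 * h)
  enddo

  print *, ' closed form       : ', g_ana
  print *, ' finite difference : ', g_num

contains

  double precision function u12(x1, x2)
    ! assumed form: u(r12) = 0.5 r12 [1 - erf(mu r12)] - exp[-(mu r12)^2] / (2 sqrt(pi) mu)
    double precision, intent(in) :: x1(3), x2(3)
    double precision             :: r, sqpi
    sqpi = dsqrt(dacos(-1.d0))
    r    = dsqrt(sum((x1 - x2) * (x1 - x2)))
    u12  = 0.5d0 * r * (1.d0 - derf(mu * r)) - dexp(-mu * mu * r * r) / (2.d0 * sqpi * mu)
  end function u12

  subroutine grad1_u12(x1, x2, g)
    ! closed form used in the patch: grad_1 u12 = 0.5 [1 - erf(mu r12)] (r1 - r2) / r12
    double precision, intent(in)  :: x1(3), x2(3)
    double precision, intent(out) :: g(3)
    double precision              :: r
    r    = dsqrt(sum((x1 - x2) * (x1 - x2)))
    g(:) = 0.5d0 * (1.d0 - derf(mu * r)) * (x1(:) - x2(:)) / r
  end subroutine grad1_u12

end program check_grad_u12

Compiled on its own (e.g. with gfortran, which provides derf), the two printed vectors should agree to within the O(h^2) finite-difference error; this mirrors the role of the num_* routines in numerical_integ.irp.f, which re-evaluate the analytically integrated quantities on the grid so that the corresponding providers can be validated.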