% BSE_JPCLperspective/Manuscript/BSE_JPCL.tex
% 2020-05-26 17:13:53 +02:00
%
% 775 lines
% 58 KiB
% TeX
%
\newif\ifpreprint
%Preprint/reprint switch
%\preprinttrue % Enable for single column preprint
\preprintfalse % Enable for double column reprint
\ifpreprint
\documentclass[journal=jpclcd,manuscript=letter]{achemso}
\else
\documentclass[journal=jpclcd,manuscript=letter,layout=twocolumn]{achemso}
\fi
\usepackage[T1]{fontenc} % Use modern font encodings
\usepackage{amsmath}
\usepackage{newtxtext,newtxmath}
\usepackage{pifont}
\usepackage{graphicx}
\usepackage{dcolumn}
\usepackage{braket}
\usepackage{multirow}
\usepackage{threeparttable}
\usepackage{xspace}
\usepackage{verbatim}
\usepackage[version=4]{mhchem} % Formula subscripts using \ce{}
\usepackage{comment}
\usepackage{color,soul}
\usepackage{mathtools}
\usepackage[dvipsnames]{xcolor}
% NOTE(review): duplicate loads of xspace, graphicx, dcolumn, mhchem, color,
% amsmath, and soul removed -- they are already loaded above.
\usepackage{longtable}
\usepackage{rotating}
\usepackage{lscape}
\usepackage{dsfont}
\usepackage{physics}
\newcommand{\cmark}{\color{green}{\text{\ding{51}}}}
\newcommand{\xmark}{\color{red}{\text{\ding{55}}}}
\newcommand{\ie}{\textit{i.e.}}
\newcommand{\eg}{\textit{e.g.}}
\newcommand{\alert}[1]{\textcolor{red}{#1}}
\definecolor{darkgreen}{HTML}{009900}
\usepackage[normalem]{ulem}
\newcommand{\titou}[1]{\textcolor{red}{#1}}
\newcommand{\denis}[1]{\textcolor{purple}{#1}}
\newcommand{\xavier}[1]{\textcolor{darkgreen}{#1}}
\newcommand{\trashPFL}[1]{\textcolor{red}{\sout{#1}}}
\newcommand{\trashDJ}[1]{\textcolor{purple}{\sout{#1}}}
\newcommand{\trashXB}[1]{\textcolor{darkgreen}{\sout{#1}}}
\newcommand{\PFL}[1]{\titou{(\underline{\bf PFL}: #1)}}
\renewcommand{\DJ}[1]{\denis{(\underline{\bf DJ}: #1)}}
\newcommand{\XB}[1]{\xavier{(\underline{\bf XB}: #1)}}
\newcommand{\mc}{\multicolumn}
\newcommand{\mr}{\multirow}
\newcommand{\ra}{\rightarrow}
\newcommand{\fnm}{\footnotemark}
\newcommand{\fnt}{\footnotetext}
\newcommand{\tabc}[1]{\multicolumn{1}{c}{#1}}
\newcommand{\SI}{\textcolor{blue}{supporting information}}
%matrices
\newcommand{\bO}{\boldsymbol{0}}
\newcommand{\bI}{\boldsymbol{I}}
\newcommand{\bA}{\boldsymbol{A}}
\newcommand{\bH}{\boldsymbol{H}}
\newcommand{\br}{\boldsymbol{r}}
\newcommand{\bx}{\boldsymbol{x}}
\newcommand{\bb}{\boldsymbol{b}}
\newcommand{\bc}{\boldsymbol{c}}
\renewcommand{\tr}[1]{{#1}^{\dag}}
% methods
\newcommand{\DFT}{\text{DFT}}
\newcommand{\KS}{\text{KS}}
\newcommand{\BSE}{\text{BSE}}
\newcommand{\GW}{GW}
\newcommand{\XC}{\text{xc}}
% orbitals, gaps, etc
\newcommand{\eps}{\varepsilon}
\newcommand{\IP}{I}
\newcommand{\EA}{A}
\newcommand{\HOMO}{\text{HOMO}}
\newcommand{\LUMO}{\text{LUMO}}
\newcommand{\Eg}{E_\text{g}}
\newcommand{\EgFun}{\Eg^\text{fund}}
\newcommand{\EgOpt}{\Eg^\text{opt}}
\newcommand{\EB}{E_B}
\newcommand{\Nel}{N}
\newcommand{\Norb}{N_\text{orb}}
% units
\newcommand{\IneV}[1]{#1 eV}
\newcommand{\InAU}[1]{#1 a.u.}
\newcommand{\InAA}[1]{#1 \AA}
\newcommand{\kcal}{kcal/mol}
% complex
\newcommand{\ii}{\mathrm{i}}
\usepackage[colorlinks = true,
linkcolor = blue,
urlcolor = black,
citecolor = blue,
anchorcolor = black]{hyperref}
\definecolor{goodorange}{RGB}{225,125,0}
\definecolor{goodgreen}{RGB}{5,130,5}
\definecolor{goodred}{RGB}{220,50,25}
\definecolor{goodblue}{RGB}{30,144,255}
% \note{<tag>}{<text>}: colored inline annotation used during manuscript preparation.
% The tag selects the color scheme: F = orange, R = red, N = green, M = blue.
% The tag is typeset as a white-on-color badge followed by the note text in the same color.
\usepackage{ifthen}% \note relies on \ifthenelse; achemso does not guarantee ifthen is loaded
\newcommand{\note}[2]{%
\ifthenelse{\equal{#1}{F}}{%
\colorbox{goodorange}{\textcolor{white}{\footnotesize \fontfamily{phv}\selectfont #1}}
\textcolor{goodorange}{{\footnotesize \fontfamily{phv}\selectfont #2}}\xspace
}{}%
\ifthenelse{\equal{#1}{R}}{%
\colorbox{goodred}{\textcolor{white}{\footnotesize \fontfamily{phv}\selectfont #1}}
\textcolor{goodred}{{\footnotesize \fontfamily{phv}\selectfont #2}}\xspace
}{}%
\ifthenelse{\equal{#1}{N}}{%
\colorbox{goodgreen}{\textcolor{white}{\footnotesize \fontfamily{phv}\selectfont #1}}
\textcolor{goodgreen}{{\footnotesize \fontfamily{phv}\selectfont #2}}\xspace
}{}%
\ifthenelse{\equal{#1}{M}}{%
\colorbox{goodblue}{\textcolor{white}{\footnotesize \fontfamily{phv}\selectfont #1}}
\textcolor{goodblue}{{\footnotesize \fontfamily{phv}\selectfont #2}}\xspace
}{}%
}
\usepackage{titlesec}
%\usepackage{footnote}
%
\let\titlefont\undefined
\usepackage[fontsize=11pt]{scrextend}
\captionsetup{font={sf,footnotesize}}
%
\titleformat{\section}
{\normalfont\sffamily\bfseries\color{Blue}}
{\thesection.}{0.25em}{\uppercase}
\titleformat{\subsection}[runin]
{\normalfont\sffamily\bfseries}
{\thesubsection}{0.25em}{}[.\;\;]
\titleformat{\suppinfo}
{\normalfont\sffamily\bfseries}
{\thesubsection}{0.25em}{}
\titlespacing*{\section}{0pt}{0.5\baselineskip}{0.01\baselineskip}
\titlespacing*{\subsection}{0pt}{0.125\baselineskip}{0.01\baselineskip}
\renewcommand{\refname}{\normalfont\sffamily\bfseries\color{Blue}{\normalsize REFERENCES}}
\setlength{\bibsep}{0pt plus 0.3ex}
\author{Xavier Blase}
\email{xavier.blase@neel.cnrs.fr}
\affiliation[NEEL, Grenoble]{Universit\'e Grenoble Alpes, CNRS, Institut NEEL, F-38042 Grenoble, France}
\author{Ivan Duchemin}
\affiliation[CEA, Grenoble]{Universit\'e Grenoble Alpes, CEA, IRIG-MEM-L Sim, 38054 Grenoble, France}
\author{Denis Jacquemin}
\affiliation[CEISAM, Nantes]{Universit\'e de Nantes, CNRS, CEISAM UMR 6230, F-44000 Nantes, France}
\author{Pierre-Fran\c{c}ois Loos}
\email{loos@irsamc.ups-tlse.fr}
\affiliation[LCPQ, Toulouse]{Laboratoire de Chimie et Physique Quantiques, Universit\'e de Toulouse, CNRS, UPS, France}
\let\oldmaketitle\maketitle
\let\maketitle\relax
\title{A Chemist's Guide to the Bethe-Salpeter Equation}
\date{\today}
\begin{tocentry}
\centering
\includegraphics[width=\textwidth]{../TOC/TOC}
\end{tocentry}
\begin{document}
\ifpreprint
\else
\twocolumn[
\begin{@twocolumnfalse}
\fi
\oldmaketitle
%%%%%%%%%%%%%%%%
%%% ABSTRACT %%%
%%%%%%%%%%%%%%%%
\begin{abstract}
The many-body Green's function Bethe-Salpeter formalism is steadily asserting itself as a new efficient and accurate tool in the armada of computational methods available to chemists in order to predict neutral electronic excitations in molecular systems.
In particular, the combination of the so-called $GW$ approximation of many-body perturbation theory, giving access to reliable charged excitations and quasiparticle energies, and the Bethe-Salpeter formalism, able to catch excitonic effects, has shown to provide accurate excitation energies in many chemical scenarios with a typical error of $0.1$--$0.3$ eV.
In this \textit{Perspective} article, we provide a historical overview of the Bethe-Salpeter formalism, with a particular focus on its condensed-matter roots.
We also propose a critical review of its strengths and weaknesses for different chemical situations.
Future directions of developments and improvements are also discussed.
\end{abstract}
\ifpreprint
\else
\end{@twocolumnfalse}
]
\fi
\ifpreprint
\else
\small
\fi
\noindent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In its press release announcing the attribution of the 2013 Nobel prize in Chemistry to Karplus, Levitt, and Warshel, the Royal Swedish Academy of Sciences concluded by stating \textit{``Today the computer is just as important a tool for chemists as the test tube.
Simulations are so realistic that they predict the outcome of traditional experiments.''} \cite{Nobel_2003}
Martin Karplus' Nobel lecture moderated this statement, introducing his presentation by a 1929 quote from Dirac emphasizing that laws of quantum mechanics are \textit{``much too complicated to be soluble''}, urging scientists to develop \textit{``approximate practical methods''}. This is where the electronic structure community stands, attempting to develop robust approximations to study with increasing accuracy the properties of ever more complex systems.
The study of neutral electronic excitations in condensed-matter systems, from molecules to extended solids, has witnessed the development of a large number of such approximate methods with numerous applications to a large variety of fields, from the prediction of the colour of precious metals and stones for jewellery, to the understanding, \eg, of the basic principles behind photovoltaics, photocatalysis or DNA damage under irradiation in the context of biology.
% T2: shall we add a few references?
The present \textit{Perspective} aims at describing the current status and upcoming challenges for the Bethe-Salpeter equation (BSE) formalism \cite{Salpeter_1951,Strinati_1988} that, while sharing many features with time-dependent density-functional theory (TD-DFT), \cite{Runge_1984,Casida_1995,Dreuw_2005} including computational cost scaling with system size, relies on a different formalism, with specific difficulties but also potential solutions to known issues. \cite{Blase_2018}
%%%%%%%%%%%%%%%%%%%%%%
\section{Theory}
%%%%%%%%%%%%%%%%%%%%%%
The BSE formalism \cite{Salpeter_1951,Strinati_1988,Albrecht_1998,Rohlfing_1998,Benedict_1998,vanderHorst_1999} belongs to the family of Green's function many-body perturbation theories (MBPT) \cite{Hedin_1965,Aryasetiawan_1998,Onida_2002,Reining_2017,ReiningBook} together with, for example, the algebraic-diagrammatic construction (ADC) techniques, originally developed by Schirmer and Trofimov, \cite{Schirmer_1982,Schirmer_1991,Schirmer_2004d,Schirmer_2018} in quantum chemistry. \cite{Dreuw_2015}
While the one-body density stands as the basic variable in density-functional theory DFT, \cite{Hohenberg_1964,Kohn_1965,ParrBook} the pillar of Green's function MBPT is the (time-ordered) one-body Green's function
\begin{equation}
G(\bx t,\bx't') = -i \mel{\Nel}{T \qty[ \Hat{\psi}(\bx t) \Hat{\psi}^{\dagger}(\bx't') ]}{\Nel},
\end{equation}
where $\ket{\Nel}$ is the $\Nel$-electron ground-state wave function.
The operators $\Hat{\psi}(\bx t)$ and $\Hat{\psi}^{\dagger}(\bx't')$ remove and add an electron (respectively) in space-spin-time positions ($\bx t$) and ($\bx't'$), while \titou{$T$ is the time-ordering operator}.
For $t > t'$, $G$ provides the amplitude of probability of finding, on top of the ground-state Fermi sea, an electron in ($\bx t$) that was previously introduced in ($\bx't'$), while for $t < t'$ the propagation of a hole is monitored.\\
%===================================
\subsection{Charged excitations}
%===================================
A central property of the one-body Green's function is that its frequency-dependent (\ie, dynamical) spectral representation has poles at the charged excitation energies of the system
\begin{equation}\label{eq:spectralG}
G(\bx,\bx'; \omega ) = \sum_s \frac{ f_s(\bx) f^*_s(\bx') }{ \omega - \varepsilon_s + i \eta \, \text{sgn}(\varepsilon_s - \mu ) },
\end{equation}
where $\mu$ is the chemical potential, $\eta$ is a positive infinitesimal, $\varepsilon_s = E_s^{\Nel+1} - E_0^{\Nel}$ for $\varepsilon_s > \mu$, and $\varepsilon_s = E_0^{\Nel} - E_s^{\Nel-1}$ for $\varepsilon_s < \mu$.
Here, $E_s^{\Nel}$ is the total energy of the $s$th excited state of the $\Nel$-electron system, and $E_0^\Nel$ corresponds to its ground-state energy.
The $f_s$'s are the so-called Lehmann amplitudes that reduce to one-body orbitals in the case of single-determinant many-body wave functions (see below).
Unlike Kohn-Sham (KS) eigenvalues, the Green's function poles $\lbrace \varepsilon_s \rbrace$ are proper \titou{charging} energies of the $\Nel$-electron system, leading to well-defined ionization potentials and electron affinities. Contrary to standard $\Delta$SCF techniques, the knowledge of $G$ provides the full ionization spectrum, as measured by direct and inverse photoemission, not only that associated with frontier orbitals.
Using the equation-of-motion formalism for the creation/destruction operators, it can be shown formally that $G$ verifies
\begin{equation}\label{eq:Gmotion}
\qty[ \pdv{}{t_1} - h(\br_1) ] G(1,2) - \int d3 \, \Sigma(1,3) G(3,2)
= \delta(1,2),
\end{equation}
where we introduce the usual composite index, \eg, $1 \equiv (\bx_1,t_1)$.
Here, $h$ is the one-body Hartree Hamiltonian and $\Sigma$ is the so-called exchange-correlation (xc) self-energy operator.
Using the spectral representation of $G$ [see Eq.~\eqref{eq:spectralG}], one gets the familiar eigenvalue equation, \ie,
\begin{equation}
h(\br) f_s(\br) + \int d\br' \, \Sigma(\br,\br'; \varepsilon_s ) f_s(\br') = \varepsilon_s f_s(\br),
\end{equation}
which formally resembles the KS equation \cite{Kohn_1965} with the difference that the self-energy $\Sigma$ is non-local, energy-dependent and non-Hermitian.
The knowledge of $\Sigma$ allows one to access the true addition/removal energies, namely the entire spectrum of occupied and virtual electronic energy levels, at the cost of solving a generalized one-body eigenvalue equation.
\titou{[INTRODUCE QUASIPARTICLES and OTHER solutions ??]}
\titou{The spin variable has disappeared. How do we deal with this?}
\\
%===================================
\subsection{The $GW$ self-energy}
%===================================
While the equations reported above are formally exact, it remains to provide an expression for the xc self-energy operator $\Sigma$.
This is where Green's function practical theories differ.
Developed by Lars Hedin in 1965 with application to the interacting homogeneous electron gas, \cite{Hedin_1965} the $GW$ approximation
\cite{Aryasetiawan_1998,Farid_1999,Onida_2002,Ping_2013,Leng_2016,Golze_2019rev} follows the path of linear response by considering the variation of $G$ with respect to an external perturbation (see Fig.~\ref{fig:pentagon}).
The resulting equation, when compared with the equation for the time-evolution of $G$ [see Eq.~\eqref{eq:Gmotion}], leads to a formal expression for the self-energy
\begin{equation}\label{eq:Sig}
\Sigma(1,2) = i \int d34 \, G(1,4) W(3,1^{+}) \Gamma(42,3),
\end{equation}
where $W$ is the dynamically-screened Coulomb potential and $\Gamma$ is a ``vertex'' function that can be written as $\Gamma(12,3) = \delta(12)\delta(13) + \order{W}$, where $\order{W}$ means a corrective term with leading linear order in terms of $W$.
The neglect of the vertex leads to the so-called $GW$ approximation of the self-energy
\begin{equation}\label{eq:SigGW}
\Sigma^{\GW}(1,2) = i \, G(1,2) W(2,1^{+}),
\end{equation}
that can be regarded as the lowest-order perturbation in terms of the screened Coulomb potential $W$ with
\begin{gather}
W(1,2) = v(1,2) + \int d34 \, v(1,3) \chi_0(3,4) W(4,2), \label{eq:defW}
\\
\chi_0(1,2) = -i \, G(1,2) G(2,1^{+}),
\end{gather}
where $\chi_0$ is the independent electron susceptibility and $v$ the bare Coulomb potential.
%%% FIG 1 %%%
\begin{figure}
\includegraphics[width=0.7\linewidth]{fig1/fig1}
\caption{
Hedin's pentagon: it connects the Green's function $G$, its non-interacting analog $G_0$, the irreducible vertex function $\Gamma$, the irreducible polarizability $P$, the dynamically-screened Coulomb potential $W$, and the self-energy $\Sigma$ through a set of five integro-differential equations known as Hedin's equations. \cite{Hedin_1965}
The path made of black arrows shows the $GW$ process which bypasses the computation of $\Gamma$ (gray arrows).
As input, one must provide KS (or HF) orbitals and their corresponding energies.
Depending on the level of self-consistency of the $GW$ calculation, only the orbital energies or both the orbitals and their energies are corrected.
As output, $GW$ provides corrected quantities, \ie, quasiparticle energies and $W$ which can then be used to compute the BSE neutral excitations.
\label{fig:pentagon}}
\end{figure}
%%% %%% %%%
In practice, the input $G$ and $\chi_0$ required to initially build $\Sigma^{\GW}$ are taken as the ``best'' Green's function and susceptibility that can be easily computed, namely the KS or Hartree-Fock (HF) ones where the $\lbrace \varepsilon_p, f_p \rbrace$ of Eq.~\eqref{eq:spectralG} are taken to be KS (or HF) eigenstates.
Taking then $( \Sigma^{\GW}-V^{\XC} )$ as a correction to the KS xc potential $V^{\XC}$, a first-order correction to the input KS energies $\lbrace \varepsilon_p^{\KS} \rbrace$ is obtained as follows:
\begin{equation}
\varepsilon_p^{\GW} = \varepsilon_p^{\KS} +
\mel{\phi_p^{\KS}}{\Sigma^{\GW}(\varepsilon_p^{\GW}) - V^{\XC}}{\phi_p^{\KS}}.
\end{equation}
\titou{T2: Shall we introduce the renormalization factor or its non-linear version?
Here, I would prefer to introduce the frequency-dependent quasiparticle equation and talks about quasiparticle and satellites, and the way of solving this equation in practice (linearization, etc).}
Such an approach, where input KS energies are corrected to yield better electronic energy levels, is labeled as the single-shot, or perturbative, $G_0W_0$ technique.
This simple scheme was used in the early $GW$ studies of extended semiconductors and insulators, \cite{Strinati_1980,Hybertsen_1986,Godby_1988,Linden_1988}
surfaces, \cite{Northrup_1991,Blase_1994,Rohlfing_1995} and 2D systems, \cite{Blase_1995} dramatically reducing the errors associated with KS eigenvalues in conjunction with common local or gradient-corrected approximations to the xc potential.
In particular, the well-known ``band gap'' problem, \cite{Perdew_1983,Sham_1983} namely the underestimation of the occupied to unoccupied bands energy gap at the LDA KS level, was dramatically reduced, bringing the agreement with experiment to within a few tenths of an eV [REFS] with a computational cost scaling quartically with the number of basis functions (see below).
Although $G_0W_0$ provides accurate results (at least for weakly/moderately correlated systems), it is strongly starting-point dependent due to its perturbative nature.
Further improvements may be obtained via self-consistency of the Hedin's equations (see Fig.~\ref{fig:pentagon}).
There exist two main types of self-consistent $GW$ methods:
i) \textit{``eigenvalue-only quasiparticle''} $GW$ (ev$GW$), \cite{Hybertsen_1986,Shishkin_2007,Blase_2011,Faber_2011}
where the quasiparticle energies are updated at each iteration, and
ii) \textit{``quasiparticle self-consistent''} $GW$ (qs$GW$), \cite{Faleev_2004,vanSchilfgaarde_2006,Kotani_2007,Ke_2011,Kaplan_2016}
where one updates both the quasiparticle energies and the corresponding orbitals.
Note that a starting point dependence remains in ev$GW$ as the orbitals are not self-consistently optimized in this case.
However, self-consistency does not always improve things, as self-consistency and vertex corrections are known to cancel to some extent. \cite{ReiningBook}
Indeed, there is a long-standing debate about the importance of partial and full self-consistency in $GW$. \cite{Stan_2006, Stan_2009, Rostgaard_2010, Caruso_2012, Caruso_2013, Caruso_2013a, Caruso_2013b, Koval_2014, Wilhelm_2018}
In some situations, it has been found that self-consistency can worsen spectral properties compared to the simpler $G_0W_0$ method.
A famous example has been provided by the calculations performed on the uniform electron gas. \cite{Holm_1998, Holm_1999,Holm_2000,Garcia-Gonzalez_2001}
This was further evidenced in real extended systems by several authors. \cite{Schone_1998, Ku_2002, Kutepov_2016, Kutepov_2017}
However, other approximations may have caused such deterioration, \eg, pseudo-potentials \cite{deGroot_1995} or finite-basis set effects. \cite{Friedrich_2006}
These studies have cast doubt on the importance of self-consistent schemes within $GW$, at least for solid-state calculations.
For finite systems such as atoms and molecules, the situation is less controversial, and partially or fully self-consistent $GW$ methods have shown great promise. \cite{Ke_2011,Blase_2011,Faber_2011,Caruso_2012,Caruso_2013,Caruso_2013a,Caruso_2013b,Koval_2014,Hung_2016,Blase_2018,Jacquemin_2017}
As another important feature compared to other perturbative techniques, the $GW$ formalism can tackle finite-size and periodic systems, and does not present any divergence in the limit of zero gap (metallic) systems. \cite{Campillo_1999}
However, remaining a low-order perturbative approach starting with single-determinant mean-field solutions, it is not intended to explore strongly correlated systems. \cite{Verdozzi_1995}\\
%===================================
\subsection{Neutral excitations}
%===================================
While TD-DFT starts with the variation of the charge density $\rho$ with respect to an external local perturbation \titou{$U(1)$}, the BSE formalism considers a generalized 4-points susceptibility that monitors the variation of the Green's function with respect to a non-local external perturbation \titou{$U(1,2)$}:
\begin{equation}
\chi(1,2) \stackrel{\DFT}{=} \pdv{\rho(1)}{U(2)}
\quad \rightarrow \quad
L(12,34) \stackrel{\BSE}{=} -i \pdv{G(1,2)}{U(3,4)}.
\end{equation}
%with the relation $\chi(1,2) = L(11,22)$ since $\rho(1) = -iG(1,1^{+})$, as a first bridge between the TD-DFT and BSE worlds.
The equation of motion for $G$ [see Eq.~\eqref{eq:Gmotion}] can be reformulated in the form of a Dyson equation
\begin{equation}
G = G_0 + G_0 \Sigma G,
\end{equation}
that relates the full (interacting) Green's function, $G$, to its Hartree version, $G_0$, obtained by replacing the $\lbrace \varepsilon_p, f_p \rbrace$ by the Hartree eigenvalues and eigenfunctions.
The derivative with respect to $U$ of the Dyson equation yields
\begin{multline}\label{eq:DysonL}
L(12,34) = L_0(12,34)
\\
+ \int d5678 \, L_0(12,56) \Xi^{\BSE}(5,6,7,8) L(78,34),
\end{multline}
where $L_0 = \partial G_0 / \partial U$ is the Hartree 4-point susceptibility and
\begin{equation}
\Xi^{\BSE}(5,6,7,8) = v(5,7) \delta(56) \delta(78) + \pdv{\Sigma(5,6)}{G(7,8)}
\end{equation}
is the so-called BSE kernel.
This equation can be compared to its TD-DFT analog
\begin{equation}
\chi(1,2) = \chi_0(1,2) + \int d34 \, \chi_0(1,3) \Xi^{\DFT}(3,4) \chi(4,2),
\end{equation}
where
\begin{equation}
\Xi^{\DFT}(3,4) = v(3,4) + \pdv{V^{\XC}(3)}{\rho(4)}
\end{equation}
is the TD-DFT kernel.
Plugging now the $GW$ self-energy [see Eq.~\eqref{eq:SigGW}], in a scheme that we label the BSE@$GW$ approach, leads to an approximation to the BSE kernel
\begin{equation}\label{eq:BSEkernel}
\Xi^{\BSE}(5,6,7,8) = v(5,7) \delta(56) \delta(78) -W(5,6) \delta(57) \delta(68 ),
\end{equation}
where it is traditional to neglect the derivative $( \partial W / \partial G)$ that introduces again higher orders in $W$. \cite{Hanke_1980,Strinati_1982,Strinati_1984}
Taking the static limit, \ie, $W(\omega=0)$, for the screened Coulomb potential, that replaces the static DFT xc kernel, and expressing Eq.~\eqref{eq:DysonL} in the standard product space $\lbrace \phi_i(\br) \phi_a(\br') \rbrace$ [where $(i,j)$ are occupied orbitals and $(a,b)$ are unoccupied orbitals], leads to an eigenvalue problem similar to the so-called Casida's equations in TD-DFT:
\begin{equation}
\begin{pmatrix}
R & C
\\
-C^* & R^{*}
\end{pmatrix}
\begin{pmatrix}
X^m
\\
Y^m
\end{pmatrix}
=
\Omega_m
\begin{pmatrix}
X^m
\\
Y^m
\end{pmatrix},
\end{equation}
with electron-hole ($eh$) eigenstates written as
\begin{equation}
\psi_{m}^{eh}(\br_e,\br_h)
= \sum_{ia} \qty[ X_{ia}^{m} \phi_i(\br_h) \phi_a(\br_e)
+ Y_{ia}^{m} \phi_i(\br_e) \phi_a(\br_h) ],
\end{equation}
where $m$ indexes the electronic excitations.
The $\lbrace \phi_{i/a} \rbrace$ are the input (KS) eigenstates used to build the $GW$ self-energy.
\titou{T2: this is only true in the case of $G_0W_0$.}
The resonant and anti-resonant parts of the BSE Hamiltonian read
\begin{gather}
R_{ai,bj} = \qty( \varepsilon_a^{\GW} - \varepsilon_i^{\GW} ) \delta_{ij} \delta_{ab} + \eta (ai|bj) - W_{ai,bj},
\\
C_{ai,bj} = \eta (ai|jb) - W_{ai,jb},
\end{gather}
with $\eta=2,0$ for singlets/triplets and
\begin{equation}\label{eq:Wmatel}
W_{ai,bj} = \iint d\br d\br'
\phi_i(\br) \phi_j(\br) W(\br,\br'; \omega=0)
\phi_a(\br') \phi_b(\br'),
\end{equation}
where we notice that the two occupied (virtual) eigenstates are taken at the same position of space, in contrast with the
$(ai|bj)$ bare Coulomb term.
As compared to TD-DFT,
\begin{itemize}
\item the $GW$ quasiparticle energies $\lbrace \varepsilon_{i/a}^{\GW} \rbrace$ replace the KS eigenvalues, and
\item the non-local screened Coulomb matrix elements replace the DFT xc kernel.
\end{itemize}
\titou{T2: would it be useful to say that there is 100\% exact exchange in BSE@$GW$?}
We emphasise that these equations can be solved at exactly the same cost as the standard TD-DFT equations once the quasiparticle energies and screened Coulomb potential $W$ are inherited from preceding $GW$ calculations. This defines the standard (static) BSE@$GW$ scheme that we discuss in this \textit{Perspective}, emphasizing its pros and cons. \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Historical overview}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Originally developed in the framework of nuclear physics, \cite{Salpeter_1951} the BSE formalism emerged in condensed-matter physics around the 1960s at the semi-empirical tight-binding level with the study of the optical properties of simple semiconductors. \cite{Sham_1966,Strinati_1984,Delerue_2000}
Three decades later, the first \textit{ab initio} implementations, starting with small clusters \cite{Onida_1995,Rohlfing_1998} and then extended semiconductors and wide-gap insulators, \cite{Albrecht_1997,Benedict_1998,Rohlfing_1999} paved the way to the popularization of the BSE formalism in the solid-state physics community.
Following early applications to periodic polymers and molecules, [REFS] BSE gained much momentum in the quantum chemistry community with, in particular, several benchmarks \cite{Korbel_2014,Jacquemin_2015a,Bruneval_2015,Jacquemin_2015b,Hirose_2015,Jacquemin_2017,Krause_2017,Gui_2018} on large molecular systems performed with the very same parameters (geometries, basis sets, etc) as the available higher-level reference calculations, \cite{Schreiber_2008} such as CC3. \cite{Christiansen_1995}
Such comparisons were grounded in the development of codes replacing the plane-wave paradigm of solid-state physics by well-documented correlation-consistent Gaussian basis sets, \cite{Dunning_1989} together with adequate auxiliary bases when resolution-of-the-identity (RI) techniques were used. [REFS]
An important conclusion drawn from these calculations was that the quality of the BSE excitation energies is strongly correlated to the deviation of the preceding $GW$ HOMO-LUMO gap
\begin{equation}
\Eg^{\GW} = \varepsilon_{\LUMO}^{\GW} - \varepsilon_{\HOMO}^{\GW},
\end{equation}
with the experimental (photoemission) fundamental gap \cite{Bredas_2014}
\begin{equation}
\EgFun = I^\Nel - A^\Nel,
\end{equation}
where $I^\Nel = E_0^{\Nel-1} - E_0^\Nel$ and $A^\Nel = E_0^{\Nel+1} - E_0^\Nel$ are the ionization potential and the electron affinity of the $\Nel$-electron system (see Fig.~\ref{fig:gaps}).
%%% FIG 2 %%%
\begin{figure*}
\includegraphics[width=0.7\linewidth]{gaps}
\caption{
Definition of the optical gap $\EgOpt$ and fundamental gap $\EgFun$.
$\EB$ is the excitonic effect, while $I^\Nel$ and $A^\Nel$ are the ionization potential and the electron affinity of the $\Nel$-electron system.
$\Eg^{\KS}$ and $\Eg^{\GW}$ are the KS and $GW$ HOMO-LUMO gaps.
See main text for the definition of the other quantities.
\label{fig:gaps}}
\end{figure*}
%%% %%% %%%
Standard $G_0W_0$ calculations starting with KS eigenstates generated with (semi)local functionals yield much larger HOMO-LUMO gaps than the input KS gap
\begin{equation}
\Eg^{\KS} = \varepsilon_{\LUMO}^{\KS} - \varepsilon_{\HOMO}^{\KS},
\end{equation}
but still too small as compared to the experimental value, \ie,
\begin{equation}
\Eg^{\KS} \ll \Eg^{G_0W_0} < \EgFun.
\end{equation}
Such an underestimation of the fundamental gap leads to a similar underestimation of the optical gap $\EgOpt$, \ie, the lowest optical excitation energy:
\begin{equation}
\EgOpt = E_1^{\Nel} - E_0^{\Nel} = \EgFun - \EB,
\end{equation}
where $\EB$ is the excitonic effect, that is, the stabilization implied by the attraction of the excited electron and its hole left behind (see Fig.~\ref{fig:gaps}).
Because of this, we have $\EgOpt < \EgFun$.
Such a residual gap problem can be significantly improved by adopting xc functionals with a tuned amount of exact exchange \cite{Stein_2009,Kronik_2012} that yield a much improved KS gap as a starting point for the $GW$ correction. \cite{Bruneval_2013,Rangel_2016,Knight_2016}
Alternatively, self-consistent schemes such as ev$GW$ and qs$GW$, \cite{Hybertsen_1986,Shishkin_2007,Blase_2011,Faber_2011,Faleev_2004,vanSchilfgaarde_2006,Kotani_2007,Ke_2011} where corrected eigenvalues, and possibly orbitals, are reinjected in the construction of $G$ and $W$, have been shown to lead to a significant improvement of the quasiparticle energies in the case of molecular systems, with the advantage of significantly removing the dependence on the starting point functional. \cite{Rangel_2016,Kaplan_2016,Caruso_2016}
As a result, BSE excitation singlet energies starting from such improved quasiparticle energies were found to be in much better agreement with reference calculations.
For the sake of illustration, an average error of $0.2$ eV was found for the well-known Thiel set \cite{Schreiber_2008} gathering more than a hundred representative singlet excitations from a large variety of representative molecules. \cite{Jacquemin_2015a,Bruneval_2015,Gui_2018,Krause_2017}
This is equivalent to the best TD-DFT results obtained by scanning a large variety of global hybrid functionals with various amounts of exact exchange.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Successes \& Challenges}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%==========================================
\subsection{Charge-transfer excited states}
%==========================================
A very remarkable success of the BSE formalism lies in the description of the charge-transfer (CT) excitations, a notoriously difficult problem for TD-DFT calculations adopting standard functionals. \cite{Dreuw_2004}
Similar difficulties emerge in solid-state physics for semiconductors where extended Wannier excitons, characterized by weakly overlapping electrons and holes, cause a dramatic deficit of spectral weight at low energy. \cite{Botti_2004}
These difficulties can be ascribed to the lack of long-range electron-hole interaction with local xc functionals.
It can be cured through an exact exchange contribution, a solution that explains in particular the success of optimally-tuned range-separated hybrids for the description of CT excitations. \cite{Stein_2009,Kronik_2012}
The analysis of the screened Coulomb potential matrix elements in the BSE kernel [see Eq.~\eqref{eq:BSEkernel}] reveals that such long-range (non-local) electron-hole interactions are properly described, including in environments (solvents, molecular solid, etc) where screening reduces the long-range electron-hole interactions.
The success of the BSE formalism to treat CT excitations has been demonstrated in several studies, \cite{Blase_2011,Baumeier_2012a,Duchemin_2012,Sharifzadeh_2013,Cudazzo_2010,Cudazzo_2013} opening the way to important applications such as doping, photovoltaics or photocatalysis in organic systems.\\
%==========================================
\subsection{ Combining BSE with PCM and QM/MM models }
%==========================================
Recent attempts to merge the $GW$ and BSE formalisms with model polarizable environments at the PCM or QM/MM levels
\cite{Baumeier_2014,Duchemin_2016,Li_2016,Varsano_2016,Duchemin_2018,Li_2019,Tirimbo_2020} paved the way not only to interesting applications but also to a better understanding of the merits of these approaches relying on the use of the screened Coulomb potential designed to capture polarization effects at all spatial ranges. As a matter of fact,
dressing the bare Coulomb potential with the reaction field matrix
\[
v({\bf r},{\bf r}') \longrightarrow v({\bf r},{\bf r}') + v^{\text{reac}}({\bf r},{\bf r}'; \omega)
\]
in the relation between the screened Coulomb potential $W$ and the independent-electron susceptibility [see Eq.~\eqref{eq:defW}] allows one to perform $GW$ and BSE calculations in a polarizable environment (a solvent, a donor/acceptor interface, a semiconducting or metallic substrate, etc.) with the same complexity as in the gas phase. The reaction field matrix $v^{\text{reac}}({\bf r},{\bf r}'; \omega)$ describes the potential generated in ${\bf r}'$ by the charge rearrangements in the polarizable environment induced by a source charge located in ${\bf r}$, with $\bf r$ and ${\bf r}'$ in the quantum mechanical (QM) subsystem of interest. The reaction field is dynamical since the dielectric properties of the environment, such as the macroscopic dielectric constant $\epsilon_M(\omega)$, are in principle frequency dependent. Once the reaction field matrix is known, with typically $\mathcal{O}(N^3)$ operations, the full spectrum of $GW$ quasiparticle energies and BSE neutral excitations can be renormalized by the effect of the environment.
A remarkable property \cite{Duchemin_2018} of the BSE formalism combined with a polarizable environment is that the scheme described above, with electron-electron and electron-hole interactions renormalized by the reaction field, captures both linear-response (LR) and state-specific (SS) contributions \cite{Cammi_2005} to the solvatochromic shift of the optical lines, allowing one to treat Frenkel and CT excitations on the same footing. This is an important advantage as compared, \eg, to TD-DFT calculations where LR and SS effects have to be explored with different formalisms.
To date, the effect of the environment on fast electronic excitations is only included by considering the low-frequency optical response of the polarizable medium (\eg, considering the $\epsilon_{\infty} \simeq 1.78$ macroscopic dielectric constant for water in the optical range), neglecting the variations with frequency of the dielectric constant in the optical range. Generalization to fully frequency-dependent polarizable properties of the environment would make it possible to explore systems where the relative dynamics of the solute and the solvent are not decoupled, namely in situations where neither the adiabatic nor the antiadiabatic limits are expected to be valid (for a recent discussion, see Ref.~\citenum{Huu_2020}). \\
We now leave the description of successes to discuss difficulties and perspectives.\\
%==========================================
\subsection{The computational challenge}
%==========================================
As emphasized above, the BSE eigenvalue equation in the occupied-to-virtual product space is formally equivalent to that of TD-DFT or TD-HF. \cite{Dreuw_2005}
Searching iteratively for the lowest eigenstates presents the same $\order*{\Norb^4}$ matrix-vector multiplication computational cost within BSE and TD-DFT (where $\Norb$ is the number of orbitals).
Concerning the construction of the BSE Hamiltonian, it is no more expensive than building the TD-DFT one with hybrid functionals, reducing again to $\order*{\Norb^4}$ operations with standard RI techniques.
At the price of sacrificing the knowledge of the eigenvectors, the BSE absorption spectrum can be known with $\order*{\Norb^3}$ operations using iterative techniques. \cite{Ljungberg_2015}
With the same restriction on the eigenvectors, a time-propagation approach, similar to that implemented for TD-DFT, \cite{Yabana_1996} combined with stochastic techniques to reduce the cost of building the BSE Hamiltonian matrix elements, allows quadratic scaling with system size. \cite{Rabani_2015}
In practice, the main bottleneck for standard BSE calculations as compared to TD-DFT resides in the preceding $GW$ calculations that scale as $\order{\Norb^4}$ with system size using plane-wave basis sets or RI techniques, but with a rather large prefactor.
%%Such a cost is mainly associated with calculating the free-electron susceptibility with its entangled summations over occupied and virtual states.
%%While attempts to bypass the $GW$ calculations are emerging, replacing quasiparticle energies by Kohn-Sham eigenvalues matching energy electron addition/removal, \cite{Elliott_2019}
The field of low-scaling $GW$ calculations is however witnessing significant advances.
While approaches exploiting the sparsity of the atomic-orbital representation have been developed, \cite{Foerster_2011,Wilhelm_2018} efficient real-space-grid and time techniques are blooming, \cite{Rojas_1995,Liu_2016} borrowing in particular the well-known Laplace transform approach used in quantum chemistry. \cite{Haser_1992} % NOTE(review): original sentence was left unfinished ("the sparsity of ..."); completed with a plausible reading -- please verify intended content
Together with a stochastic sampling of virtual states, this family of techniques allows one to set up linear-scaling $GW$ calculations. \cite{Vlcek_2017}
The separability of occupied and virtual state summations, which lies at the heart of these approaches, is now also blooming in quantum chemistry within the interpolative separable density fitting (ISDF) approach, applied to computing with cubic scaling the susceptibility needed in random-phase approximation (RPA) and $GW$ calculations. \cite{Lu_2017,Duchemin_2019,Gao_2020}
These ongoing developments pave the way to applying the $GW$@BSE formalism to systems comprising several hundred atoms on standard laboratory clusters.
\\
%==========================================
\subsection{The triplet instability challenge}
%==========================================
The analysis of the singlet-triplet splitting is central to numerous applications such as singlet fission, thermally activated delayed fluorescence (TADF) or
stability analysis of restricted closed-shell solutions at the HF \cite{Seeger_1977} and TD-DFT \cite{Bauernschmitt_1996} levels.
Such instabilities also contaminate TD-DFT calculations performed with popular range-separated hybrids, which generally contain a large fraction of exact exchange in the long range. \cite{Sears_2011} % NOTE(review): original line was a dangling lowercase fragment -- a subject was added; please verify against the intended preceding sentence
While TD-DFT with range-separated hybrids can benefit from tuning the range-separation parameter as a mean to act on the triplet instability, \cite{Sears_2011} BSE calculations do not offer this pragmatic way-out since the screened Coulomb potential that builds the kernel does not offer any parameter to tune.
Benchmark calculations \cite{Jacquemin_2017b,Rangel_2017} clearly illustrate these shortcomings for triplet excitation energies. % NOTE(review): original line was the bare placeholder "benchmarks \cite{...}"; sentence completed -- please verify intended content
A first cure was offered by hybridizing TD-DFT and BSE, namely adding to the BSE kernel the correlation part of the underlying DFT functional used to build the susceptibility and resulting screened Coulomb potential $W$. \cite{Holzer_2018b}\\
%==========================================
\subsection{The challenge of analytical nuclear gradients}
%==========================================
%An additional issue concerns the formalism taken to calculate the ground-state energy for a given atomic configuration. Since the BSE formalism presented so far concerns the calculation of the electronic excitations, namely the difference of energy between the GS and the ES, gradients of the ES absolute energy require
%
%This points to another direction for the BSE formalism, namely the calculation of GS total energy with the correlation energy calculated at the BSE level. Such a task was performed by several groups using in particular the adiabatic connection fluctuation-dissipation theorem (ACFDT), focusing in particular on small dimers. \cite{Olsen_2014,Holzer_2018b,Li_2020,Loos_2020}\\
The features of ground- and excited-state potential energy surfaces (PES) are critical for the faithful description and a deeper understanding of photochemical and photophysical processes. \cite{Bernardi_1996,Olivucci_2010,Robb_2007}
For example, chemiluminescence, fluorescence and other related processes are associated with geometric relaxation of excited states, and structural changes upon electronic excitation. \cite{Navizet_2011}
Reliable predictions of these mechanisms, which have lately attracted much experimental and theoretical interest, require exploring the ground- and excited-state PES.
From a theoretical point of view, the accurate prediction of excited electronic states remains a challenge, \cite{Gonzales_2012, Loos_2020a} especially for large systems where state-of-the-art computational techniques (such as multiconfigurational methods \cite{Andersson_1990,Andersson_1992,Roos_1996,Angeli_2001}) cannot be afforded.
For the last two decades, TD-DFT has been the go-to method to compute absorption and emission spectra in large molecular systems.
In TD-DFT, the PES for the excited states can be easily and efficiently obtained as a function of the molecular geometry by simply adding the ground-state DFT energy to the excitation energy of the selected state.
One of the strongest assets of TD-DFT is the availability of first- and second-order analytic nuclear gradients (\ie, the first and second derivatives of the excited-state energy with respect to the nuclear displacements), which enables the exploration of excited-state PES. \cite{Furche_2002}
A significant limitation of the BSE formalism, as compared to TD-DFT, lies in the lack of analytical nuclear gradients for both the ground and excited states, preventing efficient studies of excited-state processes.
While calculations of the $GW$ quasiparticle energy ionic gradients are becoming increasingly popular,
\cite{Lazzeri_2008,Faber_2011b,Yin_2013,Faber_2015,Montserrat_2016,Zhenglu_2019} only one pioneering study of the excited-state BSE gradients has been published so far. \cite{Beigi_2003}
In this seminal work devoted to small molecules (\ce{CO} and \ce{NH3}), only the BSE excitation energy gradients were calculated, the KS-LDA forces being used as the ground-state contribution.
\\
%==========================================
\subsection{The challenge of the ground-state energy}
%==========================================
In contrast to TD-DFT which relies on KS-DFT as its ground-state analog, the ground-state BSE energy is not a well-defined quantity, and no clear consensus has been found regarding its formal definition.
Consequently, the BSE ground-state formalism remains in its infancy with very few available studies for atomic and molecular systems. \cite{Olsen_2014,Holzer_2018,Li_2019,Li_2020,Loos_2020}
A promising route, which closely follows RPA-type formalisms, \cite{Furche_2008,Toulouse_2009,Toulouse_2010,Angyan_2011,Ren_2012} is to calculate the ground-state BSE energy within the adiabatic-connection fluctuation-dissipation theorem (ACFDT) framework. \cite{Furche_2005,Olsen_2014,Maggio_2016,Holzer_2018,Loos_2020}
Thanks to comparisons with both similar and state-of-the-art computational approaches, we have recently shown that the ACFDT@BSE@$GW$ approach yields extremely accurate PES around equilibrium, and can even compete with high-order coupled cluster methods in terms of absolute ground-state energies and equilibrium distances. \cite{Loos_2020}
However, the accuracy of such schemes near the dissociation limit remains an open question. \cite{Caruso_2013,Olsen_2014,Colonna_2014,Hellgren_2015,Holzer_2018}
Indeed, in the largest available benchmark study \cite{Holzer_2018} encompassing the total energies of the atoms \ce{H}--\ce{Ne}, the atomization energies of the 26 small molecules forming the HEAT test set, and the bond lengths and harmonic vibrational frequencies of $3d$ transition-metal monoxides, the BSE correlation energy, as evaluated within the ACFDT framework, \cite{Furche_2005} was mostly discarded from the set of tested techniques due to instabilities (negative frequency modes in the BSE polarization propagator) and replaced by an approximate (RPAsX) approach where the screened-Coulomb potential matrix elements were removed from the resonant electron-hole contribution. \cite{Maggio_2016,Holzer_2018}
Moreover, we have also observed \cite{Loos_2020} that, in some cases, unphysical irregularities appear on the ground-state PES due to discontinuities, as a function of the bond length, in some of the $GW$ quasiparticle energies.
Such an unphysical behavior stems from defining the quasiparticle energy as the solution of the quasiparticle equation with the largest spectral weight in cases where several solutions can be found.
This shortcoming has been thoroughly described in several previous studies.\cite{vanSetten_2015,Maggio_2017,Loos_2018,Veril_2018,Duchemin_2020}
\\
%==========================================
%\subsection{Unphysical discontinuities}
%==========================================
%The GW approximation of many-body perturbation theory has been highly successful at predicting the electronic properties of solids and molecules. \cite{Onida_2002, Aryasetiawan_1998, Reining_2017}
%However, it is also known to be inadequate to model strongly correlated systems. \cite{Romaniello_2009a,Romaniello_2012,DiSabatino_2015,DiSabatino_2016,Tarantino_2017,DiSabatino_2019}
%Here, we have found severe shortcomings of two widely-used variants of $GW$ in the weakly correlated regime.
%We report unphysical irregularities and discontinuities in some key experimentally-measurable quantities computed within the $GW$ approximation
%of many-body perturbation theory applied to molecular systems.
%In particular, we show that the solution obtained with partially self-consistent GW schemes depends on the algorithm one uses to solve self-consistently the quasi-particle (QP) equation.
%The main observation of the present study is that each branch of the self-energy is associated with a distinct QP solution, and that each switch between solutions implies a significant discontinuity in the quasiparticle energy as a function of the internuclear distance.
%Moreover, we clearly observe ``ripple'' effects, i.e., a discontinuity in one of the QP energies induces (smaller) discontinuities in the other QP energies.
%Going from one branch to another implies a transfer of weight between two solutions of the QP equation.
%The case of occupied, virtual and frontier orbitals are separately discussed on distinct diatomics.
%In particular, we show that multisolution behavior in frontier orbitals is more likely if the HOMO-LUMO gap is small.
%
%We have evidenced that one can hit multiple solution issues within $G_0W_0$ and ev$GW$ due to the location of the QP solution near poles of the self-energy.
%Within linearized $G_0W_0$, this implies irregularities in key experimentally-measurable quantities of simple diatomics, while, at the partially self-consistent ev$GW$ level, discontinues arise.
%Because the RPA correlation energy \cite{Casida_1995, Dahlen_2006, Furche_2008, Bruneval_2016} and the Bethe-Salpeter excitation energies \cite{Strinati_1988, Leng_2016, Blase_2018} directly dependent on the QP energies, these types of discontinuities are also present in these quantities, hence in the energy surfaces of ground and excited states.
%
%In a recent article, \cite{Loos_2018} while studying a model two-electron system, we have observed that, within partially self-consistent $GW$ (such as ev$GW$ and qs$GW$), one can observe, in the weakly correlated regime, (unphysical) discontinuities in the energy surfaces of several key quantities (ionization potential, electron affinity, HOMO-LUMO gap, total and correlation energies, as well as vertical excitation energies).
%==========================================
\subsection{Beyond the static approximation}
%==========================================
Going beyond the static approximation is a difficult challenge which has been, nonetheless, embraced by several groups around the world.\cite{Strinati_1988,Rohlfing_2000,Sottile_2003,Ma_2009a,Ma_2009b,Romaniello_2009b,Sangalli_2011,Huix-Rotllant_2011,Zhang_2013,Rebolini_2016,Olevano_2019,Lettmann_2019}
As mentioned earlier in this \textit{Perspective}, most BSE calculations are performed within the so-called static approximation, which substitutes the dynamically-screened (\ie, frequency-dependent) Coulomb potential $W(\omega)$ by its static limit $W(\omega = 0)$.
It is important to mention that diagonalizing the BSE Hamiltonian in the static approximation corresponds to solving a \textit{linear} eigenvalue problem in the space of single excitations, while it is, in its dynamical form, a non-linear eigenvalue problem (in the same space) which is much harder to solve from a numerical point of view.
In complete analogy with the ubiquitous adiabatic approximation in TD-DFT, one key consequence of the static approximation is that double (and higher) excitations are completely absent from the BSE spectrum, which obviously hampers the applicability of BSE as double excitations may play, indirectly, a key role in photochemistry mechanisms.
Higher excitations would be explicitly present in the BSE Hamiltonian by ``unfolding'' the dynamical BSE kernel, and one would recover a linear eigenvalue problem with, nonetheless, a much larger dimension.
Corrections to take into account the dynamical nature of the screening may or may not recover these multiple excitations.
However, dynamical corrections permit, in any case, to recover, for transitions with a dominant single-excitation character, additional relaxation effects coming from higher excitations (and, in particular, non-interacting double excitations).
From a more practical point of view, dynamical effects have been found to affect the positions and widths of core-exciton resonances in semiconductors, \cite{Strinati_1982,Strinati_1984} rare gas solids, and transition metals. \cite{Ankudinov_2003}
Thanks to first-order perturbation theory, Rohlfing and coworkers have developed an efficient way of taking into account the dynamical effects via a plasmon-pole approximation combined with the Tamm-Dancoff approximation. \cite{Rohlfing_2000,Ma_2009a,Ma_2009b,Baumeier_2012b}
With such a scheme, they have been able to compute the excited states of biological chromophores, showing that taking into account the electron-hole dynamical screening is important for an accurate description of the lowest $n \ra \pi^*$ excitations. \cite{Ma_2009a,Ma_2009b,Baumeier_2012b}
Studying PYP, retinal and GFP chromophore models, Ma \textit{et al.}~found that \textit{``the influence of dynamical screening on the excitation energies is about $0.1$ eV for the lowest $\pi \ra \pi^*$ transitions, but for the lowest $n \ra \pi^*$ transitions the influence is larger, up to $0.25$ eV.''} \cite{Ma_2009b}
Zhang \textit{et al.}~have studied the frequency-dependent second-order Bethe-Salpeter kernel and they have observed an appreciable improvement over configuration interaction with singles (CIS), time-dependent Hartree-Fock (TDHF), and adiabatic TD-DFT results. \cite{Zhang_2013}
Rebolini and Toulouse have performed a similar investigation in a range-separated context, and they have reported a modest improvement over its static counterpart. \cite{Rebolini_2016,Rebolini_PhD}
In these two latter studies, they also followed a (non-self-consistent) perturbative approach within the Tamm-Dancoff approximation with a renormalization of the first-order perturbative correction.
%Finally, let us also mentioned the work of Romaniello and coworkers, \cite{Romaniello_2009b,Sangalli_2011} in which the authors genuinely accessed additional excitations by solving the non-linear, frequency-dependent eigenvalue problem.
%However, it is based on a rather simple model (the Hubbard dimer) which permits to analytically solve the dynamical equations.
\\
%==========================================
%\subsection{The double excitation challenge}
%==========================================
%As a chemist, it is maybe difficult to understand the concept of dynamical properties, the motivation behind their introduction, and their actual usefulness.
%Here, we will try to give a pedagogical example showing the importance of dynamical quantities and their main purposes. \cite{Romaniello_2009b,Sangalli_2011,Zhang_2013,ReiningBook}
%To do so, let us consider the usual chemical scenario where one wants to get the neutral excitations of a given system.
%In most cases, this can be done by solving a set of linear equations of the form
%\begin{equation}
% \label{eq:lin_sys}
% \bA \bx = \omega \bx,
%\end{equation}
%where $\omega$ is one of the neutral excitation energies of the system associated with the transition vector $\bx$.
%If we assume that the operator $\bA$ has a matrix representation of size $K \times K$, this \textit{linear} set of equations yields $K$ excitation energies.
%However, in practice, $K$ might be very large, and it might therefore be practically useful to recast this system as two smaller coupled systems, such that
%\begin{equation}
% \label{eq:lin_sys_split}
% \begin{pmatrix}
% \bA_1 & \tr{\bb} \\
% \bb & \bA_2 \\
% \end{pmatrix}
% \begin{pmatrix}
% \bx_1 \\
% \bx_2 \\
% \end{pmatrix}
% = \omega
% \begin{pmatrix}
% \bx_1 \\
% \bx_2 \\
% \end{pmatrix},
%\end{equation}
%where the blocks $\bA_1$ and $\bA_2$, of sizes $K_1 \times K_1$ and $K_2 \times K_2$ (with $K_1 + K_2 = K$), can be associated with, for example, the single and double excitations of the system.
%Note that this \textit{exact} decomposition does not alter, in any case, the values of the excitation energies, not their eigenvectors.
%
%Solving separately each row of the system \eqref{eq:lin_sys_split} yields
%\begin{subequations}
%\begin{gather}
% \label{eq:row1}
% \bA_1 \bx_1 + \tr{\bb} \bx_2 = \omega \bx_1,
% \\
% \label{eq:row2}
% \bx_2 = (\omega \bI - \bA_2)^{-1} \bb \bx_1.
%\end{gather}
%\end{subequations}
%Substituting Eq.~\eqref{eq:row2} into Eq.~\eqref{eq:row1} yields the following effective \textit{non-linear}, frequency-dependent operator
%\begin{equation}
% \label{eq:non_lin_sys}
% \Tilde{\bA}_1(\omega) \bx_1 = \omega \bx_1
%\end{equation}
%with
%\begin{equation}
% \Tilde{\bA}_1(\omega) = \bA_1 + \tr{\bb} (\omega \bI - \bA_2)^{-1} \bb
%\end{equation}
%which has, by construction, exactly the same solutions than the linear system \eqref{eq:lin_sys} but a smaller dimension.
%For example, an operator $\Tilde{\bA}_1(\omega)$ built in the basis of single excitations can potentially provide excitation energies for double excitations thanks to its frequency-dependent nature, the information from the double excitations being ``folded'' into $\Tilde{\bA}_1(\omega)$ via Eq.~\eqref{eq:row2}. \cite{Romaniello_2009b,Sangalli_2011,ReiningBook}
%
%How have we been able to reduce the dimension of the problem while keeping the same number of solutions?
%To do so, we have transformed a linear operator $\bA$ into a non-linear operator $\Tilde{\bA}_1(\omega)$ by making it frequency dependent.
%In other words, we have sacrificed the linearity of the system in order to obtain a new, non-linear systems of equations of smaller dimension.
%This procedure converting degrees of freedom into frequency or energy dependence is very general and can be applied in various contexts. \cite{Gatti_2007,Garniron_2018}
%Thanks to its non-linearity, Eq.~\eqref{eq:non_lin_sys} can produce more solutions than its actual dimension.
%However, because there is no free lunch, this non-linear system is obviously harder to solve than its corresponding linear analogue given by Eq.~\eqref{eq:lin_sys}.
%Nonetheless, approximations can be now applied to Eq.~\eqref{eq:non_lin_sys} in order to solve it efficiently.
%
%One of these approximations is the so-called \textit{static} approximation, which corresponds to fix the frequency to a particular value.
%For example, as commonly done within the Bethe-Salpeter formalism, $\Tilde{\bA}_1(\omega) = \Tilde{\bA}_1 \equiv \Tilde{\bA}_1(\omega = 0)$.
%In such a way, the operator $\Tilde{\bA}_1$ is made linear again by removing its frequency-dependent nature.
%This approximation comes with a heavy price as the number of solutions provided by the system of equations \eqref{eq:non_lin_sys} has now been reduced from $K$ to $K_1$.
%Coming back to our example, in the static approximation, the operator $\Tilde{\bA}_1$ built in the basis of single excitations cannot provide double excitations anymore, and the only $K_1$ excitation energies are associated with single excitations.\\
%==========================================
\subsection{Core-level spectroscopy}
%==========================================
XANES, \cite{Olovsson_2009,Vinson_2011}
diabatization and conical intersections \cite{Kaczmarski_2010}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Here goes the conclusion.
%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Acknowledgments}
%%%%%%%%%%%%%%%%%%%%%%%%
Funding from the \textit{``Centre National de la Recherche Scientifique''} is acknowledged.
This work has also been supported through the EUR Grant NanoX ANR-17-EURE-0009 in the framework of the \textit{``Programme des Investissements d'Avenir''}.
DJ acknowledges the \textit{R\'egion des Pays de la Loire} for financial support.
%%%%%%%%%%%%%%%%%%%%
%%% BIBLIOGRAPHY %%%
%%%%%%%%%%%%%%%%%%%%
\bibliography{BSE_JPCL}
\clearpage
\section*{Biographies}
\begin{center}
\includegraphics[width=3cm]{XBlase}
\end{center}
\noindent{\bfseries Xavier Blase} holds his PhD in Physics from the University of California at Berkeley.
He is presently CNRS Research Director at Institut N\'eel, Grenoble, France.
His research focuses on the electronic and optical properties of systems relevant to condensed matter physics, materials sciences and physical chemistry, with much emphasis on methodology.
He is the co-author of the FIESTA code (Bull-Fourier prize 2014) that implements the $GW$ and Bethe-Salpeter formalisms with Gaussian basis sets.
He received the 2008 CNRS silver medal.
\begin{center}
\includegraphics[width=3cm]{IDuchemin}
\end{center}
\noindent{\bfseries Ivan Duchemin} holds his PhD from the CEMES in Toulouse, France.
He stayed as a postdoctoral fellow at the UC Davis Physics department, California, and the Max Plank Institute for Polymer Research in Mainz, Germany, before joining the French Center for Alternative Energies (CEA), Grenoble, France, as a senior scientist.
His research interests focus on methodological and code developments in ab initio quantum simulations.
He is presently the main developer of the FIESTA code (Bull-Fourier prize 2014).
\begin{center}
\includegraphics[width=4cm]{DJacquemin}
\end{center}
\noindent{\bfseries Denis Jacquemin} received his PhD in Chemistry from the University of Namur in 1998, before moving to the University of Florida for his postdoctoral stay. He is currently full Professor at the University of Nantes (France).
His research is focused on modeling electronically excited-state processes in organic and inorganic dyes as well as photochromes using a large panel of \emph{ab initio} approaches. His group collaborates with many experimental
and theoretical groups. He is the author of more than 500 scientific papers. He has been ERC grantee (2011--2016), member of Institut Universitaire de France (2012--2017) and received the WATOC's Dirac Medal (2014).
\begin{center}
\includegraphics[width=3cm]{PFLoos}
\end{center}
\noindent{\bfseries Pierre-Fran\c{c}ois Loos} was born in Nancy, France in 1982. He received his M.S.~in Computational and Theoretical Chemistry from the Universit\'e Henri Poincar\'e (Nancy, France) in 2005 and his Ph.D.~from the same university in 2008. From 2009 to 2013, He was undertaking postdoctoral research with Peter M.W.~Gill at the Australian National University (ANU). From 2013 to 2017, he was a \textit{``Discovery Early Career Researcher Award''} recipient and, then, a senior lecturer at the ANU. Since 2017, he holds a researcher position from the \textit{``Centre National de la Recherche Scientifique (CNRS)} at the \textit{Laboratoire de Chimie et Physique Quantiques} in Toulouse (France), and was awarded, in 2019, an ERC consolidator grant for the development of new excited-state methodologies.
\end{document}