/*
 * Scraped page header (preserved from the web mirror, commented out so the file compiles):
 * mirror of https://github.com/triqs/dft_tools synced 2024-12-27 06:43:40 +01:00
 * dft_tools/triqs/gfs/local/tail.hpp — 414 lines, 16 KiB, C++
 */
/*******************************************************************************
*
* TRIQS: a Toolbox for Research in Interacting Quantum Systems
*
* Copyright (C) 2012 by M. Ferrero, O. Parcollet
*
* TRIQS is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later
* version.
*
* TRIQS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* TRIQS. If not, see <http://www.gnu.org/licenses/>.
*
******************************************************************************/
#ifndef TRIQS_GF_LOCAL_TAIL_H
#define TRIQS_GF_LOCAL_TAIL_H
#include <triqs/arrays.hpp>
#include <triqs/arrays/algorithms.hpp>
// (upstream commit date: 2013-07-29 09:57:07 +02:00)
#include <triqs/gfs/tools.hpp>
namespace triqs { namespace gfs { namespace local {
namespace details {
// Threshold below which a coefficient matrix is treated as zero
// (used by tail_impl::smallest_nonzero() to locate the leading non-vanishing order).
static constexpr double small = 1.e-10;
}
// (upstream commit date: 2014-05-10 21:39:11 +02:00)
// Short aliases and forward declarations used throughout this header.
using arrays::matrix_view;
using arrays::matrix;
namespace tqa= triqs::arrays; //namespace tql= triqs::clef; namespace mpl= boost::mpl;
typedef std::complex<double> dcomplex;
class tail; // the value class
class tail_view; // the view class
template<typename G> struct LocalTail : mpl::false_{}; // a boolean trait to identify the objects modelling the concept LocalTail
template<> struct LocalTail<tail > : mpl::true_{};
template<> struct LocalTail<tail_view >: mpl::true_{};
// a trait to find the scalar of the algebra i.e. the true scalar and the matrix ...
template <typename T> struct is_scalar_or_element : mpl::or_< tqa::ImmutableMatrix<T>, utility::is_in_ZRC<T> > {};
// ---------------------- implementation --------------------------------
/// A common implementation class. Idiom: ValueView
// Common implementation of tail (value) and tail_view (view). Idiom: ValueView.
// A tail stores the high-frequency expansion  t(omega) = sum_{n=omin}^{omin+size-1} M_n / omega^n,
// where each coefficient M_n is an N1 x N2 complex matrix, stored in _data(n-omin, :, :).
// "mask" stores, for each matrix element, the largest order up to which the expansion is known.
template<bool IsView> class tail_impl {
public:
typedef tail_view view_type;
typedef tail regular_type;
typedef arrays::array <dcomplex,3> data_regular_type;
typedef arrays::array_view <dcomplex,3> data_view_type;
typedef typename mpl::if_c<IsView, data_view_type, data_regular_type>::type data_type;
typedef arrays::array<long,2> mask_regular_type;
typedef arrays::array_view<long,2> mask_view_type;
typedef typename mpl::if_c<IsView, mask_view_type, mask_regular_type>::type mask_type;
typedef arrays::matrix_view<dcomplex> mv_type;
typedef arrays::matrix_view<dcomplex> const_mv_type;
data_view_type data() { return _data;}
const data_view_type data() const { return _data;}
mask_view_type mask_view() { return mask;}
const mask_view_type mask_view() const { return mask;}
// Smallest stored order.
long order_min() const {return omin;}
// Largest order known for *all* matrix elements (minimum over the mask).
long order_max() const {return min_element(mask);}
// Number of stored orders.
size_t size() const {return _data.shape()[0];}
// First order whose coefficient matrix is not negligible
// (a matrix with all |elements| < details::small counts as zero).
long smallest_nonzero() const {
long om = omin;
while ((om < this->order_max()) && (max_element(abs(_data(om-omin,tqa::range(),tqa::range()))) < details::small)) om++;
return om;
}
typedef tqa::mini_vector<size_t,2> shape_type;
// Target (orbital) shape N1 x N2 of the coefficient matrices.
shape_type shape() const { return shape_type(_data.shape()[1], _data.shape()[2]);}
// True iff the expansion starts at order >= 1, i.e. t(omega) -> 0 when |omega| -> infinity.
bool is_decreasing_at_infinity() const { return (smallest_nonzero() >=1);}
protected:
long omin; // order of the first stored coefficient
mask_type mask; // per-element largest known order, shape N1 x N2
data_type _data; // coefficients, shape (size, N1, N2)
// All constructors
tail_impl(): omin(0), mask(), _data() {} // all arrays of zero size (empty)
tail_impl(size_t N1, size_t N2, size_t size_, long order_min):
omin(order_min), mask(tqa::make_shape(N1,N2)), _data(tqa::make_shape(size_,N1,N2)) {
mask() = order_min+size_-1;
_data() = 0;
}
tail_impl(data_type const &d, mask_type const &m, long omin_) : omin(omin_), mask(m), _data(d) {}
tail_impl(tail_impl<!IsView> const & x): omin(x.omin), mask(x.mask), _data(x._data) {}
tail_impl(tail_impl const &) = default;
tail_impl(tail_impl &&) = default;
friend class tail_impl<!IsView>;
public:
// Coefficient matrix of order n (modifiable view). Throws outside [order_min, order_max].
mv_type operator() (int n) {
if (n>this->order_max()) TRIQS_RUNTIME_ERROR<<" n > Max Order. n= "<<n <<", Max Order = "<<order_max() ;
if (n<this->order_min()) TRIQS_RUNTIME_ERROR<<" n < Min Order. n= "<<n <<", Min Order = "<<order_min() ;
return this->_data(n-omin, tqa::range(), tqa::range());
}
// Coefficient matrix of order n (const). Below order_min a zero matrix is returned instead of throwing.
// NOTE(review): the zero matrix is returned as a view of a function-local array; this relies on
// triqs::arrays views keeping the underlying storage alive — confirm against the arrays library.
const_mv_type operator() (int n) const {
if (n>this->order_max()) TRIQS_RUNTIME_ERROR<<" n > Max Order. n= "<<n <<", Max Order = "<<order_max() ;
if (n<this->order_min()) { mv_type::regular_type r(this->shape()); r()=0; return r;}
return this->_data(n-omin, tqa::range(), tqa::range());
}
/// same as (), but if n is too large, then returns 0 instead of raising an exception
const_mv_type get_or_zero (int n) const {
if ( (n>this->order_max()) || (n<this->order_min()) ) { mv_type::regular_type r(this->shape()); r()=0; return r; }
return this->_data(n-omin, tqa::range(), tqa::range());
}
// Implicit conversion to freq_infty (presumably the tag for the point at infinite
// frequency, from gfs/tools.hpp — confirm).
operator freq_infty() const { return freq_infty(); }
/// Evaluate the tail to sum_{n=order_min}^ordermax M_n/omega^n
arrays::matrix<dcomplex> evaluate(dcomplex const &omega) const {
auto r = arrays::matrix<dcomplex>{this->shape()};
r() = 0;
auto omin = this->order_min();
auto omax = this->order_max(); // precompute since long to do...
auto _ = arrays::range{};
// Horner scheme in 1/omega, from the highest to the lowest stored order ...
for (int u = omax; u >= omin; --u)
r = r / omega +
const_mv_type{this->_data(u - omin, _, _)}; // need to make a matrix view because otherwise + is not defined
// ... then restore the global factor omega^{-omin}
r /= pow(omega, omin);
return r;
}
/// Save in txt file. NOTE(review): currently an empty stub (no-op) — prefer serialization or hdf5 !
void save(std::string file, bool accumulate=false) const {}
/// Load from txt file: doc the format ?
//void load(std::string file){}
// Tag identifying this object's layout in the TRIQS HDF5 layer.
friend std::string get_triqs_hdf5_data_scheme(tail_impl const & g) { return "TailGf"; }
// Write omin, mask and data into the subgroup subgroup_name of fg.
friend void h5_write (h5::group fg, std::string subgroup_name, tail_impl const & t) {
auto gr = fg.create_group(subgroup_name);
h5_write(gr,"omin",t.omin);
h5_write(gr,"mask",t.mask);
h5_write(gr,"data",t._data);
}
// Read omin, mask and data back from the subgroup subgroup_name of fg.
friend void h5_read (h5::group fg, std::string subgroup_name, tail_impl & t){
auto gr = fg.open_group(subgroup_name);
h5_read(gr,"omin",t.omin);
h5_read(gr,"mask",t.mask);
h5_read(gr,"data",t._data);
}
// BOOST Serialization
friend class boost::serialization::access;
template<class Archive>
void serialize(Archive & ar, const unsigned int version) {
ar & TRIQS_MAKE_NVP("omin",omin);
ar & TRIQS_MAKE_NVP("mask",mask);
ar & TRIQS_MAKE_NVP("data",_data);
}
// Pretty print: summary line, then one line per known order.
friend std::ostream & operator << (std::ostream & out, tail_impl const & x) {
out <<"tail/tail_view: min/smallest/max = "<< x.order_min() << " " << x.smallest_nonzero() << " "<< x.order_max();
for (long u = x.order_min(); u <= x.order_max(); ++u) out <<"\n ... Order "<<u << " = " << x(u);
return out;
}
};
// -----------------------------
// the view class
// The view class: non-owning window onto the data of a tail.
class tail_view : public tail_impl<true> {
 typedef tail_impl<true> B;
 friend class tail;

 public:
 /// Build a view onto any tail (regular or view).
 template <bool V> tail_view(tail_impl<V> const &t) : B(t) {}

 /// Build a view directly from data, mask and minimal order.
 tail_view(B::data_type const &d, B::mask_type const &m, long order_min) : B(d, m, order_min) {}

 tail_view(tail_view const &) = default;
 tail_view(tail_view &&) = default;

 /// Rebind this view onto the data of another view (no data copy).
 void rebind(tail_view const &other) {
  omin = other.omin;
  mask.rebind(other.mask);
  _data.rebind(other._data);
 }
 inline void rebind(tail const &X); // defined after class tail

 /// Deep assignment from another view; both tails must have the same target shape and order_min.
 tail_view &operator=(const tail_view &rhs) {
  const bool same_target = (_data.shape()[1] == rhs._data.shape()[1]) && (_data.shape()[2] == rhs._data.shape()[2]);
  if (!same_target || (omin != rhs.omin)) TRIQS_RUNTIME_ERROR<<"tails are incompatible";
  mask = rhs.mask;
  _data = rhs._data;
  return *this;
 }
 inline tail_view &operator=(const tail &rhs); // defined after class tail

 /// Set the tail to the constant x: the order-0 coefficient becomes x, all others vanish.
 tail_view &operator=(std::complex<double> const &x) {
  _data() = 0.0;
  mv_type(_data(-omin, tqa::range(), tqa::range())) = x;
  mask() = omin + size() - 1;
  return *this;
 }

 using B::operator(); // import all previously defined operator() for overloading

 friend std::ostream &triqs_nvl_formal_print(std::ostream &out, tail_view const &x) { return out<<"tail_view"; }
};
// -----------------------------
// the regular class
// The regular (value) class: owns its coefficient data.
class tail : public tail_impl<false> {
 typedef tail_impl<false> B;
 friend class tail_view;

 public:
 tail() : B() {}
 typedef tqa::mini_vector<size_t, 2> shape_type;

 /// A tail of N1 x N2 coefficient matrices with size_ orders, starting at order_min.
 tail(size_t N1, size_t N2, size_t size_ = 10, long order_min = -1) : B(N1, N2, size_, order_min) {}
 tail(shape_type const &sh, size_t size_ = 10, long order_min = -1) : B(sh[0], sh[1], size_, order_min) {}

 /// Build from raw data, mask and minimal order.
 tail(B::data_type const &d, B::mask_type const &m, long order_min) : B(d, m, order_min) {}

 tail(tail const &g) : B(g) {}
 tail(tail_view const &g) : B(g) {} // deep copy of the viewed data
 tail(tail &&) = default;

 /// Deep copy from a view.
 tail &operator=(tail_view const &rhs) {
  omin = rhs.omin;
  mask = rhs.mask;
  _data = rhs._data;
  return *this;
 }
 tail &operator=(tail const &rhs) {
  omin = rhs.omin;
  mask = rhs.mask;
  _data = rhs._data;
  return *this;
 }

 using B::operator();

 /// The simplest tail corresponding to omega
 // NOTE(review): returns a tail_view of the local `t`; this relies on triqs::arrays views
 // keeping the underlying storage alive — confirm against the arrays library.
 static tail_view omega(size_t N1, size_t N2, size_t size_, long order_min) {
  tail t(N1, N2, size_, order_min);
  t(-1) = 1;
  return t;
 }
 /// The simplest tail corresponding to omega, constructed from a shape for convenience
 static tail_view omega(tail::shape_type const &sh, size_t size_, long order_min) { return omega(sh[0], sh[1], size_, order_min); }
};
/* Upstream commit log (2013-10-16 23:55:26 +02:00), scraped from the web mirror, not source:
 * "[API change] gf : factories -> constructors" — more general gf constructors; removal of the
 * old make_gf factories; tensor_valued targets; multivar linear-index and h5 read/write fixes;
 * block-gf constructor cleanup; TRIQS_CATCH_AND_ABORT macro replacing test catch blocks. */
// Assign a CLEF expression to a tail view: evaluate rhs on the elementary tail "omega"
// (built with the same shape, size and order_min as t) and copy the result into t.
template<typename RHS> void assign_from_expression(tail_view t,RHS const & rhs) { t = rhs( tail::omega(t.shape(),t.size(),t.order_min()) ); }
// Rebind the view onto the data of a regular tail (no data copy).
inline void tail_view::rebind(tail const &rhs) {
 omin = rhs.omin;
 mask.rebind(rhs.mask);
 _data.rebind(rhs._data);
}
// Deep assignment from a regular tail; target shapes and order_min must agree.
inline tail_view &tail_view::operator=(const tail &rhs) {
 const bool same_target =
     (_data.shape()[1] == rhs._data.shape()[1]) && (_data.shape()[2] == rhs._data.shape()[2]);
 if (!same_target || (omin != rhs.omin)) TRIQS_RUNTIME_ERROR<<"tails are incompatible";
 mask = rhs.mask;
 _data = rhs._data;
 return *this;
}
// (upstream commit date: 2014-05-10 21:39:11 +02:00)
// Complex conjugate of a tail (element-wise on all coefficient matrices).
inline tail conj(tail_view t) { return {conj(t.data()), t.mask_view(),t.order_min()};}
// Transpose in the orbital (matrix) indices; the order index (axis 0) stays in place.
inline tail transpose(tail_view t) { return {transposed_view(t.data(),0,2,1), transposed_view(t.mask_view(),1,0),t.order_min()};}
// (upstream commit date: 2014-05-10 21:39:11 +02:00)
/// Slice in orbital space
// (upstream commit date: 2013-10-06 22:10:42 +02:00)
//template<bool V> tail_view slice_target(tail_impl<V> const & t, tqa::range R1, tqa::range R2) {
// Restrict every coefficient matrix (and the mask) to the orbital sub-block [R1, R2].
inline tail_view slice_target(tail_view t, tqa::range R1, tqa::range R2) {
return tail_view(t.data()(tqa::range(),R1,R2), t.mask_view()(R1,R2), t.order_min());
}
// Inverse of a tail, by the standard recursion for the inverse of a (matrix-valued) Laurent
// series: if t = sum_{k>=s} a_k / w^k with a_s the first non-negligible coefficient,
// then t^{-1} starts at order -s.
inline tail inverse(tail_view const & t) {
// leading order of the inverse = minus the leading order of t
long omin1 = - t.smallest_nonzero();
// highest order that is determined by the known orders of t, capped by the result's capacity
long omax1 = std::min(t.order_max() + 2*omin1, t.order_min()+long(t.size())-1);
size_t si = omax1-omin1+1;
tail res(t.shape(), t.size(), t.order_min());
res.mask_view() = omax1;
// b_0 = a_s^{-1}
res(omin1) = inverse(t(-omin1));
// b_n = a_s^{-1} * ( - sum_{p<n} a_{s+n-p} b_p ); left-multiplication because coefficients are matrices
for (size_t n=1; n<si; n++) {
for (size_t p=0; p<n; p++) {
res(omin1 + n) -= t(n-omin1-p) * res(omin1+p);
}
res(omin1 + n) = res(omin1) * make_clone(res(omin1 + n));
}
return res;
}
/* Upstream commit log (2013-10-16 23:55:26 +02:00), scraped from the web mirror, not source:
 * duplicate of the "[API change] gf : factories -> constructors" changelog entry above. */
// Inverse of a regular tail: delegate to the view overload.
inline tail inverse(tail const & t) { return inverse(tail_view(t));}
// Product of two tails: Cauchy product of the Laurent series, matrix product on the coefficients.
// Requires compatible orbital shapes and identical order_min / size.
inline tail mult_impl(tail_view const & l, tail_view const& r) {
if (l.shape()[1] != r.shape()[0] || l.order_min() != r.order_min() || l.size() != r.size())
TRIQS_RUNTIME_ERROR<< "tail multiplication: shape mismatch";
//long omin1 = l.smallest_nonzero() + r.smallest_nonzero();
// highest order of the product that is fully determined by the known orders of l and r
long omax1 = std::min(std::min(r.order_max()+l.smallest_nonzero(), l.order_max()+r.smallest_nonzero()), r.order_min()+long(r.size())-1);
//size_t si = omax1-omin1+1;
tail res(l.shape(), l.size(), l.order_min());
res.mask_view() = omax1;
for (long n=res.order_min(); n<=res.order_max(); ++n) {
// sum_{p}^n a_p b_{n-p}. p <= a.n_max, p >= a.n_min and n-p <=b.n_max and n-p >= b.n_min
// hence p <= min ( a.n_max, n-b.n_min ) and p >= max ( a.n_min, n- b.n_max)
const long pmin = std::max(l.smallest_nonzero(), n - r.order_max() );
const long pmax = std::min(l.order_max(), n - r.smallest_nonzero() );
for (long p = pmin; p <= pmax; ++p) { res(n) += l(p) * r(n-p);}
}
return res;
}
// (upstream commit date: 2014-05-10 21:39:11 +02:00)
// tail * tail: enabled when both operands model LocalTail; delegates to mult_impl.
template <typename T1, typename T2>
TYPE_ENABLE_IF(tail, mpl::and_<LocalTail<T1>, LocalTail<T2>>) operator*(T1 const &a, T2 const &b) {
return mult_impl(a, b);
}
// (upstream commit date: 2014-05-10 21:39:11 +02:00)
// matrix * tail : left-multiply every coefficient matrix by a.
// The result inherits size, order_min and order_max from b.
template <typename T1, typename T2>
TYPE_ENABLE_IF(tail, mpl::and_<tqa::ImmutableMatrix<T1>, LocalTail<T2>>) operator*(T1 const &a, T2 const &b) {
 tail res{first_dim(a), b.shape()[1], b.size(), b.order_min()};
 res.mask_view() = b.order_max();
 for (int n = res.order_min(); n <= res.order_max(); ++n) res(n) = a * b(n);
 return res;
}
// (upstream commit date: 2014-05-10 21:39:11 +02:00)
// tail * matrix : right-multiply every coefficient matrix by b.
// The result inherits size, order_min and order_max from a.
template <typename T1, typename T2>
TYPE_ENABLE_IF(tail, mpl::and_<LocalTail<T1>, tqa::ImmutableMatrix<T2>>) operator*(T1 const &a, T2 const &b) {
 tail res{a.shape()[0], second_dim(b), a.size(), a.order_min()};
 res.mask_view() = a.order_max();
 for (int n = res.order_min(); n <= res.order_max(); ++n) res(n) = a(n) * b;
 return res;
}
// scalar * tail and tail * scalar : scale all coefficient matrices by a.
inline tail operator * (dcomplex a, tail_view const & r) { tail res(r); res.data()*=a; return res;}
inline tail operator * (tail_view const & r, dcomplex a) { return a*r; }
// tail / tail == a * b^{-1}
template<typename T1, typename T2> TYPE_ENABLE_IF(tail,mpl::and_<LocalTail<T1>, LocalTail<T2>>)
operator/ (T1 const & a, T2 const & b) { return a * inverse(b); }
// tail / scalar and scalar / tail.
inline tail operator / (tail_view const & r, dcomplex a) { tail res(r); res.data() /=a; return res;}
inline tail operator / (dcomplex a, tail_view const & r) { return a * inverse(r); }
// tail + tail : both tails must share shape, order_min and size;
// the result is known only up to the smaller of the two order_max.
template <typename T1, typename T2>
TYPE_ENABLE_IF(tail, mpl::and_<LocalTail<T1>, LocalTail<T2>>)
operator+(T1 const &l, T2 const &r) {
 if (l.shape() != r.shape() || l.order_min() != r.order_min() || (l.size() != r.size()))
  TRIQS_RUNTIME_ERROR<< "tail addition: shape mismatch";
 tail res(l.shape(), l.size(), l.order_min());
 res.mask_view() = std::min(l.order_max(), r.order_max());
 for (long n = res.order_min(); n <= res.order_max(); ++n) res(n) = l(n) + r(n);
 return res;
}
// tail - tail : both tails must share shape, order_min and size;
// the result is known only up to the smaller of the two order_max.
template <typename T1, typename T2>
TYPE_ENABLE_IF(tail, mpl::and_<LocalTail<T1>, LocalTail<T2>>)
operator-(T1 const &l, T2 const &r) {
 if (l.shape() != r.shape() || l.order_min() != r.order_min() || (l.size() != r.size()))
  TRIQS_RUNTIME_ERROR<< "tail subtraction: shape mismatch"; // fixed: message previously said "addition"
 tail res(l.shape(), l.size(), l.order_min());
 res.mask_view() = std::min(l.order_max(), r.order_max());
 for (long n = res.order_min(); n <= res.order_max(); ++n) res(n) = l(n) - r(n);
 return res;
}
// scalar (or matrix) + tail : add a to the order-0 coefficient.
// NOTE(review): assumes order 0 lies within [order_min, order_max] of t; res(0) throws otherwise.
template<typename T1, typename T2> TYPE_ENABLE_IF(tail,mpl::and_<is_scalar_or_element<T1>, LocalTail<T2>>)
operator + (T1 const & a, T2 const & t) {
tail res(t);
res(0) += a;
return res;
}
// tail + scalar : addition commutes, delegate to the previous overload.
template<typename T1, typename T2> TYPE_ENABLE_IF(tail,mpl::and_<LocalTail<T1>, is_scalar_or_element<T2>>)
operator + (T1 const & t, T2 const & a) { return a+t;}
// scalar (or matrix) - tail : a - t == (-t) + a.
// BUGFIX: the previous implementation returned (-a) + t, which computes t - a (wrong sign);
// note the tail-minus-scalar overload below, where (-a) + t IS the correct expression.
template<typename T1, typename T2> TYPE_ENABLE_IF(tail,mpl::and_<is_scalar_or_element<T1>, LocalTail<T2>>)
operator - (T1 const & a, T2 const & t) { return a + (dcomplex(-1.0) * t); }
// tail - scalar : t - a == (-a) + t, delegated to the scalar + tail overload.
template<typename T1, typename T2> TYPE_ENABLE_IF(tail,mpl::and_<LocalTail<T1>, is_scalar_or_element<T2>>)
operator - (T1 const & t, T2 const & a) { return (-a) + t;}
// In-place operators, defined in terms of the binary operators above:
// g OP1 x  is  g = g OP2 x, for both tail and tail_view left-hand sides.
#define DEFINE_OPERATOR(OP1, OP2) \
template <typename T> void operator OP1(tail_view g, T &&x) { g = g OP2 x; } \
template <typename T> void operator OP1(tail &g, T &&x) { g = g OP2 x; }
DEFINE_OPERATOR(+=, +);
DEFINE_OPERATOR(-=, -);
DEFINE_OPERATOR(*=, *);
DEFINE_OPERATOR(/=, / );
#undef DEFINE_OPERATOR
}}}
#endif