/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/solver/Test_wilson_mg.cc
Copyright (C) 2017
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
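// Diagnostic for the near-null test vectors: for each vector v it applies the
// operator, multiplies by g5, forms the Rayleigh quotient
// lambda = <v, g5 M v> / <v, v>, and prints the relative residual
// |g5 M v - lambda v| / |v| as a measure of how well v approximates an
// eigenvector of the g5-Hermitian combination, plus how many lambdas are positive.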
template<class Field, int nbasis> class TestVectorAnalyzer {
public:
void operator()(LinearOperatorBase<Field> &Linop, std::vector<Field> const &vectors, int nn = nbasis) {
auto positiveOnes = 0;
std::vector<Field> tmp(4, vectors[0]._grid);
Gamma g5(Gamma::Algebra::Gamma5);
std::cout << GridLogMessage << "Test vector analysis:" << std::endl;
for(auto i = 0; i < nn; ++i) {
Linop.Op(vectors[i], tmp[3]);
tmp[0] = g5 * tmp[3];
auto lambda = innerProduct(vectors[i], tmp[0]) / innerProduct(vectors[i], vectors[i]);
tmp[1] = tmp[0] - lambda * vectors[i];
auto mu = ::sqrt(norm2(tmp[1]) / norm2(vectors[i]));
auto nrm = ::sqrt(norm2(vectors[i]));
if(real(lambda) > 0)
positiveOnes++;
std::cout << GridLogMessage << std::scientific << std::setprecision(2) << std::setw(2) << std::showpos << "vector " << i << ": "
<< "singular value: " << lambda << ", singular vector precision: " << mu << ", norm: " << nrm << std::endl;
}
std::cout << GridLogMessage << std::scientific << std::setprecision(2) << std::setw(2) << std::showpos << positiveOnes << " out of "
<< nn << " vectors were positive" << std::endl;
}
};
class myclass : Serializable {
public:
// clang-format off
GRID_SERIALIZABLE_CLASS_MEMBERS(myclass,
int, domaindecompose,
int, domainsize,
int, coarsegrids,
int, order,
int, Ls,
double, mq,
double, lo,
double, hi,
int, steps);
// clang-format on
myclass(){};
};
myclass params;
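// Passed to the Chebyshev constructors below: the polynomial approximates
// f(x) = 1/x on [lo, hi], so applying it to MdagM acts as an approximate
// inverse (the smoother).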
RealD InverseApproximation(RealD x) {
return 1.0 / x;
}
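// Builds the geometry for the requested number of coarsening levels: each
// level's lattice extent is the previous level's divided by the corresponding
// block size, and every level gets its own GridCartesian plus a parallel RNG
// seeded with fixed, level-dependent integers.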
template<int nbasis> struct CoarseGrids {
public:
std::vector<std::vector<int>> LattSizes;
std::vector<std::vector<int>> Seeds;
std::vector<GridCartesian *> Grids;
std::vector<GridParallelRNG> PRNGs;
CoarseGrids(std::vector<std::vector<int>> const &blockSizes, int coarsegrids) {
assert(blockSizes.size() == coarsegrids);
std::cout << GridLogMessage << "Constructing " << coarsegrids << " CoarseGrids" << std::endl;
for(int cl = 0; cl < coarsegrids; ++cl) { // may be a bit ugly and slow but not perf critical
// need to differentiate between first and other coarse levels in size calculation
LattSizes.push_back({cl == 0 ? GridDefaultLatt() : LattSizes[cl - 1]});
Seeds.push_back(std::vector<int>(LattSizes[cl].size()));
for(int d = 0; d < LattSizes[cl].size(); ++d) {
LattSizes[cl][d] = LattSizes[cl][d] / blockSizes[cl][d];
Seeds[cl][d] = (cl + 1) * LattSizes[cl].size() + d + 1;
// calculation unimportant, just to get, e.g., {5, 6, 7, 8} for the first coarse level and so on
}
Grids.push_back(SpaceTimeGrid::makeFourDimGrid(LattSizes[cl], GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi()));
PRNGs.push_back(GridParallelRNG(Grids[cl]));
PRNGs[cl].SeedFixedIntegers(Seeds[cl]);
std::cout << GridLogMessage << "cl = " << cl << ": LattSize = " << LattSizes[cl] << std::endl;
std::cout << GridLogMessage << "cl = " << cl << ": Seeds = " << Seeds[cl] << std::endl;
}
}
};
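// Generic sanity checks for a linear operator on a given grid:
//  (1) Mdiag + sum_mu Mdir_mu reproduces the full operator M,
//  (2) <chi, M phi> == conj(<phi, Mdag chi>) for random phi, chi (stochastic
//      check that AdjOp really is the adjoint of Op),
//  (3) M(phi + chi) == M phi + M chi (linearity).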
template<class Field> void testLinearOperator(LinearOperatorBase<Field> &LinOp, GridBase *Grid, std::string const &name = "") {
std::vector<int> seeds({1, 2, 3, 4});
GridParallelRNG RNG(Grid);
RNG.SeedFixedIntegers(seeds);
{
std::cout << GridLogMessage << "Testing that Mdiag + Σ_μ Mdir_μ == M for operator " << name << ":" << std::endl;
// clang-format off
Field src(Grid); random(RNG, src);
Field ref(Grid); ref = zero;
Field result(Grid); result = zero;
Field diag(Grid); diag = zero;
Field sumDir(Grid); sumDir = zero;
Field tmp(Grid);
Field err(Grid);
// clang-format on
LinOp.Op(src, ref);
std::cout << GridLogMessage << " norm2(M * src) = " << norm2(ref) << std::endl;
LinOp.OpDiag(src, diag);
std::cout << GridLogMessage << " norm2(Mdiag * src) = " << norm2(diag) << std::endl;
for(int dir = 0; dir < 4; dir++) {
for(auto disp : {+1, -1}) {
LinOp.OpDir(src, tmp, dir, disp);
std::cout << GridLogMessage << " norm2(Mdir_{" << dir << "," << disp << "} * src) = " << norm2(tmp) << std::endl;
sumDir = sumDir + tmp;
}
}
std::cout << GridLogMessage << " norm2(Σ_μ Mdir_μ * src) = " << norm2(sumDir) << std::endl;
result = diag + sumDir;
err = ref - result;
std::cout << GridLogMessage << " Absolute deviation = " << norm2(err) << std::endl;
std::cout << GridLogMessage << " Relative deviation = " << norm2(err) / norm2(ref) << std::endl;
}
{
std::cout << GridLogMessage << "Testing hermiticity stochastically for operator " << name << ":" << std::endl;
// clang-format off
Field phi(Grid); random(RNG, phi);
Field chi(Grid); random(RNG, chi);
Field MPhi(Grid);
Field MdagChi(Grid);
// clang-format on
LinOp.Op(phi, MPhi);
LinOp.AdjOp(chi, MdagChi);
ComplexD chiMPhi = innerProduct(chi, MPhi);
ComplexD phiMdagChi = innerProduct(phi, MdagChi);
ComplexD phiMPhi = innerProduct(phi, MPhi);
ComplexD chiMdagChi = innerProduct(chi, MdagChi);
std::cout << GridLogMessage << " chiMPhi = " << chiMPhi << " phiMdagChi = " << phiMdagChi
<< " difference = " << chiMPhi - conjugate(phiMdagChi) << std::endl;
std::cout << GridLogMessage << " phiMPhi = " << phiMPhi << " chiMdagChi = " << chiMdagChi << " <- should be real if hermitian"
<< std::endl;
}
{
std::cout << GridLogMessage << "Testing linearity for operator " << name << ":" << std::endl;
// clang-format off
Field phi(Grid); random(RNG, phi);
Field chi(Grid); random(RNG, chi);
Field phiPlusChi(Grid);
Field MPhi(Grid);
Field MChi(Grid);
Field MPhiPlusChi(Grid);
Field linearityError(Grid);
// clang-format on
LinOp.Op(phi, MPhi);
LinOp.Op(chi, MChi);
phiPlusChi = phi + chi;
LinOp.Op(phiPlusChi, MPhiPlusChi);
linearityError = MPhiPlusChi - MPhi;
linearityError = linearityError - MChi;
std::cout << GridLogMessage << " norm2(linearityError) = " << norm2(linearityError) << std::endl;
}
}
// template < class Fobj, class CComplex, int coarseSpins, int nbasis, class Matrix >
// class MultiGridPreconditioner : public LinearFunction< Lattice< Fobj > > {
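// Two-grid preconditioner used inside the flexible outer solvers below. It
// combines a fine-grid smoother (a Chebyshev polynomial of MdagM, or SAP
// domain-decomposed sweeps when params.domaindecompose is set) with a
// coarse-grid correction obtained by CG on the coarsened operator.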
template<class Fobj, class CComplex, int nbasis, class Matrix> class MultiGridPreconditioner : public LinearFunction<Lattice<Fobj>> {
public:
typedef Aggregation<Fobj, CComplex, nbasis> Aggregates;
typedef CoarsenedMatrix<Fobj, CComplex, nbasis> CoarseOperator;
typedef typename Aggregation<Fobj, CComplex, nbasis>::siteVector siteVector;
typedef typename Aggregation<Fobj, CComplex, nbasis>::CoarseScalar CoarseScalar;
typedef typename Aggregation<Fobj, CComplex, nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj, CComplex, nbasis>::CoarseMatrix CoarseMatrix;
typedef typename Aggregation<Fobj, CComplex, nbasis>::FineField FineField;
typedef LinearOperatorBase<FineField> FineOperator;
Aggregates & _Aggregates;
CoarseOperator &_CoarseOperator;
Matrix & _FineMatrix;
FineOperator & _FineOperator;
Matrix & _SmootherMatrix;
FineOperator & _SmootherOperator;
// Constructor
MultiGridPreconditioner(Aggregates & Agg,
CoarseOperator &Coarse,
FineOperator & Fine,
Matrix & FineMatrix,
FineOperator & Smooth,
Matrix & SmootherMatrix)
: _Aggregates(Agg)
, _CoarseOperator(Coarse)
, _FineOperator(Fine)
, _FineMatrix(FineMatrix)
, _SmootherOperator(Smooth)
, _SmootherMatrix(SmootherMatrix) {}
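// Simple power iteration on MdagM; prints an estimate of its largest
// eigenvalue, which is the kind of number one would feed into params.hi for
// the Chebyshev smoother. It does not appear to be called in this test at present.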
void PowerMethod(const FineField &in) {
FineField p1(in._grid);
FineField p2(in._grid);
MdagMLinearOperator<Matrix, FineField> fMdagMOp(_FineMatrix);
p1 = in;
for(int i = 0; i < 20; i++) {
RealD absp1 = std::sqrt(norm2(p1));
fMdagMOp.HermOp(p1, p2); // this is the G5 herm bit
// _FineOperator.Op(p1,p2); // this is the G5 herm bit
RealD absp2 = std::sqrt(norm2(p2));
if(i % 10 == 9)
std::cout << GridLogMessage << "Power method on mdagm " << i << " " << absp2 / absp1 << std::endl;
p1 = p2 * (1.0 / std::sqrt(absp2));
}
}
void operator()(const FineField &in, FineField &out) {
if(params.domaindecompose) {
operatorSAP(in, out);
} else {
operatorCheby(in, out);
}
}
////////////////////////////////////////////////////////////////////////
// ADEF2: [PTM+Q] in = [1 - Q A] M in + Q in = Min + Q [ in -A Min]
// ADEF1: [MP+Q ] in = M [1 - A Q] in + Q in
////////////////////////////////////////////////////////////////////////
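// Notation: A is the fine operator, M the smoother, and Q = P A_c^{-1} R the
// coarse-grid correction (R/P = restriction/prolongation from the aggregates).
// ADEF2 smooths first and then corrects the remaining residual on the coarse
// grid; ADEF1 applies the coarse correction first and smooths afterwards.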
#if 1
void operatorADEF2(const FineField &in, FineField &out) {
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Ctmp(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
ConjugateGradient<CoarseVector> CG(1.0e-10, 100000);
ConjugateGradient<FineField> fCG(3.0e-2, 1000);
HermitianLinearOperator<CoarseOperator, CoarseVector> HermOp(_CoarseOperator);
MdagMLinearOperator<CoarseOperator, CoarseVector> MdagMOp(_CoarseOperator);
MdagMLinearOperator<Matrix, FineField> fMdagMOp(_FineMatrix);
FineField tmp(in._grid);
FineField res(in._grid);
FineField Min(in._grid);
// Monitor completeness of low mode space
_Aggregates.ProjectToSubspace(Csrc, in);
_Aggregates.PromoteFromSubspace(Csrc, out);
std::cout << GridLogMessage << "Coarse Grid Preconditioner\nCompleteness in: " << std::sqrt(norm2(out) / norm2(in)) << std::endl;
// [PTM+Q] in = [1 - Q A] M in + Q in = Min + Q [ in -A Min]
_FineOperator.Op(in, tmp); // this is the G5 herm bit
fCG(fMdagMOp, tmp, Min); // solves MdagM = g5 M g5M
// Monitor completeness of low mode space
_Aggregates.ProjectToSubspace(Csrc, Min);
_Aggregates.PromoteFromSubspace(Csrc, out);
std::cout << GridLogMessage << "Completeness Min: " << std::sqrt(norm2(out) / norm2(Min)) << std::endl;
_FineOperator.Op(Min, tmp);
tmp = in - tmp; // in - A Min
Csol = zero;
_Aggregates.ProjectToSubspace(Csrc, tmp);
HermOp.AdjOp(Csrc, Ctmp); // Normal equations
CG(MdagMOp, Ctmp, Csol);
HermOp.Op(Csol, Ctmp);
Ctmp = Ctmp - Csrc;
std::cout << GridLogMessage << "coarse space true residual " << std::sqrt(norm2(Ctmp) / norm2(Csrc)) << std::endl;
_Aggregates.PromoteFromSubspace(Csol, out);
_FineOperator.Op(out, res);
res = res - tmp;
std::cout << GridLogMessage << "promoted sol residual " << std::sqrt(norm2(res) / norm2(tmp)) << std::endl;
_Aggregates.ProjectToSubspace(Csrc, res);
std::cout << GridLogMessage << "coarse space proj of residual " << norm2(Csrc) << std::endl;
out = out + Min; // additive coarse space correction
// out = Min; // no additive coarse space correction
_FineOperator.Op(out, tmp);
tmp = tmp - in; // tmp is new residual
std::cout << GridLogMessage << " Preconditioner in " << norm2(in) << std::endl;
std::cout << GridLogMessage << " Preconditioner out " << norm2(out) << std::endl;
std::cout << GridLogMessage << "preconditioner thinks residual is " << std::sqrt(norm2(tmp) / norm2(in)) << std::endl;
}
#endif
// ADEF1: [MP+Q ] in = M [1 - A Q] in + Q in
#if 1
void operatorADEF1(const FineField &in, FineField &out) {
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Ctmp(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
Csol = zero;
ConjugateGradient<CoarseVector> CG(1.0e-10, 100000);
ConjugateGradient<FineField> fCG(3.0e-2, 1000);
HermitianLinearOperator<CoarseOperator, CoarseVector> HermOp(_CoarseOperator);
MdagMLinearOperator<CoarseOperator, CoarseVector> MdagMOp(_CoarseOperator);
ShiftedMdagMLinearOperator<Matrix, FineField> fMdagMOp(_FineMatrix, 0.1);
FineField tmp(in._grid);
FineField res(in._grid);
FineField Qin(in._grid);
// Monitor completeness of low mode space
// _Aggregates.ProjectToSubspace (Csrc,in);
// _Aggregates.PromoteFromSubspace(Csrc,out);
// std::cout<<GridLogMessage<<"Coarse Grid Preconditioner\nCompleteness in: "<<std::sqrt(norm2(out)/norm2(in))<<std::endl;
_Aggregates.ProjectToSubspace(Csrc, in);
HermOp.AdjOp(Csrc, Ctmp); // Normal equations
CG(MdagMOp, Ctmp, Csol);
_Aggregates.PromoteFromSubspace(Csol, Qin);
// Qin=0;
_FineOperator.Op(Qin, tmp); // A Q in
tmp = in - tmp; // in - A Q in
_FineOperator.Op(tmp, res); // this is the G5 herm bit
fCG(fMdagMOp, res, out); // solves MdagM = g5 M g5M
out = out + Qin;
_FineOperator.Op(out, tmp);
tmp = tmp - in; // tmp is new residual
std::cout << GridLogMessage << "preconditioner thinks residual is " << std::sqrt(norm2(tmp) / norm2(in)) << std::endl;
}
#endif
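// Schwarz Alternating Procedure smoother: sites are coloured into "even" and
// "odd" domains according to the parity of the sum of their block indices
// (blocks of linear size params.domainsize). Each sweep restricts the current
// residual to one colour and approximately inverts it with a Chebyshev
// polynomial of the domain-decomposed MdagM, alternating between the colours.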
void SAP(const FineField &src, FineField &psi) {
Lattice<iScalar<vInteger>> coor(src._grid);
Lattice<iScalar<vInteger>> subset(src._grid);
FineField r(src._grid);
FineField zz(src._grid);
zz = zero;
FineField vec1(src._grid);
FineField vec2(src._grid);
const Integer block = params.domainsize;
subset = zero;
for(int mu = 0; mu < Nd; mu++) {
LatticeCoordinate(coor, mu + 1);
coor = div(coor, block);
subset = subset + coor;
}
subset = mod(subset, (Integer)2);
ShiftedMdagMLinearOperator<Matrix, FineField> fMdagMOp(_SmootherMatrix, 0.0);
Chebyshev<FineField> Cheby(params.lo, params.hi, params.order, InverseApproximation);
RealD resid;
for(int i = 0; i < params.steps; i++) {
// Even domain residual
_FineOperator.Op(psi, vec1); // this is the G5 herm bit
r = src - vec1;
resid = norm2(r) / norm2(src);
std::cout << "SAP " << i << " resid " << resid << std::endl;
// Even domain solve
r = where(subset == (Integer)0, r, zz);
_SmootherOperator.AdjOp(r, vec1);
Cheby(fMdagMOp, vec1, vec2); // solves MdagM = g5 M g5M
psi = psi + vec2;
// Odd domain residual
_FineOperator.Op(psi, vec1); // this is the G5 herm bit
r = src - vec1;
r = where(subset == (Integer)1, r, zz);
resid = norm2(r) / norm2(src);
std::cout << "SAP " << i << " resid " << resid << std::endl;
// Odd domain solve
_SmootherOperator.AdjOp(r, vec1);
Cheby(fMdagMOp, vec1, vec2); // solves MdagM = g5 M g5M
psi = psi + vec2;
_FineOperator.Op(psi, vec1); // this is the G5 herm bit
r = src - vec1;
resid = norm2(r) / norm2(src);
std::cout << "SAP " << i << " resid " << resid << std::endl;
}
};
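// Parameter scan for the smoother: for a few Chebyshev lower bounds and
// polynomial orders it applies the smoother once to `in` and prints the
// resulting relative residual, as an aid for choosing params.lo and params.order.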
void SmootherTest(const FineField &in) {
FineField vec1(in._grid);
FineField vec2(in._grid);
RealD lo[3] = {0.5, 1.0, 2.0};
// MdagMLinearOperator<Matrix,FineField> fMdagMOp(_FineMatrix);
ShiftedMdagMLinearOperator<Matrix, FineField> fMdagMOp(_SmootherMatrix, 0.0);
RealD Ni, r;
Ni = norm2(in);
for(int ilo = 0; ilo < 3; ilo++) {
for(int ord = 5; ord < 50; ord *= 2) {
_SmootherOperator.AdjOp(in, vec1);
Chebyshev<FineField> Cheby(lo[ilo], 70.0, ord, InverseApproximation);
Cheby(fMdagMOp, vec1, vec2); // solves MdagM = g5 M g5M
_FineOperator.Op(vec2, vec1); // this is the G5 herm bit
vec1 = in - vec1; // tmp = in - A Min
r = norm2(vec1);
std::cout << GridLogMessage << "Smoother resid " << std::sqrt(r / Ni) << std::endl;
}
}
}
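// Default (non-domain-decomposed) two-grid cycle:
//  1. pre-smooth: apply Mdag followed by a Chebyshev approximation to
//     (MdagM)^-1, i.e. an approximate inverse of M,
//  2. correct the remaining residual on the coarse grid via CG on the
//     coarsened operator (normal equations),
//  3. post-smooth the updated residual and accumulate into the output.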
void operatorCheby(const FineField &in, FineField &out) {
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Ctmp(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
Csol = zero;
ConjugateGradient<CoarseVector> CG(3.0e-3, 100000);
// ConjugateGradient<FineField> fCG(3.0e-2,1000);
HermitianLinearOperator<CoarseOperator, CoarseVector> HermOp(_CoarseOperator);
MdagMLinearOperator<CoarseOperator, CoarseVector> MdagMOp(_CoarseOperator);
// MdagMLinearOperator<Matrix,FineField> fMdagMOp(_FineMatrix);
ShiftedMdagMLinearOperator<Matrix, FineField> fMdagMOp(_SmootherMatrix, 0.0);
FineField vec1(in._grid);
FineField vec2(in._grid);
// Chebyshev<FineField> Cheby (0.5,70.0,30,InverseApproximation);
// Chebyshev<FineField> ChebyAccu(0.5,70.0,30,InverseApproximation);
Chebyshev<FineField> Cheby(params.lo, params.hi, params.order, InverseApproximation);
Chebyshev<FineField> ChebyAccu(params.lo, params.hi, params.order, InverseApproximation);
// Cheby.JacksonSmooth();
// ChebyAccu.JacksonSmooth();
// _Aggregates.ProjectToSubspace (Csrc,in);
// _Aggregates.PromoteFromSubspace(Csrc,out);
// std::cout<<GridLogMessage<<"Completeness: "<<std::sqrt(norm2(out)/norm2(in))<<std::endl;
// ofstream fout("smoother");
// Cheby.csv(fout);
// V11 multigrid.
// Use a fixed chebyshev and hope hermiticity helps.
// To make a working smoother for indefinite operator
// must multiply by "Mdag" (ouch loses all low mode content)
// and apply to poly approx of (mdagm)^-1.
// so that we end up with an odd polynomial.
RealD Ni = norm2(in);
_SmootherOperator.AdjOp(in, vec1); // this is the G5 herm bit
ChebyAccu(fMdagMOp, vec1, out); // solves MdagM = g5 M g5M
std::cout << GridLogMessage << "Smoother norm " << norm2(out) << std::endl;
// Update with residual for out
_FineOperator.Op(out, vec1); // this is the G5 herm bit
vec1 = in - vec1; // tmp = in - A Min
RealD r = norm2(vec1);
std::cout << GridLogMessage << "Smoother resid " << std::sqrt(r / Ni) << " " << r << " " << Ni << std::endl;
_Aggregates.ProjectToSubspace(Csrc, vec1);
HermOp.AdjOp(Csrc, Ctmp); // Normal equations
CG(MdagMOp, Ctmp, Csol);
_Aggregates.PromoteFromSubspace(Csol, vec1); // Ass^{-1} [in - A Min]_s
// Q = Q[in - A Min]
out = out + vec1;
// Three preconditioner smoothing -- hermitian if C3 = C1
// Recompute error
_FineOperator.Op(out, vec1); // this is the G5 herm bit
vec1 = in - vec1; // tmp = in - A Min
r = norm2(vec1);
std::cout << GridLogMessage << "Coarse resid " << std::sqrt(r / Ni) << std::endl;
// Reapply smoother
_SmootherOperator.Op(vec1, vec2); // this is the G5 herm bit
ChebyAccu(fMdagMOp, vec2, vec1); // solves MdagM = g5 M g5M
out = out + vec1;
vec1 = in - vec1; // tmp = in - A Min
r = norm2(vec1);
std::cout << GridLogMessage << "Smoother resid " << std::sqrt(r / Ni) << std::endl;
}
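// Domain-decomposed variant of the two-grid cycle, selected when
// params.domaindecompose is set: the Chebyshev smoother is replaced by SAP
// sweeps, with the same coarse-grid CG correction in between.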
void operatorSAP(const FineField &in, FineField &out) {
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Ctmp(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
Csol = zero;
ConjugateGradient<CoarseVector> CG(1.0e-3, 100000);
HermitianLinearOperator<CoarseOperator, CoarseVector> HermOp(_CoarseOperator);
MdagMLinearOperator<CoarseOperator, CoarseVector> MdagMOp(_CoarseOperator);
FineField vec1(in._grid);
FineField vec2(in._grid);
_Aggregates.ProjectToSubspace(Csrc, in);
_Aggregates.PromoteFromSubspace(Csrc, out);
std::cout << GridLogMessage << "Completeness: " << std::sqrt(norm2(out) / norm2(in)) << std::endl;
// To make a working smoother for indefinite operator
// must multiply by "Mdag" (ouch loses all low mode content)
// and apply to poly approx of (mdagm)^-1.
// so that we end up with an odd polynomial.
SAP(in, out);
// Update with residual for out
_FineOperator.Op(out, vec1); // this is the G5 herm bit
vec1 = in - vec1; // tmp = in - A Min
RealD r = norm2(vec1);
RealD Ni = norm2(in);
std::cout << GridLogMessage << "SAP resid " << std::sqrt(r / Ni) << " " << r << " " << Ni << std::endl;
_Aggregates.ProjectToSubspace(Csrc, vec1);
HermOp.AdjOp(Csrc, Ctmp); // Normal equations
CG(MdagMOp, Ctmp, Csol);
_Aggregates.PromoteFromSubspace(Csol, vec1); // Ass^{-1} [in - A Min]_s
// Q = Q[in - A Min]
out = out + vec1;
// Three preconditioner smoothing -- hermitian if C3 = C1
// Recompute error
_FineOperator.Op(out, vec1); // this is the G5 herm bit
vec1 = in - vec1; // tmp = in - A Min
r = norm2(vec1);
std::cout << GridLogMessage << "Coarse resid " << std::sqrt(r / Ni) << std::endl;
// Reapply smoother
SAP(vec1, vec2);
out = out + vec2;
// Update with residual for out
_FineOperator.Op(out, vec1); // this is the G5 herm bit
vec1 = in - vec1; // tmp = in - A Min
r = norm2(vec1);
Ni = norm2(in);
std::cout << GridLogMessage << "SAP resid(post) " << std::sqrt(r / Ni) << " " << r << " " << Ni << std::endl;
}
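// Consistency checks for the intergrid operators, with R = restriction,
// P = prolongation, D = fine operator and D_c = coarse operator:
//  (1 - P R) v       == 0 for every aggregation test vector v,
//  (1 - R P) v_c     == 0 for a random coarse vector v_c,
//  (R D P - D_c) v_c == 0,
//  Im(v_c^dag D_c^dag D_c v_c) == 0, i.e. D_c^dag D_c is Hermitian.
// Each check aborts if the relative deviation exceeds the tolerance.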
void runChecks(CoarseGrids<nbasis> &cGrids, int whichCoarseGrid) {
/////////////////////////////////////////////
// Some stuff we need for the checks below //
/////////////////////////////////////////////
auto tolerance = 1e-13; // TODO: this obviously depends on the precision we use, current value is for double
std::vector<CoarseVector> cTmps(4, _CoarseOperator.Grid());
std::vector<FineField> fTmps(2, _Aggregates.subspace[0]._grid); // atm only for one coarser grid
// need to construct an operator, since _CoarseOperator is not a LinearOperator but only a matrix (the name is a bit misleading)
MdagMLinearOperator<CoarseOperator, CoarseVector> MdagMOp(_CoarseOperator);
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "MG correctness check: 0 == (1 - P R) v" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
for(auto i = 0; i < _Aggregates.subspace.size(); ++i) {
_Aggregates.ProjectToSubspace(cTmps[0], _Aggregates.subspace[i]); // R v_i
_Aggregates.PromoteFromSubspace(cTmps[0], fTmps[0]); // P R v_i
fTmps[1] = _Aggregates.subspace[i] - fTmps[0]; // v_i - P R v_i
auto deviation = std::sqrt(norm2(fTmps[1]) / norm2(_Aggregates.subspace[i]));
std::cout << GridLogMessage << "Vector " << i << ": norm2(v_i) = " << norm2(_Aggregates.subspace[i])
<< " | norm2(R v_i) = " << norm2(cTmps[0]) << " | norm2(P R v_i) = " << norm2(fTmps[0])
<< " | relative deviation = " << deviation << std::endl;
if(deviation > tolerance) {
std::cout << GridLogError << "Vector " << i << ": relative deviation check failed " << deviation << " > " << tolerance << std::endl;
abort();
}
}
std::cout << GridLogMessage << "Check passed!" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "MG correctness check: 0 == (1 - R P) v_c" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
random(cGrids.PRNGs[whichCoarseGrid], cTmps[0]);
_Aggregates.PromoteFromSubspace(cTmps[0], fTmps[0]); // P v_c
_Aggregates.ProjectToSubspace(cTmps[1], fTmps[0]); // R P v_c
cTmps[2] = cTmps[0] - cTmps[1]; // v_c - R P v_c
auto deviation = std::sqrt(norm2(cTmps[2]) / norm2(cTmps[0]));
std::cout << GridLogMessage << "norm2(v_c) = " << norm2(cTmps[0]) << " | norm2(R P v_c) = " << norm2(cTmps[1])
<< " | norm2(P v_c) = " << norm2(fTmps[0]) << " | relative deviation = " << deviation << std::endl;
if(deviation > tolerance) {
std::cout << GridLogError << "relative deviation check failed " << deviation << " > " << tolerance << std::endl;
abort();
}
std::cout << GridLogMessage << "Check passed!" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "MG correctness check: 0 == (R D P - D_c) v_c" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
random(cGrids.PRNGs[whichCoarseGrid], cTmps[0]);
_Aggregates.PromoteFromSubspace(cTmps[0], fTmps[0]); // P v_c
_FineOperator.Op(fTmps[0], fTmps[1]); // D P v_c
_Aggregates.ProjectToSubspace(cTmps[1], fTmps[1]); // R D P v_c
MdagMOp.Op(cTmps[0], cTmps[2]); // D_c v_c
cTmps[3] = cTmps[1] - cTmps[2]; // R D P v_c - D_c v_c
deviation = std::sqrt(norm2(cTmps[3]) / norm2(cTmps[1]));
std::cout << GridLogMessage << "norm2(R D P v_c) = " << norm2(cTmps[1]) << " | norm2(D_c v_c) = " << norm2(cTmps[2])
<< " | relative deviation = " << deviation << std::endl;
if(deviation > tolerance) {
std::cout << GridLogError << "relative deviation check failed " << deviation << " > " << tolerance << std::endl;
abort();
}
std::cout << GridLogMessage << "Check passed!" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "MG correctness check: 0 == |Im(v_c^dag D_c^dag D_c v_c)|" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
random(cGrids.PRNGs[whichCoarseGrid], cTmps[0]);
MdagMOp.Op(cTmps[0], cTmps[1]); // D_c v_c
MdagMOp.AdjOp(cTmps[1], cTmps[2]); // D_c^dag D_c v_c
// // alternative impl, which is better?
// MdagMOp.HermOp(cTmps[0], cTmps[2]); // D_c^dag D_c v_c
auto dot = innerProduct(cTmps[0], cTmps[2]); //v_c^dag D_c^dag D_c v_c
deviation = abs(imag(dot)) / abs(real(dot));
std::cout << GridLogMessage << "Re(v_c^dag D_c^dag D_c v_c) = " << real(dot) << " | Im(v_c^dag D_c^dag D_c v_c) = " << imag(dot)
<< " | relative deviation = " << deviation << std::endl;
if(deviation > tolerance) {
std::cout << GridLogError << "relative deviation check failed " << deviation << " > " << tolerance << std::endl;
abort();
}
std::cout << GridLogMessage << "Check passed!" << std::endl;
}
};
int main(int argc, char **argv) {
Grid_init(&argc, &argv);
params.domainsize = 1;
params.coarsegrids = 1;
params.domaindecompose = 0;
params.order = 30;
params.Ls = 1;
// params.mq = .13;
params.mq = .5;
params.lo = 0.5;
params.hi = 70.0;
params.steps = 1;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Params: " << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << params << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Set up some fine level stuff: " << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
GridCartesian *FGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(FGrid);
std::vector<int> fSeeds({1, 2, 3, 4});
GridParallelRNG fPRNG(FGrid);
fPRNG.SeedFixedIntegers(fSeeds);
Gamma g5(Gamma::Algebra::Gamma5);
// clang-format off
LatticeFermion src(FGrid); gaussian(fPRNG, src); // src=src + g5 * src;
LatticeFermion result(FGrid); result = zero;
LatticeFermion ref(FGrid); ref = zero;
LatticeFermion tmp(FGrid);
LatticeFermion err(FGrid);
LatticeGaugeField Umu(FGrid); SU3::HotConfiguration(fPRNG, Umu);
LatticeGaugeField UmuDD(FGrid);
LatticeColourMatrix U(FGrid);
LatticeColourMatrix zz(FGrid);
// clang-format on
if(params.domaindecompose) {
Lattice<iScalar<vInteger>> coor(FGrid);
zz = zero;
for(int mu = 0; mu < Nd; mu++) {
LatticeCoordinate(coor, mu);
U = PeekIndex<LorentzIndex>(Umu, mu);
U = where(mod(coor, params.domainsize) == (Integer)0, zz, U);
PokeIndex<LorentzIndex>(UmuDD, U, mu);
}
} else {
UmuDD = Umu;
}
RealD mass = params.mq;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Set up some coarser levels stuff: " << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
const int nbasis = 20; // we fix the number of test vectors to the same
// number on every level for now
//////////////////////////////////////////
// toggle to run two/three level method
//////////////////////////////////////////
// // two-level algorithm
// std::vector<std::vector<int>> blockSizes({{2, 2, 2, 2}});
// CoarseGrids<nbasis> coarseGrids(blockSizes, 1);
// three-level algorithm
std::vector<std::vector<int>> blockSizes({{2, 2, 2, 2}, {2, 2, 1, 1}});
CoarseGrids<nbasis> coarseGrids(blockSizes, 2);
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Building the wilson operator on the fine grid" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
WilsonFermionR Dw(Umu, *FGrid, *FrbGrid, mass);
WilsonFermionR DwDD(UmuDD, *FGrid, *FrbGrid, mass);
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Some typedefs" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
// typedefs for transition from fine to first coarsened grid
typedef vSpinColourVector FineSiteVector;
typedef vTComplex CoarseSiteScalar;
typedef Aggregation<FineSiteVector, CoarseSiteScalar, nbasis> Subspace;
typedef CoarsenedMatrix<FineSiteVector, CoarseSiteScalar, nbasis> CoarseOperator;
typedef CoarseOperator::CoarseVector CoarseVector;
typedef CoarseOperator::siteVector CoarseSiteVector;
typedef TestVectorAnalyzer<LatticeFermion, nbasis> FineTVA;
typedef MultiGridPreconditioner<FineSiteVector, CoarseSiteScalar, nbasis, WilsonFermionR> FineMGPreconditioner;
typedef TrivialPrecon<LatticeFermion> FineTrivialPreconditioner;
// typedefs for transition from a coarse to the next coarser grid (some defs remain the same)
typedef Aggregation<CoarseSiteVector, CoarseSiteScalar, nbasis> SubSubSpace;
typedef CoarsenedMatrix<CoarseSiteVector, CoarseSiteScalar, nbasis> CoarseCoarseOperator;
typedef CoarseCoarseOperator::CoarseVector CoarseCoarseVector;
typedef CoarseCoarseOperator::siteVector CoarseCoarseSiteVector;
typedef TestVectorAnalyzer<CoarseVector, nbasis> CoarseTVA;
typedef MultiGridPreconditioner<CoarseSiteVector, CoarseSiteScalar, nbasis, CoarseOperator> CoarseMGPreconditioner;
typedef TrivialPrecon<CoarseVector> CoarseTrivialPreconditioner;
static_assert(std::is_same<CoarseVector, CoarseCoarseVector>::value, "CoarseVector and CoarseCoarseVector must be of the same type");
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Calling Aggregation class to build subspaces" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
MdagMLinearOperator<WilsonFermionR, LatticeFermion> FineHermPosdefOp(Dw);
Subspace FineAggregates(coarseGrids.Grids[0], FGrid, 0);
assert((nbasis & 0x1) == 0);
int nb = nbasis / 2;
std::cout << GridLogMessage << " nbasis/2 = " << nb << std::endl;
FineAggregates.CreateSubspace(fPRNG, FineHermPosdefOp /*, nb */); // Don't specify nb to see the orthogonalization check
std::cout << GridLogMessage << "Test vector analysis after initial creation of MG test vectors" << std::endl;
FineTVA fineTVA;
fineTVA(FineHermPosdefOp, FineAggregates.subspace, nb);
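// Double the basis: the second half of the test vectors is g5 times the first
// half (the "chiral projection" referred to in the log message below),
// presumably so that the coarse operator can retain the g5-structure of the
// fine operator.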
for(int n = 0; n < nb; n++) {
FineAggregates.subspace[n + nb] = g5 * FineAggregates.subspace[n];
}
auto coarseSites = 1;
for(auto const &elem : coarseGrids.LattSizes[0]) coarseSites *= elem;
std::cout << GridLogMessage << "Norms of MG test vectors after chiral projection (coarse sites = " << coarseSites << ")" << std::endl;
for(int n = 0; n < nbasis; n++) {
std::cout << GridLogMessage << "vec[" << n << "] = " << norm2(FineAggregates.subspace[n]) << std::endl;
}
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Building coarse representation of Dirac operator" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
// using Gamma5HermitianLinearOperator corresponds to working with H = g5 * D
Gamma5HermitianLinearOperator<WilsonFermionR, LatticeFermion> FineHermIndefOp(Dw);
Gamma5HermitianLinearOperator<WilsonFermionR, LatticeFermion> FineHermIndefOpDD(DwDD);
CoarseOperator Dc(*coarseGrids.Grids[0]);
Dc.CoarsenOperator(FGrid, FineHermIndefOp, FineAggregates); // uses only linop.OpDiag & linop.OpDir
std::cout << GridLogMessage << "Test vector analysis after construction of D_c" << std::endl;
fineTVA(FineHermPosdefOp, FineAggregates.subspace, nb);
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Building coarse vectors" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
CoarseVector coarseSource(coarseGrids.Grids[0]);
CoarseVector coarseResult(coarseGrids.Grids[0]);
gaussian(coarseGrids.PRNGs[0], coarseSource);
coarseResult = zero;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Testing some coarse space solvers" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
MdagMLinearOperator<CoarseOperator, CoarseVector> CoarseHermPosdefOp(Dc);
std::vector<std::unique_ptr<OperatorFunction<CoarseVector>>> coarseSolvers;
coarseSolvers.emplace_back(new GeneralisedMinimalResidual<CoarseVector>(5.0e-2, 100, 8, false));
coarseSolvers.emplace_back(new MinimalResidual<CoarseVector>(5.0e-2, 100, false));
coarseSolvers.emplace_back(new ConjugateGradient<CoarseVector>(5.0e-2, 100, false));
for(auto const &solver : coarseSolvers) {
coarseResult = zero;
(*solver)(CoarseHermPosdefOp, coarseSource, coarseResult);
}
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Testing the operators" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "MdagMLinearOperator<WilsonFermionR, LatticeFermion> FineHermPosdefOp(Dw);" << std::endl;
testLinearOperator(FineHermPosdefOp, FGrid, "FineHermPosdefOp");
std::cout << GridLogMessage << "Gamma5HermitianLinearOperator<WilsonFermionR, LatticeFermion> FineHermIndefOp(Dw);" << std::endl;
testLinearOperator(FineHermIndefOp, FGrid, "FineHermIndefOp");
std::cout << GridLogMessage << "Gamma5HermitianLinearOperator<WilsonFermionR, LatticeFermion> FineHermIndefOpDD(DwDD);" << std::endl;
testLinearOperator(FineHermIndefOpDD, FGrid, "FineHermIndefOpDD");
std::cout << GridLogMessage << "MdagMLinearOperator<CoarseOperator, CoarseVector> CoarseHermPosdefOp(Dc);" << std::endl;
testLinearOperator(CoarseHermPosdefOp, coarseGrids.Grids[0], "CoarseHermPosdefOp");
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Building deflation preconditioner " << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
FineMGPreconditioner FineMGPrecon(FineAggregates, Dc, FineHermIndefOp, Dw, FineHermIndefOp, Dw);
FineMGPreconditioner FineMGPreconDD(FineAggregates, Dc, FineHermIndefOp, Dw, FineHermIndefOpDD, DwDD);
FineTrivialPreconditioner FineSimplePrecon;
FineMGPrecon.runChecks(coarseGrids, 0);
if(coarseGrids.LattSizes.size() == 2) {
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Dummy testing for building a second coarse level" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
SubSubSpace CoarseAggregates(coarseGrids.Grids[1], coarseGrids.Grids[0], 0);
CoarseAggregates.CreateSubspace(coarseGrids.PRNGs[0], CoarseHermPosdefOp);
// // this doesn't work because this function applies g5 to a vector, which
// // doesn't work for coarse vectors atm -> FIXME
// CoarseTVA coarseTVA;
// coarseTVA(CoarseHermPosdefOp, CoarseAggregates.subspace, nb);
// // cannot apply g5 to coarse vectors atm -> FIXME
// for(int n=0;n<nb;n++){
// CoarseAggregates.subspace[n+nb] = g5 * CoarseAggregates.subspace[n]; // multiply with g5 normally instead of G5R5 since this specific to DWF
// std::cout<<GridLogMessage<<n<<" subspace "<<norm2(CoarseAggregates.subspace[n+nb])<<" "<<norm2(CoarseAggregates.subspace[n]) <<std::endl;
// }
auto coarseCoarseSites = 1;
for(auto const &elem : coarseGrids.LattSizes[1]) coarseCoarseSites *= elem;
std::cout << GridLogMessage << "Norms of MG test vectors after chiral projection (coarse coarse sites = " << coarseCoarseSites << ")"
<< std::endl;
for(int n = 0; n < nbasis; n++) {
std::cout << GridLogMessage << "vec[" << n << "] = " << norm2(CoarseAggregates.subspace[n]) << std::endl;
}
CoarseCoarseOperator Dcc(*coarseGrids.Grids[1]);
Dcc.CoarsenOperator(coarseGrids.Grids[0], CoarseHermPosdefOp, CoarseAggregates); // uses only linop.OpDiag & linop.OpDir
// // this doesn't work because this function applies g5 to a vector, which
// // doesn't work for coarse vectors atm -> FIXME
// std::cout << GridLogMessage << "Test vector analysis after construction of D_c_c" << std::endl;
// coarseTVA(CoarseHermPosdefOp, CoarseAggregates.subspace, nb);
CoarseCoarseVector coarseCoarseSource(coarseGrids.Grids[1]);
CoarseCoarseVector coarseCoarseResult(coarseGrids.Grids[1]);
gaussian(coarseGrids.PRNGs[1], coarseCoarseSource);
coarseCoarseResult = zero;
MdagMLinearOperator<CoarseCoarseOperator, CoarseCoarseVector> CoarseCoarseHermPosdefOp(Dcc);
std::vector<std::unique_ptr<OperatorFunction<CoarseCoarseVector>>> coarseCoarseSolvers;
coarseCoarseSolvers.emplace_back(new GeneralisedMinimalResidual<CoarseCoarseVector>(5.0e-2, 100, 8, false));
coarseCoarseSolvers.emplace_back(new MinimalResidual<CoarseCoarseVector>(5.0e-2, 100, false));
coarseCoarseSolvers.emplace_back(new ConjugateGradient<CoarseCoarseVector>(5.0e-2, 100, false));
for(auto const &solver : coarseCoarseSolvers) {
coarseCoarseResult = zero;
(*solver)(CoarseCoarseHermPosdefOp, coarseCoarseSource, coarseCoarseResult);
}
CoarseMGPreconditioner CoarseMGPrecon(CoarseAggregates, Dcc, CoarseHermPosdefOp, Dc, CoarseHermPosdefOp, Dc);
CoarseMGPrecon.runChecks(coarseGrids, 1);
std::cout << GridLogMessage << "ARTIFICIAL ABORT" << std::endl;
abort();
}
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Building VPGCR and FGMRES solvers w/ & w/o MG Preconditioner" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
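// The outer solvers are flexible Krylov methods (VPGCR and FGMRES) since the
// multigrid preconditioner is itself an iterative process that varies from
// application to application; the versions with the trivial preconditioner
// serve as an unpreconditioned baseline.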
std::vector<std::unique_ptr<OperatorFunction<LatticeFermion>>> solvers;
solvers.emplace_back(new PrecGeneralisedConjugateResidual<LatticeFermion>(1.0e-12, 100, FineMGPrecon, 8, 8));
solvers.emplace_back(new FlexibleGeneralisedMinimalResidual<LatticeFermion>(1.0e-12, 100, FineMGPrecon, 8));
solvers.emplace_back(new PrecGeneralisedConjugateResidual<LatticeFermion>(1.0e-12, 4000000, FineSimplePrecon, 8, 8));
solvers.emplace_back(new FlexibleGeneralisedMinimalResidual<LatticeFermion>(1.0e-12, 4000000, FineSimplePrecon, 8));
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "Testing the solvers" << std::endl;
std::cout << GridLogMessage << "**************************************************" << std::endl;
std::cout << GridLogMessage << "checking norm src " << norm2(src) << std::endl;
for(auto const &solver : solvers) {
result = zero;
(*solver)(FineHermIndefOp, src, result);
}
Grid_finalize();
}