
Hadrons: much cleaner eigenpack implementation, to be tested

Antonin Portelli 2018-03-13 13:51:09 +00:00
parent 229977c955
commit f57afe2079
6 changed files with 362 additions and 219 deletions

extras/Hadrons/EigenPack.hpp (new file)

@@ -0,0 +1,213 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: extras/Hadrons/EigenPack.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_EigenPack_hpp_
#define Hadrons_EigenPack_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>
BEGIN_HADRONS_NAMESPACE
// Lanczos type
#ifndef HADRONS_DEFAULT_LANCZOS_NBASIS
#define HADRONS_DEFAULT_LANCZOS_NBASIS 60
#endif
template <typename Field>
class EigenPack
{
public:
std::vector<RealD> eval;
std::vector<Field> evec;
public:
EigenPack(void) = default;
virtual ~EigenPack(void) = default;
EigenPack(const size_t size, GridBase *grid)
{
resize(size, grid);
}
void resize(const size_t size, GridBase *grid)
{
eval.resize(size);
evec.resize(size, grid);
}
virtual void read(const std::string fileStem, const int traj = -1)
{
std::string evecFilename, evalFilename;
makeFilenames(evecFilename, evalFilename, fileStem, traj);
XmlReader xmlReader(evalFilename);
basicRead(evec, evecFilename, evec.size());
LOG(Message) << "Reading " << eval.size() << " eigenvalues from '"
<< evalFilename << "'" << std::endl;
Grid::read(xmlReader, "evals", eval);
}
virtual void write(const std::string fileStem, const int traj = -1)
{
std::string evecFilename, evalFilename;
makeFilenames(evecFilename, evalFilename, fileStem, traj);
XmlWriter xmlWriter(evalFilename);
basicWrite(evecFilename, evec, evec.size());
LOG(Message) << "Writing " << eval.size() << " eigenvalues to '"
<< evalFilename << "'" << std::endl;
Grid::write(xmlWriter, "evals", eval);
}
protected:
void makeFilenames(std::string &evecFilename, std::string &evalFilename,
const std::string stem, const int traj = -1)
{
std::string t = (traj < 0) ? "" : ("." + std::to_string(traj));
evecFilename = stem + "_evec" + t + ".bin";
evalFilename = stem + "_eval" + t + ".xml";
}
template <typename T>
static void basicRead(std::vector<T> &evec, const std::string filename,
const unsigned int size)
{
emptyUserRecord record;
ScidacReader binReader;
binReader.open(filename);
for(int k = 0; k < size; ++k)
{
binReader.readScidacFieldRecord(evec[k], record);
}
binReader.close();
}
template <typename T>
static void basicWrite(const std::string filename, std::vector<T> &evec,
const unsigned int size)
{
emptyUserRecord record;
ScidacWriter binWriter;
binWriter.open(filename);
for(int k = 0; k < size; ++k)
{
binWriter.writeScidacFieldRecord(evec[k], record);
}
binWriter.close();
}
};
template <typename FineField, typename CoarseField>
class CoarseEigenPack: public EigenPack<FineField>
{
public:
std::vector<RealD> evalCoarse;
std::vector<CoarseField> evecCoarse;
public:
CoarseEigenPack(void) = default;
virtual ~CoarseEigenPack(void) = default;
CoarseEigenPack(const size_t sizeFine, const size_t sizeCoarse,
GridBase *gridFine, GridBase *gridCoarse)
{
resize(sizeFine, sizeCoarse, gridFine, gridCoarse);
}
void resize(const size_t sizeFine, const size_t sizeCoarse,
GridBase *gridFine, GridBase *gridCoarse)
{
EigenPack<FineField>::resize(sizeFine, gridFine);
evalCoarse.resize(sizeCoarse);
evecCoarse.resize(sizeCoarse, gridCoarse);
}
virtual void read(const std::string fileStem, const int traj = -1)
{
std::string evecFineFilename, evalFineFilename;
std::string evecCoarseFilename, evalCoarseFilename;
this->makeFilenames(evecFineFilename, evalFineFilename,
fileStem + "_fine", traj);
this->makeFilenames(evecCoarseFilename, evalCoarseFilename,
fileStem + "_coarse", traj);
XmlReader xmlFineReader(evalFineFilename);
XmlReader xmlCoarseReader(evalCoarseFilename);
LOG(Message) << "Reading " << this->evec.size() << " fine eigenvectors from '"
<< evecFineFilename << "'" << std::endl;
this->basicRead(this->evec, evecFineFilename, this->evec.size());
LOG(Message) << "Reading " << evecCoarse.size() << " coarse eigenvectors from '"
<< evecCoarseFilename << "'" << std::endl;
this->basicRead(evecCoarse, evecCoarseFilename, evecCoarse.size());
LOG(Message) << "Reading " << this->eval.size() << " fine eigenvalues from '"
<< evalFineFilename << "'" << std::endl;
Grid::read(xmlFineReader, "evals", this->eval);
LOG(Message) << "Reading " << evalCoarse.size() << " coarse eigenvalues from '"
<< evalCoarseFilename << "'" << std::endl;
Grid::read(xmlCoarseReader, "evals", evalCoarse);
}
virtual void write(const std::string fileStem, const int traj = -1)
{
std::string evecFineFilename, evalFineFilename;
std::string evecCoarseFilename, evalCoarseFilename;
this->makeFilenames(evecFineFilename, evalFineFilename,
fileStem + "_fine", traj);
this->makeFilenames(evecCoarseFilename, evalCoarseFilename,
fileStem + "_coarse", traj);
XmlWriter xmlFineWriter(evalFineFilename);
XmlWriter xmlCoarseWriter(evalCoarseFilename);
LOG(Message) << "Writing " << this->evec.size() << " fine eigenvectors to '"
<< evecFineFilename << "'" << std::endl;
this->basicWrite(evecFineFilename, this->evec, this->evec.size());
LOG(Message) << "Writing " << evecCoarse.size() << " coarse eigenvectors to '"
<< evecCoarseFilename << "'" << std::endl;
this->basicWrite(evecCoarseFilename, evecCoarse, evecCoarse.size());
LOG(Message) << "Writing " << this->eval.size() << " fine eigenvalues to '"
<< evalFineFilename << "'" << std::endl;
Grid::write(xmlFineWriter, "evals", this->eval);
LOG(Message) << "Writing " << evalCoarse.size() << " coarse eigenvalues to '"
<< evalCoarseFilename << "'" << std::endl;
Grid::write(xmlCoarseWriter, "evals", evalCoarse);
}
};
template <typename FImpl>
using FermionEigenPack = EigenPack<typename FImpl::FermionField>;
template <typename FImpl, int nBasis>
using CoarseFermionEigenPack = CoarseEigenPack<
typename FImpl::FermionField,
typename LocalCoherenceLanczos<typename FImpl::SiteSpinor,
typename FImpl::SiteComplex,
nBasis>::CoarseField>;
END_HADRONS_NAMESPACE
#endif // Hadrons_EigenPack_hpp_
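
For orientation, a minimal usage sketch of the pack interface defined above (not part of the commit): FIMPL, the grid pointer and the sizes are placeholders for whatever the surrounding Hadrons application provides. With stem "eigen" and trajectory 1500, makeFilenames maps the pack to eigen_evec.1500.bin and eigen_eval.1500.xml.

// Hypothetical sketch of the EigenPack interface; FIMPL and rbGrid are assumed
// to come from the usual Grid/Hadrons setup, the numbers are illustrative.
#include <Grid/Hadrons/EigenPack.hpp>

using namespace Grid;
using namespace Hadrons;

void eigenPackSketch(GridRedBlackCartesian *rbGrid)
{
    FermionEigenPack<FIMPL> pack(100, rbGrid);   // 100 eigenpairs on the fine grid

    // ... fill pack.eval / pack.evec, e.g. with an IRL solver ...

    pack.write("eigen", 1500);   // -> eigen_evec.1500.bin + eigen_eval.1500.xml
    pack.read("eigen", 1500);    // symmetric read of the same two files
}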

extras/Hadrons/Environment.cc

@@ -61,7 +61,7 @@ Environment::Environment(void)
// grids ///////////////////////////////////////////////////////////////////////
void Environment::createGrid(const unsigned int Ls)
{
if (grid5d_.find(Ls) == grid5d_.end())
if ((Ls > 1) and (grid5d_.find(Ls) == grid5d_.end()))
{
auto g = getGrid();
@@ -70,6 +70,53 @@ void Environment::createGrid(const unsigned int Ls)
}
}
void Environment::createCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls)
{
int nd = getNd();
std::vector<int> fineDim = getDim(), coarseDim;
unsigned int cLs = 1; // default: no blocking of the 5th dimension
auto key4d = blockSize, key5d = blockSize;
createGrid(Ls);
coarseDim.resize(nd);
for (int d = 0; d < coarseDim.size(); d++)
{
coarseDim[d] = fineDim[d]/blockSize[d];
if (coarseDim[d]*blockSize[d] != fineDim[d])
{
HADRON_ERROR(Size, "Fine dimension " + std::to_string(d)
+ " (" + std::to_string(fineDim[d])
+ ") not divisible by coarse dimension ("
+ std::to_string(coarseDim[d]) + ")");
}
}
if (blockSize.size() > nd)
{
cLs = Ls/blockSize[nd];
if (cLs*blockSize[nd] != Ls)
{
HADRON_ERROR(Size, "Fine Ls (" + std::to_string(Ls)
+ ") not divisible by coarse Ls ("
+ std::to_string(cLs) + ")");
}
key4d.resize(nd);
key5d.push_back(Ls);
}
gridCoarse4d_[key4d].reset(
SpaceTimeGrid::makeFourDimGrid(coarseDim,
GridDefaultSimd(nd, vComplex::Nsimd()), GridDefaultMpi()));
gridCoarseRb4d_[key4d].reset(
SpaceTimeGrid::makeFourDimRedBlackGrid(gridCoarse4d_[key4d].get()));
if (Ls > 1)
{
gridCoarse5d_[key5d].reset(
SpaceTimeGrid::makeFiveDimGrid(cLs, gridCoarse4d_[key4d].get()));
gridCoarseRb5d_[key5d].reset(
SpaceTimeGrid::makeFiveDimRedBlackGrid(cLs, gridCoarse4d_[key4d].get()));
}
}
GridCartesian * Environment::getGrid(const unsigned int Ls) const
{
try
@@ -104,7 +151,55 @@ GridRedBlackCartesian * Environment::getRbGrid(const unsigned int Ls) const
}
catch(std::out_of_range &)
{
HADRON_ERROR(Definition, "no red-black 5D grid with Ls= " + std::to_string(Ls));
HADRON_ERROR(Definition, "no red-black grid with Ls= " + std::to_string(Ls));
}
}
GridCartesian * Environment::getCoarseGrid(
const std::vector<int> &blockSize, const unsigned int Ls) const
{
auto key = blockSize;
try
{
if (Ls == 1)
{
key.resize(getNd());
return gridCoarse4d_.at(key).get();
}
else
{
key.push_back(Ls);
return gridCoarse5d_.at(key).get();
}
}
catch(std::out_of_range &)
{
HADRON_ERROR(Definition, "no coarse grid with Ls= " + std::to_string(Ls));
}
}
GridRedBlackCartesian * Environment::getRbCoarseGrid(
const std::vector<int> &blockSize, const unsigned int Ls) const
{
auto key = blockSize;
try
{
if (Ls == 1)
{
key.resize(getNd());
return gridCoarseRb4d_.at(key).get();
}
else
{
key.push_back(Ls);
return gridCoarseRb5d_.at(key).get();
}
}
catch(std::out_of_range &)
{
HADRON_ERROR(Definition, "no coarse red-black grid with Ls= " + std::to_string(Ls));
}
}
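
A short sketch of how a module can use this new coarse-grid bookkeeping (the block size, Ls and variable names below are illustrative; a fifth block-size entry blocks the Ls direction):

// Inside a Hadrons module (sketch; the numbers are made up):
std::vector<int> blockSize = {4, 4, 4, 4, 4};  // 4 spacetime blocks + Ls blocking
unsigned int     Ls        = 12;               // fine 5th-dimension extent

env().createCoarseGrid(blockSize, Ls);         // builds and caches the coarse grids
GridCartesian         *cg   = env().getCoarseGrid(blockSize, Ls);   // coarse Ls = 12/4 = 3
GridRedBlackCartesian *cgrb = env().getRbCoarseGrid(blockSize, Ls);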

extras/Hadrons/Environment.hpp

@@ -86,8 +86,14 @@ private:
public:
// grids
void createGrid(const unsigned int Ls);
void createCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls = 1);
GridCartesian * getGrid(const unsigned int Ls = 1) const;
GridRedBlackCartesian * getRbGrid(const unsigned int Ls = 1) const;
GridCartesian * getCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls = 1) const;
GridRedBlackCartesian * getRbCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls = 1) const;
std::vector<int> getDim(void) const;
int getDim(const unsigned int mu) const;
unsigned long int getLocalVolume(void) const;
@@ -155,6 +161,10 @@ private:
std::map<unsigned int, GridPt> grid5d_;
GridRbPt gridRb4d_;
std::map<unsigned int, GridRbPt> gridRb5d_;
std::map<std::vector<int>, GridPt> gridCoarse4d_;
std::map<std::vector<int>, GridRbPt> gridCoarseRb4d_;
std::map<std::vector<int>, GridPt> gridCoarse5d_;
std::map<std::vector<int>, GridRbPt> gridCoarseRb5d_;
unsigned int nd_;
// random number generator
RngPt rng4d_;

extras/Hadrons/LanczosUtils.hpp (deleted)

@@ -1,115 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: extras/Hadrons/LanczosUtils.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_LanczosUtils_hpp_
#define Hadrons_LanczosUtils_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>
BEGIN_HADRONS_NAMESPACE
// Lanczos type
#ifndef HADRONS_DEFAULT_LANCZOS_NBASIS
#define HADRONS_DEFAULT_LANCZOS_NBASIS 60
#endif
template <typename T>
struct EigenPack
{
typedef T VectorType;
std::vector<RealD> eval;
std::vector<T> evec;
EigenPack(void) = default;
EigenPack(const size_t size, GridBase *grid)
{
resize(size, grid);
}
void resize(const size_t size, GridBase *grid)
{
eval.resize(size);
evec.resize(size, grid);
}
void read(const std::string fileStem)
{
std::string evecFilename = fileStem + "_evec.bin";
std::string evalFilename = fileStem + "_eval.xml";
emptyUserRecord record;
ScidacReader binReader;
XmlReader xmlReader(evalFilename);
LOG(Message) << "Reading " << evec.size() << " eigenvectors from '"
<< evecFilename << "'" << std::endl;
binReader.open(evecFilename);
for(int k = 0; k < evec.size(); ++k)
{
binReader.readScidacFieldRecord(evec[k], record);
}
binReader.close();
LOG(Message) << "Reading " << eval.size() << " eigenvalues from '"
<< evalFilename << "'" << std::endl;
Grid::read(xmlReader, "evals", eval);
}
void write(const std::string fileStem)
{
std::string evecFilename = fileStem + "_evec.bin";
std::string evalFilename = fileStem + "_eval.xml";
emptyUserRecord record;
ScidacWriter binWriter;
XmlWriter xmlWriter(evalFilename);
LOG(Message) << "Writing " << evec.size() << " eigenvectors to '"
<< evecFilename << "'" << std::endl;
binWriter.open(fileStem + "_evec.bin");
for(int k = 0; k < evec.size(); ++k)
{
binWriter.writeScidacFieldRecord(evec[k], record);
}
binWriter.close();
LOG(Message) << "Writing " << eval.size() << " eigenvalues to '"
<< evalFilename << "'" << std::endl;
Grid::write(xmlWriter, "evals", eval);
}
};
template <typename FImpl>
using FineEigenPack = EigenPack<typename FImpl::FermionField>;
template <typename FImpl, int nBasis>
using CoarseEigenPack = EigenPack<
typename LocalCoherenceLanczos<typename FImpl::SiteSpinor,
typename FImpl::SiteComplex,
nBasis>::CoarseField>;
END_HADRONS_NAMESPACE
#endif // Hadrons_LanczosUtils_hpp_

extras/Hadrons/Makefile.am

@@ -21,7 +21,7 @@ nobase_libHadrons_a_HEADERS = \
GeneticScheduler.hpp \
Global.hpp \
Graph.hpp \
LanczosUtils.hpp \
EigenPack.hpp \
Module.hpp \
Modules.hpp \
ModuleFactory.hpp \

extras/Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp

@@ -31,7 +31,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/LanczosUtils.hpp>
#include <Grid/Hadrons/EigenPack.hpp>
BEGIN_HADRONS_NAMESPACE
@@ -45,8 +45,7 @@ class LocalCoherenceLanczosPar: Serializable
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(LocalCoherenceLanczosPar,
std::string, action,
int, doFine,
int, doCoarse,
bool, doCoarse,
LanczosParams, fineParams,
LanczosParams, coarseParams,
ChebyParams, smoother,
@@ -63,8 +62,8 @@ public:
typedef LocalCoherenceLanczos<typename FImpl::SiteSpinor,
typename FImpl::SiteComplex,
nBasis> LCL;
typedef FineEigenPack<FImpl> FinePack;
typedef CoarseEigenPack<FImpl, nBasis> CoarsePack;
typedef FermionEigenPack<FImpl> BasePack;
typedef CoarseFermionEigenPack<FImpl, nBasis> CoarsePack;
typedef HADRONS_DEFAULT_SCHUR_OP<FMat, FermionField> SchurFMat;
public:
// constructor
@@ -79,15 +78,7 @@ public:
// execution
virtual void execute(void);
private:
void makeCoarseGrid(void);
private:
std::vector<int> coarseDim_;
int Ls_, cLs_{1};
std::unique_ptr<GridCartesian> coarseGrid4_{nullptr};
std::unique_ptr<GridCartesian> coarseGrid_{nullptr};
std::unique_ptr<GridRedBlackCartesian> coarseGrid4Rb_{nullptr};
std::unique_ptr<GridRedBlackCartesian> coarseGridRb_{nullptr};
std::string fineName_, coarseName_;
std::string fineName_, coarseName_;
};
MODULE_REGISTER_NS(LocalCoherenceLanczos,
@@ -127,55 +118,6 @@ std::vector<std::string> TLocalCoherenceLanczos<FImpl, nBasis>::getOutput(void)
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl, int nBasis>
void TLocalCoherenceLanczos<FImpl, nBasis>::makeCoarseGrid(void)
{
int nd = env().getNd();
std::vector<int> blockSize = strToVec<int>(par().blockSize);
auto fineDim = env().getDim();
Ls_ = env().getObjectLs(par().action);
env().createGrid(Ls_);
coarseDim_.resize(nd);
for (int d = 0; d < coarseDim_.size(); d++)
{
coarseDim_[d] = fineDim[d]/blockSize[d];
if (coarseDim_[d]*blockSize[d] != fineDim[d])
{
HADRON_ERROR(Size, "Fine dimension " + std::to_string(d)
+ " (" + std::to_string(fineDim[d])
+ ") not divisible by coarse dimension ("
+ std::to_string(coarseDim_[d]) + ")");
}
}
if (blockSize.size() > nd)
{
cLs_ = Ls_/blockSize[nd];
if (cLs_*blockSize[nd] != Ls_)
{
HADRON_ERROR(Size, "Fine Ls (" + std::to_string(Ls_)
+ ") not divisible by coarse Ls ("
+ std::to_string(cLs_) + ")");
}
}
if (Ls_ > 1)
{
coarseGrid4_.reset(SpaceTimeGrid::makeFourDimGrid(
coarseDim_, GridDefaultSimd(nd, vComplex::Nsimd()),
GridDefaultMpi()));
coarseGrid4Rb_.reset(SpaceTimeGrid::makeFourDimRedBlackGrid(coarseGrid4_.get()));
coarseGrid_.reset(SpaceTimeGrid::makeFiveDimGrid(cLs_, coarseGrid4_.get()));
coarseGridRb_.reset(SpaceTimeGrid::makeFiveDimRedBlackGrid(cLs_, coarseGrid4_.get()));
}
else
{
coarseGrid_.reset(SpaceTimeGrid::makeFourDimGrid(
coarseDim_, GridDefaultSimd(nd, vComplex::Nsimd()),
GridDefaultMpi()));
coarseGridRb_.reset(SpaceTimeGrid::makeFourDimRedBlackGrid(coarseGrid_.get()));
}
}
template <typename FImpl, int nBasis>
void TLocalCoherenceLanczos<FImpl, nBasis>::setup(void)
{
@@ -183,19 +125,25 @@ void TLocalCoherenceLanczos<FImpl, nBasis>::setup(void)
<< " action '" << par().action << "' (" << nBasis
<< " eigenvectors)..." << std::endl;
if (!coarseGrid_)
{
makeCoarseGrid();
}
LOG(Message) << "Coarse grid: " << coarseGrid_->GlobalDimensions() << std::endl;
envCreate(FinePack, fineName_, Ls_, par().fineParams.Nm, env().getRbGrid(Ls_));
envCreate(CoarsePack, coarseName_, Ls_, par().coarseParams.Nm, coarseGridRb_.get());
auto &fine = envGet(FinePack, fineName_);
auto &coarse = envGet(CoarsePack, coarseName_);
envTmp(SchurFMat, "mat", Ls_, envGet(FMat, par().action));
unsigned int Ls = env().getObjectLs(par().action);
auto blockSize = strToVec<int>(par().blockSize);
env().createCoarseGrid(blockSize, Ls);
auto cg = env().getCoarseGrid(blockSize, Ls);
auto cgrb = env().getRbCoarseGrid(blockSize, Ls);
int cNm = (par().doCoarse) ? par().coarseParams.Nm : 0;
LOG(Message) << "Coarse grid: " << cg->GlobalDimensions() << std::endl;
envCreateDerived(BasePack, CoarsePack, getName(), Ls,
par().fineParams.Nm, cNm, env().getRbGrid(Ls), cgrb);
auto &epack = envGet(CoarsePack, getName());
envTmp(SchurFMat, "mat", Ls, envGet(FMat, par().action));
envGetTmp(SchurFMat, mat);
envTmp(LCL, "solver", Ls_, env().getRbGrid(Ls_), coarseGridRb_.get(), mat,
Odd, fine.evec, coarse.evec, fine.eval, coarse.eval);
envTmp(LCL, "solver", Ls, env().getRbGrid(Ls), cgrb, mat,
Odd, epack.evec, epack.evecCoarse, epack.eval, epack.evalCoarse);
}
// execution ///////////////////////////////////////////////////////////////////
@@ -204,41 +152,33 @@
{
auto &finePar = par().fineParams;
auto &coarsePar = par().coarseParams;
auto &fine = envGet(FinePack, fineName_);
auto &coarse = envGet(CoarsePack, coarseName_);
auto &epack = envGet(CoarsePack, getName());
envGetTmp(LCL, solver);
if (par().doFine)
{
LOG(Message) << "Performing fine grid IRL -- Nstop= "
<< finePar.Nstop << ", Nk= " << finePar.Nk << ", Nm= "
<< finePar.Nm << std::endl;
solver.calcFine(finePar.Cheby, finePar.Nstop, finePar.Nk, finePar.Nm,
finePar.resid,finePar.MaxIt, finePar.betastp,
finePar.MinRes);
solver.testFine(finePar.resid*100.0);
LOG(Message) << "Orthogonalising" << std::endl;
solver.Orthogonalise();
if (!par().output.empty())
{
fine.write(par().output + "_fine");
}
}
LOG(Message) << "Performing fine grid IRL -- Nstop= "
<< finePar.Nstop << ", Nk= " << finePar.Nk << ", Nm= "
<< finePar.Nm << std::endl;
solver.calcFine(finePar.Cheby, finePar.Nstop, finePar.Nk, finePar.Nm,
finePar.resid,finePar.MaxIt, finePar.betastp,
finePar.MinRes);
solver.testFine(finePar.resid*100.0);
if (par().doCoarse)
{
LOG(Message) << "Orthogonalising" << std::endl;
solver.Orthogonalise();
LOG(Message) << "Performing coarse grid IRL -- Nstop= "
<< coarsePar.Nstop << ", Nk= " << coarsePar.Nk << ", Nm= "
<< coarsePar.Nm << std::endl;
<< coarsePar.Nstop << ", Nk= " << coarsePar.Nk << ", Nm= "
<< coarsePar.Nm << std::endl;
solver.calcCoarse(coarsePar.Cheby, par().smoother, par().coarseRelaxTol,
coarsePar.Nstop, coarsePar.Nk, coarsePar.Nm,
coarsePar.Nstop, coarsePar.Nk, coarsePar.Nm,
coarsePar.resid, coarsePar.MaxIt, coarsePar.betastp,
coarsePar.MinRes);
solver.testCoarse(coarsePar.resid*100.0, par().smoother,
par().coarseRelaxTol);
if (!par().output.empty())
{
coarse.write(par().output + "_coarse");
}
par().coarseRelaxTol);
}
if (!par().output.empty())
{
epack.write(par().output, vm().getTrajectory());
}
}
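
Finally, a sketch of how a later job could load what this module writes, assuming output was set to "eigen", the run is at trajectory 1500 and the same blocking and grids are recreated (all names and sizes below are placeholders, not part of the commit):

// Hypothetical read-back inside another module's setup(); values are illustrative.
auto         blockSize = strToVec<int>("4 4 4 4 4");  // same blocking as the Lanczos run
unsigned int Ls = 12, nFine = 100, nCoarse = 40;

env().createCoarseGrid(blockSize, Ls);
CoarseFermionEigenPack<FIMPL, HADRONS_DEFAULT_LANCZOS_NBASIS>
    epack(nFine, nCoarse, env().getRbGrid(Ls),
          env().getRbCoarseGrid(blockSize, Ls));
epack.read("eigen", 1500);
// files read:
//   eigen_fine_evec.1500.bin     eigen_fine_eval.1500.xml
//   eigen_coarse_evec.1500.bin   eigen_coarse_eval.1500.xml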