Allowed EOFA to accept different solvers for the L and R operations in the heatbath step.
Fixed EOFA Meofa operating on the member Phi rather than the input field.
Added a derived EOFA pseudofermion variant that allows mixed-precision CG to be used in the heatbath.
Added forces/Test_mobius_gparity_eofa_mixed testing the above; it reproduces the regular EOFA.
To Test_gamma, added checks for the various properties of the charge conjugation matrix C=-gamma2*gamma4 in the Grid basis.
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./HMC/Mobius2p1fIDSDRGparityEOFA.cc

    Copyright (C) 2015-2016

    Author: Christopher Kelly <ckelly@bnl.gov>
    Author: Peter Boyle <pabobyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

//We try to reproduce, with G-parity BCs, the 246 MeV, 1.37 GeV ensemble
//To speed things up we will use Mobius DWF with b+c=32/12 and Ls=12 to match the Ls=32 of the original
//These parameters match those used in the 2020 K->pipi paper

|
struct RatQuoParameters: Serializable {
|
|
GRID_SERIALIZABLE_CLASS_MEMBERS(RatQuoParameters,
|
|
double, bnd_lo,
|
|
double, bnd_hi,
|
|
Integer, action_degree,
|
|
double, action_tolerance,
|
|
Integer, md_degree,
|
|
double, md_tolerance,
|
|
Integer, reliable_update_freq,
|
|
Integer, bnd_check_freq);
|
|
RatQuoParameters() {
|
|
bnd_lo = 1e-2;
|
|
bnd_hi = 30;
|
|
action_degree = 10;
|
|
action_tolerance = 1e-10;
|
|
md_degree = 10;
|
|
md_tolerance = 1e-8;
|
|
bnd_check_freq = 20;
|
|
reliable_update_freq = 50;
|
|
}
|
|
|
|
void Export(RationalActionParams &into) const{
|
|
into.lo = bnd_lo;
|
|
into.hi = bnd_hi;
|
|
into.action_degree = action_degree;
|
|
into.action_tolerance = action_tolerance;
|
|
into.md_degree = md_degree;
|
|
into.md_tolerance = md_tolerance;
|
|
into.BoundsCheckFreq = bnd_check_freq;
|
|
}
|
|
};
|
|
|
|
struct EOFAparameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(EOFAparameters,
                                  OneFlavourRationalParams, rat_params,
                                  double, action_tolerance,
                                  double, action_mixcg_inner_tolerance,
                                  double, md_tolerance,
                                  double, md_mixcg_inner_tolerance);

  EOFAparameters() {
    action_mixcg_inner_tolerance = 1e-8;
    action_tolerance = 1e-10;
    md_tolerance = 1e-8;
    md_mixcg_inner_tolerance = 1e-8;

    rat_params.lo = 0.1;
    rat_params.hi = 25.0;
    rat_params.MaxIter  = 10000;
    rat_params.tolerance= 1.0e-9;
    rat_params.degree   = 14;
    rat_params.precision= 50;
  }
};

struct EvolParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(EvolParameters,
                                  Integer, StartTrajectory,
                                  Integer, Trajectories,
                                  Integer, SaveInterval,
                                  Integer, Steps,
                                  bool, MetropolisTest,
                                  std::string, StartingType,
                                  std::vector<Integer>, GparityDirs,
                                  EOFAparameters, eofa_l,
                                  RatQuoParameters, rat_quo_s,
                                  RatQuoParameters, rat_quo_DSDR);

  EvolParameters() {
    //For initial thermalization; afterwards the user should switch Metropolis on and use StartingType=CheckpointStart
    MetropolisTest    = false;
    StartTrajectory   = 0;
    Trajectories      = 50;
    SaveInterval      = 5;
    StartingType      = "ColdStart";
    GparityDirs.resize(3, 1); //1 for G-parity, 0 for periodic
    Steps             = 5;
  }
};

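//main() below reads the run parameters from an XML file (default params.xml). If the file is
//absent, an XML template is written to params.xml.templ via Grid's XmlWriter and the program
//exits. As a rough hand-written sketch only (the exact layout is whatever XmlWriter emits for
//the GRID_SERIALIZABLE_CLASS_MEMBERS above, so generate the template rather than copying this),
//the file looks something like:
//
//  <?xml version="1.0"?>
//  <grid>
//    <Params>
//      <StartTrajectory>0</StartTrajectory>
//      <Trajectories>50</Trajectories>
//      <SaveInterval>5</SaveInterval>
//      <Steps>5</Steps>
//      <MetropolisTest>false</MetropolisTest>
//      <StartingType>ColdStart</StartingType>
//      <GparityDirs>...</GparityDirs>        <!-- three entries: 1 for G-parity, 0 for periodic -->
//      <eofa_l>...</eofa_l>                  <!-- EOFAparameters -->
//      <rat_quo_s>...</rat_quo_s>            <!-- RatQuoParameters for the strange quark -->
//      <rat_quo_DSDR>...</rat_quo_DSDR>      <!-- RatQuoParameters for the DSDR determinant -->
//    </Params>
//  </grid>
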
bool fileExists(const std::string &fn){
  std::ifstream f(fn);
  return f.good();
}


struct LanczosParameters: Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
                                  double, alpha,
                                  double, beta,
                                  double, mu,
                                  int, ord,
                                  int, n_stop,
                                  int, n_want,
                                  int, n_use,
                                  double, tolerance);

  LanczosParameters() {
    alpha = 35;
    beta = 5;
    mu = 0;
    ord = 100;
    n_stop = 10;
    n_want = 10;
    n_use = 15;
    tolerance = 1e-6;
  }
};

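//A note on how computeEigenvalues (below) uses these parameters: alpha and beta define the
//Chebyshev filter window [beta^2, alpha^2] applied to the even-odd preconditioned MdagM, so that
//the implicitly restarted Lanczos converges on the modes below beta^2, and ord is the polynomial
//order of that filter. Roughly speaking, n_stop, n_want and n_use set the IRL stopping count,
//the number of wanted eigenvalues and the restart basis size respectively, while mu is currently
//unused and asserted to be zero.
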
template<typename FermionActionD, typename FermionFieldD>
void computeEigenvalues(std::string param_file,
                        GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
                        FermionActionD &action, GridParallelRNG &rng){

  LanczosParameters params;
  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "LanczosParameters", params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    Grid::XmlWriter wr(param_file + ".templ");
    write(wr, "LanczosParameters", params);
  }

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  action.ImportGauge(latt);

  SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
  PlainHermOp<FermionFieldD> hermop_wrap(hermop);
  //ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
  assert(params.mu == 0.0);

  Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
  FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);

  std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
  ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);

  std::vector<RealD> eval(params.n_use);
  std::vector<FermionFieldD> evec(params.n_use, rbGrid);
  int Nconv;
  IRL.calc(eval, evec, gauss_o, Nconv);

  std::cout << "Eigenvalues:" << std::endl;
  for(int i=0;i<params.n_want;i++){
    std::cout << i << " " << eval[i] << std::endl;
  }
}


//Check the quality of the RHMC approx
template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
               FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
               int inv_pow, const std::string &quark_descr){

  FermionFieldD gauss_o(rbGrid);
  FermionFieldD gauss(Grid);
  gaussian(rng, gauss);
  pickCheckerboard(Odd, gauss_o, gauss);

  numOp.ImportGauge(latt);
  denOp.ImportGauge(latt);

  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
  SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
  std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "-------------------------------------------------------------------------------" << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
  InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

  std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
  InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
  std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
}


template<typename FermionImplPolicy>
void checkEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
               GridCartesian* FGrid, GridParallelRNG &rng, const LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA action/bounds check" << std::endl;
  typename FermionImplPolicy::FermionField eta(FGrid);
  RealD scale = std::sqrt(0.5);
  gaussian(rng,eta); eta = eta * scale;

  //Use the inbuilt check
  EOFA.refresh(latt, eta);
  EOFA.S(latt);
  std::cout << GridLogMessage << "Finished EOFA action/bounds check" << std::endl;
}


template<typename FermionImplPolicy>
class EOFAlinop: public LinearOperatorBase<typename FermionImplPolicy::FermionField>{
  ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA;
  LatticeGaugeFieldD &U;
public:
  EOFAlinop(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA, LatticeGaugeFieldD &U): EOFA(EOFA), U(U){}

  typedef typename FermionImplPolicy::FermionField Field;
  void OpDiag (const Field &in, Field &out){ assert(0); }
  void OpDir  (const Field &in, Field &out,int dir,int disp){ assert(0); }
  void OpDirAll (const Field &in, std::vector<Field> &out){ assert(0); }

  void Op     (const Field &in, Field &out){ assert(0); }
  void AdjOp  (const Field &in, Field &out){ assert(0); }
  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
  void HermOp(const Field &in, Field &out){ EOFA.Meofa(U, in, out); }
};

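//EOFAlinop above exposes only HermOp, wrapping the Hermitian EOFA operator Meofa so that generic
//Grid algorithms expecting a LinearOperatorBase can be applied to it; every other method asserts.
//upperBoundEOFA below uses it with the power method to estimate the largest eigenvalue of Meofa,
//which is presumably what one would feed into rat_params.hi when tuning the EOFA rational bounds.
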
template<typename FermionImplPolicy>
void upperBoundEOFA(ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> &EOFA,
                    GridCartesian* FGrid, GridParallelRNG &rng, LatticeGaugeFieldD &latt){
  std::cout << GridLogMessage << "Starting EOFA upper bound compute" << std::endl;
  EOFAlinop<FermionImplPolicy> linop(EOFA, latt);
  typename FermionImplPolicy::FermionField eta(FGrid);
  gaussian(rng,eta);
  PowerMethod<typename FermionImplPolicy::FermionField> power_method;
  auto lambda_max = power_method(linop,eta);
  std::cout << GridLogMessage << "Upper bound of EOFA operator " << lambda_max << std::endl;
}


NAMESPACE_BEGIN(Grid);

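//The wrapper below presents a mixed precision CG as an OperatorFunction on double precision
//fields. The HMC hands it the double precision Schur operator; each call first refreshes the
//single precision copy of the gauge field from the double precision one (precisionChange plus
//the even/odd pickCheckerboard), since the integrator will have updated U since the last solve,
//and then runs Grid's MixedPrecisionConjugateGradient with the configured inner/outer tolerances.
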
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
  typedef typename FermionOperatorD::FermionField FieldD;
  typedef typename FermionOperatorF::FermionField FieldF;

  using OperatorFunction<FieldD>::operator();

  RealD   Tolerance;
  RealD   InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
  Integer MaxInnerIterations;
  Integer MaxOuterIterations;
  GridBase* SinglePrecGrid4; //Grid for single-precision fields
  GridBase* SinglePrecGrid5; //Grid for single-precision fields
  RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance

  FermionOperatorF &FermOpF;
  FermionOperatorD &FermOpD;
  SchurOperatorF &LinOpF;
  SchurOperatorD &LinOpD;

  Integer TotalInnerIterations; //Number of inner CG iterations
  Integer TotalOuterIterations; //Number of restarts
  Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step

  MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
                                                  Integer maxinnerit,
                                                  Integer maxouterit,
                                                  GridBase* _sp_grid4,
                                                  GridBase* _sp_grid5,
                                                  FermionOperatorF &_FermOpF,
                                                  FermionOperatorD &_FermOpD,
                                                  SchurOperatorF &_LinOpF,
                                                  SchurOperatorD &_LinOpD):
    LinOpF(_LinOpF),
    LinOpD(_LinOpD),
    FermOpF(_FermOpF),
    FermOpD(_FermOpD),
    Tolerance(tol),
    InnerTolerance(tol),
    MaxInnerIterations(maxinnerit),
    MaxOuterIterations(maxouterit),
    SinglePrecGrid4(_sp_grid4),
    SinglePrecGrid5(_sp_grid5),
    OuterLoopNormMult(100.)
  { }

  void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {

    std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;

    SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
    assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));

    precisionChange(FermOpF.Umu, FermOpD.Umu);

    pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
    pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);

    ////////////////////////////////////////////////////////////////////////////////////
    // Make a mixed precision conjugate gradient
    ////////////////////////////////////////////////////////////////////////////////////
    MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
    MPCG.InnerTolerance = InnerTolerance;
    std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
    MPCG(src,psi);
  }
};

NAMESPACE_END(Grid);


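//Command line usage (a summary of the flags parsed below, in addition to Grid's own
//--grid/--mpi etc. handled by Grid_init). The tuning/check flags cause the program to run the
//requested checks and exit rather than evolve:
//  --param_file <file>     : XML parameter file (default params.xml); if absent, a template
//                            <file>.templ is written and the program exits
//  --read_check            : load the checkpointed gauge field/RNGs, then exit (checksum/plaquette check)
//  --check_eofa            : run the inbuilt EOFA action/bounds check
//  --upper_bound_eofa      : estimate the largest eigenvalue of the EOFA operator via the power method
//  --eigenrange_s <xml>    : compute the eigenvalue range of the strange quark operator (Lanczos params from <xml>)
//  --tune_rhmc_s           : check the RHMC rational approximation bounds for the strange quark
//  --eigenrange_DSDR <xml> : as --eigenrange_s, for the DSDR operator
//  --tune_rhmc_DSDR        : as --tune_rhmc_s, for the DSDR determinant ratio
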
int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  int threads = GridThread::GetThreads();
  // here make a routine to print all the relevant information on the run
  std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;

  std::string param_file = "params.xml";
  bool file_load_check = false;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--param_file"){
      assert(i!=argc-1);
      param_file = argv[i+1];
    }else if(sarg == "--read_check"){ //check the fields load correctly and pass the checksum/plaquette reproduction test
      file_load_check = true;
    }
  }

  //Read the user parameters
  EvolParameters user_params;

  if(fileExists(param_file)){
    std::cout << GridLogMessage << " Reading " << param_file << std::endl;
    Grid::XmlReader rd(param_file);
    read(rd, "Params", user_params);
  }else if(!GlobalSharedMemory::WorldRank){
    std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
    std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
    {
      Grid::XmlWriter wr(param_file + ".templ");
      write(wr, "Params", user_params);
    }
    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Check the parameters
  if(user_params.GparityDirs.size() != Nd-1){
    std::cerr << "Error in input parameters: expect GparityDirs to have size = " << Nd-1 << std::endl;
    exit(1);
  }
  for(int i=0;i<Nd-1;i++)
    if(user_params.GparityDirs[i] != 0 && user_params.GparityDirs[i] != 1){
      std::cerr << "Error in input parameters: expect GparityDirs values to be 0 (periodic) or 1 (G-parity)" << std::endl;
      exit(1);
    }

  typedef GparityMobiusEOFAFermionD EOFAactionD;
  typedef GparityMobiusFermionD FermionActionD;
  typedef typename FermionActionD::Impl_t FermionImplPolicyD;
  typedef typename FermionActionD::FermionField FermionFieldD;

  typedef GparityMobiusEOFAFermionF EOFAactionF;
  typedef GparityMobiusFermionF FermionActionF;
  typedef typename FermionActionF::Impl_t FermionImplPolicyF;
  typedef typename FermionActionF::FermionField FermionFieldF;

  typedef GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicyD,FermionImplPolicyF> MixedPrecRHMC;
  typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicyD> DoublePrecRHMC;

  //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  IntegratorParameters MD;
  typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
  typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
  MD.name    = std::string("MinimumNorm2");
  MD.MDsteps = user_params.Steps;
  MD.trajL   = 1.0;

  HMCparameters HMCparams;
  HMCparams.StartTrajectory   = user_params.StartTrajectory;
  HMCparams.Trajectories      = user_params.Trajectories;
  HMCparams.NoMetropolisUntil = 0;
  HMCparams.StartingType      = user_params.StartingType;
  HMCparams.MetropolisTest    = user_params.MetropolisTest;
  HMCparams.MD                = MD;
  HMCWrapper TheHMC(HMCparams);

  // Grid from the command line arguments --grid and --mpi
  TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition

  CheckpointerParameters CPparams;
  CPparams.config_prefix = "ckpoint_lat";
  CPparams.rng_prefix    = "ckpoint_rng";
  CPparams.saveInterval  = user_params.SaveInterval;
  CPparams.format        = "IEEE64BIG";
  TheHMC.Resources.LoadNerscCheckpointer(CPparams);

  //Note that checkpointing saves the RNG state so that this initialization is required only for the very first configuration
  RNGModuleParameters RNGpar;
  RNGpar.serial_seeds   = "1 2 3 4 5";
  RNGpar.parallel_seeds = "6 7 8 9 10";
  TheHMC.Resources.SetRNGSeeds(RNGpar);

  typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
  TheHMC.Resources.AddObservable<PlaqObs>();
  //////////////////////////////////////////////

  const int Ls      = 12;
  Real beta         = 1.75;
  Real light_mass   = 0.0042; //240 MeV
  Real strange_mass = 0.045;
  Real pv_mass      = 1.0;
  RealD M5          = 1.8;
  RealD mobius_scale = 32./12.; //b+c

  RealD mob_bmc = 1.0;
  RealD mob_b = (mobius_scale + mob_bmc)/2.;
  RealD mob_c = (mobius_scale - mob_bmc)/2.;

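  //With mobius_scale = 32/12 and mob_bmc = 1 the above gives b = (32/12 + 1)/2 ~ 1.833 and
  //c = (32/12 - 1)/2 ~ 0.833, i.e. the conventional b-c = 1 Mobius kernel with b+c = 32/12 ~ 2.67,
  //the scale factor that lets Ls=12 mimic the Ls=32 Shamir ensemble noted at the top of this file.
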
  //Setup the Grids
  auto UGridD   = TheHMC.Resources.GetCartesian();
  auto UrbGridD = TheHMC.Resources.GetRBCartesian();
  auto FGridD   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridD);
  auto FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridD);

  GridCartesian* UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
  auto FGridF   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
  auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);

  ConjugateIwasakiGaugeActionD GaugeAction(beta);

  // temporarily need a gauge field
  LatticeGaugeFieldD Ud(UGridD);
  LatticeGaugeFieldF Uf(UGridF);

  //Setup the BCs
  FermionActionD::ImplParams Params;
  for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
  Params.twists[Nd-1] = 1; //APBC in time direction

  std::vector<int> dirs4(Nd);
  for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
  dirs4[Nd-1] = 0; //periodic gauge BC in time

  GaugeImplPolicy::setDirections(dirs4); //gauge BC

  //Run optional gauge field checksum checker and exit
  if(file_load_check){
    TheHMC.initializeGaugeFieldAndRNGs(Ud);
    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }


  ////////////////////////////////////
  // Collect actions
  ////////////////////////////////////
  ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
  ActionLevel<HMCWrapper::Field> Level2(1); //DSDR
  ActionLevel<HMCWrapper::Field> Level3(8); //gauge (8 increments per step)

  /////////////////////////////////////////////////////////////
  // Light EOFA action
  // have to be careful with the parameters, cf. Test_dwf_gpforce_eofa.cc
  /////////////////////////////////////////////////////////////

  EOFAactionD LopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, light_mass, light_mass, pv_mass, 0.0, -1, M5, mob_b, mob_c, Params);
  EOFAactionF LopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, light_mass, light_mass, pv_mass, 0.0, -1, M5, mob_b, mob_c, Params);
  EOFAactionD RopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, pv_mass, light_mass, pv_mass, -1.0, 1, M5, mob_b, mob_c, Params);
  EOFAactionF RopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, pv_mass, light_mass, pv_mass, -1.0, 1, M5, mob_b, mob_c, Params);

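  //Lop (shift 0, pm=-1) and Rop (shift -1, pm=+1) are the two one-flavour EOFA operators whose
  //combination reproduces det[D(light_mass)]/det[D(pv_mass)] (cf. Test_dwf_gpforce_eofa.cc noted
  //above); single- and double-precision copies of each are constructed so the mixed-precision
  //solvers below can pair them.
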
  typedef SchurDiagMooeeOperator<EOFAactionD,FermionFieldD> EOFAschuropD;
  typedef SchurDiagMooeeOperator<EOFAactionF,FermionFieldF> EOFAschuropF;

  EOFAschuropD linopL_D(LopD);
  EOFAschuropD linopR_D(RopD);

  EOFAschuropF linopL_F(LopF);
  EOFAschuropF linopR_F(RopF);

  typedef MixedPrecisionConjugateGradientOperatorFunction<EOFAactionD, EOFAactionF, EOFAschuropD, EOFAschuropF> EOFA_mxCG;

  EOFA_mxCG ActionMCG_L(user_params.eofa_l.action_tolerance, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
  ActionMCG_L.InnerTolerance = user_params.eofa_l.action_mixcg_inner_tolerance;

  EOFA_mxCG ActionMCG_R(user_params.eofa_l.action_tolerance, 10000, 1000, UGridF, FrbGridF, RopF, RopD, linopR_F, linopR_D);
  ActionMCG_R.InnerTolerance = user_params.eofa_l.action_mixcg_inner_tolerance;

  EOFA_mxCG DerivMCG_L(user_params.eofa_l.md_tolerance, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
  DerivMCG_L.InnerTolerance = user_params.eofa_l.md_mixcg_inner_tolerance;

  EOFA_mxCG DerivMCG_R(user_params.eofa_l.md_tolerance, 10000, 1000, UGridF, FrbGridF, RopF, RopD, linopR_F, linopR_D);
  DerivMCG_R.InnerTolerance = user_params.eofa_l.md_mixcg_inner_tolerance;

  std::cout << GridLogMessage << "Set EOFA action solver action tolerance outer=" << ActionMCG_L.Tolerance << " inner=" << ActionMCG_L.InnerTolerance << std::endl;
  std::cout << GridLogMessage << "Set EOFA MD solver tolerance outer=" << DerivMCG_L.Tolerance << " inner=" << DerivMCG_L.InnerTolerance << std::endl;

  ConjugateGradient<FermionFieldD> ActionCG(user_params.eofa_l.action_tolerance, 10000);
  ConjugateGradient<FermionFieldD> DerivativeCG(user_params.eofa_l.md_tolerance, 10000);

  // ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicyD> EOFA(LopD, RopD,
  //                                                                  ActionCG, ActionCG, ActionCG,
  //                                                                  DerivativeCG, DerivativeCG,
  //                                                                  user_params.eofa_l.rat_params, true);

  // ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicyD> EOFA(LopD, RopD,
  //                                                                  ActionMCG_L, ActionMCG_R,
  //                                                                  ActionMCG_L, ActionMCG_R,
  //                                                                  DerivMCG_L, DerivMCG_R,
  //                                                                  user_params.eofa_l.rat_params, true);

  ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> EOFA(LopF, RopF,
                                                                                                        LopD, RopD,
                                                                                                        ActionMCG_L, ActionMCG_R,
                                                                                                        ActionMCG_L, ActionMCG_R,
                                                                                                        DerivMCG_L, DerivMCG_R,
                                                                                                        user_params.eofa_l.rat_params, true);

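  //The mixed-precision heatbath variant takes the single- and double-precision L/R operators and
  //then what appear to be separate L/R solver pairs for the heatbath and for the action
  //evaluation (here the same ActionMCG_L/R are simply passed twice), followed by the L/R
  //derivative solvers and the rational approximation parameters.
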
  Level1.push_back(&EOFA);

  ////////////////////////////////////
  // Strange action
  ////////////////////////////////////
  FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD,strange_mass,M5,mob_b,mob_c,Params);
  FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*UGridD,*UrbGridD, pv_mass,M5,mob_b,mob_c,Params);

  FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF,strange_mass,M5,mob_b,mob_c,Params);
  FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*UGridF,*UrbGridF, pv_mass,M5,mob_b,mob_c,Params);

  RationalActionParams rat_act_params_s;
  rat_act_params_s.inv_pow  = 4; // (M^dag M)^{1/4}
  rat_act_params_s.precision= 60;
  rat_act_params_s.MaxIter  = 10000;
  user_params.rat_quo_s.Export(rat_act_params_s);
  std::cout << GridLogMessage << " Heavy quark bounds check every " << rat_act_params_s.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  //MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
  DoublePrecRHMC Quotient_s(Denominator_sD, Numerator_sD, rat_act_params_s);
  Level1.push_back(&Quotient_s);

  ///////////////////////////////////
  // DSDR action
  ///////////////////////////////////
  RealD dsdr_mass = -1.8;
  //Use same DSDR twists as https://arxiv.org/pdf/1208.4412.pdf
  RealD dsdr_epsilon_f = 0.02; //numerator (in determinant)
  RealD dsdr_epsilon_b = 0.5;
  GparityWilsonTMFermionD Numerator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_f, Params);
  GparityWilsonTMFermionF Numerator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_f, Params);

  GparityWilsonTMFermionD Denominator_DSDR_D(Ud, *UGridD, *UrbGridD, dsdr_mass, dsdr_epsilon_b, Params);
  GparityWilsonTMFermionF Denominator_DSDR_F(Uf, *UGridF, *UrbGridF, dsdr_mass, dsdr_epsilon_b, Params);

  RationalActionParams rat_act_params_DSDR;
  rat_act_params_DSDR.inv_pow  = 2; // (M^dag M)^{1/2}
  rat_act_params_DSDR.precision= 60;
  rat_act_params_DSDR.MaxIter  = 10000;
  user_params.rat_quo_DSDR.Export(rat_act_params_DSDR);
  std::cout << GridLogMessage << "DSDR quark bounds check every " << rat_act_params_DSDR.BoundsCheckFreq << " trajectories (avg)" << std::endl;

  DoublePrecRHMC Quotient_DSDR(Denominator_DSDR_D, Numerator_DSDR_D, rat_act_params_DSDR);
  Level2.push_back(&Quotient_DSDR);

  /////////////////////////////////////////////////////////////
  // Gauge action
  /////////////////////////////////////////////////////////////
  Level3.push_back(&GaugeAction);

  TheHMC.TheAction.push_back(Level1);
  TheHMC.TheAction.push_back(Level2);
  TheHMC.TheAction.push_back(Level3);
  std::cout << GridLogMessage << " Action complete "<< std::endl;

  //Action tuning
  bool tune_rhmc_s=false, eigenrange_s=false,
    tune_rhmc_DSDR=false, eigenrange_DSDR=false,
    check_eofa=false, upper_bound_eofa=false;
  std::string lanc_params_s;
  std::string lanc_params_DSDR;
  for(int i=1;i<argc;i++){
    std::string sarg(argv[i]);
    if(sarg == "--tune_rhmc_s") tune_rhmc_s=true;
    else if(sarg == "--eigenrange_s"){
      assert(i < argc-1);
      eigenrange_s=true;
      lanc_params_s = argv[i+1];
    }
    else if(sarg == "--tune_rhmc_DSDR") tune_rhmc_DSDR=true;
    else if(sarg == "--eigenrange_DSDR"){
      assert(i < argc-1);
      eigenrange_DSDR=true;
      lanc_params_DSDR = argv[i+1];
    }
    else if(sarg == "--check_eofa") check_eofa = true;
    else if(sarg == "--upper_bound_eofa") upper_bound_eofa = true;
  }
  if(tune_rhmc_s || eigenrange_s || tune_rhmc_DSDR || eigenrange_DSDR || check_eofa || upper_bound_eofa) {
    std::cout << GridLogMessage << "Running checks" << std::endl;
    TheHMC.initializeGaugeFieldAndRNGs(Ud);

    std::cout << GridLogMessage << "EOFA action solver action tolerance outer=" << ActionMCG_L.Tolerance << " inner=" << ActionMCG_L.InnerTolerance << std::endl;
    std::cout << GridLogMessage << "EOFA MD solver tolerance outer=" << DerivMCG_L.Tolerance << " inner=" << DerivMCG_L.InnerTolerance << std::endl;

    if(check_eofa)       checkEOFA(EOFA, FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(upper_bound_eofa) upperBoundEOFA(EOFA, FGridD, TheHMC.Resources.GetParallelRNG(), Ud);
    if(eigenrange_s)     computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_s)      checkRHMC<FermionActionD, FermionFieldD, decltype(Quotient_s)>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange");
    if(eigenrange_DSDR)  computeEigenvalues<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField>(lanc_params_DSDR, UGridD, UrbGridD, Ud, Numerator_DSDR_D, TheHMC.Resources.GetParallelRNG());
    if(tune_rhmc_DSDR)   checkRHMC<GparityWilsonTMFermionD, GparityWilsonTMFermionD::FermionField, decltype(Quotient_DSDR)>(UGridD, UrbGridD, Ud, Numerator_DSDR_D, Denominator_DSDR_D, Quotient_DSDR, TheHMC.Resources.GetParallelRNG(), 2, "DSDR");

    std::cout << GridLogMessage << " Done" << std::endl;
    Grid_finalize();
    return 0;
  }

  //Run the HMC
  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
  TheHMC.Run();

  std::cout << GridLogMessage << " Done" << std::endl;
  Grid_finalize();
  return 0;
} // main