Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-10 07:55:35 +00:00)
Added a logging tag for HMC
As the integrator logger is active by default, the cmdline option to activate it had no effect. Changed the option to *de*activate on request ("NoIntegrator").
Cleaned up the generation of rational approximations in the general RHMC code. As the tolerance of the rational approximation is not related to the CG tolerance, regenerating the approximations for MD and MC when they differ only by the CG tolerance is unnecessary; this has been fixed.
In the DWF+I Gparity evolution code, added cmdline options to check the rational approximations and to compute the lowest/highest eigenvalues of M^dag M for RHMC tuning.
In the above, changed the integrator layout to a much simpler one that completes much faster; it may need additional tuning.
This commit is contained in:
parent 6cc3ad110c
commit cee6a37639
@@ -292,6 +292,7 @@ public:
 template<class Field>
 class ChebyshevLanczos : public Chebyshev<Field> {
 private:

 std::vector<RealD> Coeffs;
 int order;
 RealD alpha;
@@ -69,6 +69,7 @@ GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
 GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
 GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
 GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
+GridLogger GridLogHMC (1, "HMC", GridLogColours, "BLUE");

 void GridLogConfigure(std::vector<std::string> &logstreams) {
 GridLogError.Active(0);
@@ -79,6 +80,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
 GridLogPerformance.Active(0);
 GridLogIntegrator.Active(1);
 GridLogColours.Active(0);
+GridLogHMC.Active(1);

 for (int i = 0; i < logstreams.size(); i++) {
 if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
@@ -87,7 +89,8 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
 if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
 if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
 if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
 if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
 if (logstreams[i] == std::string("NoIntegrator")) GridLogIntegrator.Active(0);
+if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
 if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
 }
 }
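For illustration only (not part of the commit): a minimal sketch of how the new stream tags are consumed. GridLogConfigure is normally fed from Grid's command-line stream-selection option (the --log option name and the standalone driver below are assumptions of this sketch); the point is that the Integrator and HMC streams are on by default and are now switched off with the "No..." tags rather than switched on with positive tags.

    // Hypothetical standalone example, not code from the repository.
    #include <Grid/Grid.h>
    #include <iostream>

    int main(int argc, char **argv) {
      Grid::Grid_init(&argc, &argv);

      // Streams that are active by default (Integrator, HMC) are disabled with
      // the new "No..." tags; everything else keeps its default state.
      std::vector<std::string> streams = {"NoIntegrator", "NoHMC"};
      Grid::GridLogConfigure(streams);

      std::cout << Grid::GridLogMessage    << "still visible" << std::endl;
      std::cout << Grid::GridLogIntegrator << "suppressed by NoIntegrator" << std::endl;
      std::cout << Grid::GridLogHMC        << "suppressed by NoHMC" << std::endl;

      Grid::Grid_finalize();
      return 0;
    }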
@@ -182,6 +182,7 @@ extern GridLogger GridLogDebug ;
 extern GridLogger GridLogPerformance;
 extern GridLogger GridLogIterative ;
 extern GridLogger GridLogIntegrator ;
+extern GridLogger GridLogHMC;
 extern Colours GridLogColours;

 std::string demangle(const char* name) ;
@@ -79,6 +79,19 @@ NAMESPACE_BEGIN(Grid);
 FermionField PhiEven; // the pseudo fermion field for this trajectory
 FermionField PhiOdd; // the pseudo fermion field for this trajectory

+//Generate the approximation to x^{1/inv_pow} (->approx) and x^{-1/inv_pow} (-> approx_inv) by an approx_degree degree rational approximation
+//CG_tolerance is used to issue a warning if the approximation error is larger than the tolerance of the CG and is otherwise just stored in the MultiShiftFunction for use by the multi-shift
+static void generateApprox(MultiShiftFunction &approx, MultiShiftFunction &approx_inv, int inv_pow, int approx_degree, double CG_tolerance, AlgRemez &remez){
+std::cout<<GridLogMessage << "Generating degree "<< approx_degree<<" approximation for x^(1/" << inv_pow << ")"<<std::endl;
+double error = remez.generateApprox(approx_degree,1,inv_pow);
+if(error > CG_tolerance)
+std::cout<<GridLogMessage << "WARNING: Remez approximation has a larger error " << error << " than the CG tolerance " << CG_tolerance << "! Try increasing the number of poles" << std::endl;

+approx.Init(remez, CG_tolerance,false);
+approx_inv.Init(remez, CG_tolerance,true);
+}

 protected:
 static constexpr bool Numerator = true;
 static constexpr bool Denominator = false;
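Aside, standard RHMC background rather than something this diff adds: AlgRemez returns a degree-d partial-fraction approximation that the MultiShiftFunction stores as shifts and residues for the multi-shift solver, schematically

    x^{\pm 1/p} \approx \alpha_0 + \sum_{k=1}^{d} \frac{\alpha_k}{x + \beta_k},   with p = inv_pow and d = approx_degree,

so the CG_tolerance handed to Init() is a per-shift stopping condition for the solver, independent of the Remez error that triggers the warning in generateApprox above.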
@@ -115,38 +128,14 @@ NAMESPACE_BEGIN(Grid);
 std::cout<<GridLogMessage << action_name() << " initialize: starting" << std::endl;
 AlgRemez remez(param.lo,param.hi,param.precision);

 int inv_pow = param.inv_pow;
 int _2_inv_pow = 2*inv_pow;

 //Generate approximations for action eval

-// MdagM^(+- 1/inv_pow)
-std::cout<<GridLogMessage << "Generating degree "<<param.action_degree<<" and tolerance " << param.action_tolerance << " for x^(1/" << inv_pow << ")"<<std::endl;
-remez.generateApprox(param.action_degree,1,inv_pow);
-ApproxPowerAction.Init(remez,param.action_tolerance,false);
-ApproxNegPowerAction.Init(remez,param.action_tolerance,true);

-// VdagV^(+- 1/(2*inv_pow))
-std::cout<<GridLogMessage << "Generating degree "<<param.action_degree<<" and tolerance " << param.action_tolerance <<" for x^(1/" << _2_inv_pow << ")"<<std::endl;
-remez.generateApprox(param.action_degree,1,_2_inv_pow);
-ApproxHalfPowerAction.Init(remez,param.action_tolerance,false);
-ApproxNegHalfPowerAction.Init(remez,param.action_tolerance,true);
+generateApprox(ApproxPowerAction, ApproxNegPowerAction, param.inv_pow, param.action_degree, param.action_tolerance, remez);
+generateApprox(ApproxHalfPowerAction, ApproxNegHalfPowerAction, 2*param.inv_pow, param.action_degree, param.action_tolerance, remez);

 //Generate approximations for MD
-if(param.md_degree != param.action_degree ||
-param.md_tolerance < param.action_tolerance //no point in finding less precise polynomial if the degree is the same
-){
-// MdagM^(+- 1/inv_pow)
-std::cout<<GridLogMessage << "Generating degree "<<param.md_degree<<" and tolerance " << param.md_tolerance <<" for x^(1/" << inv_pow << ")"<<std::endl;
-remez.generateApprox(param.md_degree,1,inv_pow);
-ApproxPowerMD.Init(remez,param.md_tolerance,false);
-ApproxNegPowerMD.Init(remez,param.md_tolerance,true);

-// VdagV^(+- 1/(2*inv_pow))
-std::cout<<GridLogMessage << "Generating degree "<<param.md_degree<<" and tolerance " << param.md_tolerance <<" for x^(1/" << _2_inv_pow << ")"<<std::endl;
-remez.generateApprox(param.md_degree,1,_2_inv_pow);
-ApproxHalfPowerMD.Init(remez,param.md_tolerance,false);
-ApproxNegHalfPowerMD.Init(remez,param.md_tolerance,true);
+if(param.md_degree != param.action_degree){ //note the CG tolerance is unrelated to the stopping condition of the Remez algorithm
+generateApprox(ApproxPowerMD, ApproxNegPowerMD, param.inv_pow, param.md_degree, param.md_tolerance, remez);
+generateApprox(ApproxHalfPowerMD, ApproxNegHalfPowerMD, 2*param.inv_pow, param.md_degree, param.md_tolerance, remez);
 }else{
 std::cout<<GridLogMessage << "Using same rational approximations for MD as for action evaluation" << std::endl;
 ApproxPowerMD = ApproxPowerAction;
@@ -156,7 +145,6 @@ NAMESPACE_BEGIN(Grid);

 ApproxHalfPowerMD = ApproxHalfPowerAction;
 ApproxNegHalfPowerMD = ApproxNegHalfPowerAction;

 for(int i=0;i<ApproxPowerMD.tolerances.size();i++)
 ApproxNegHalfPowerMD.tolerances[i] = ApproxHalfPowerMD.tolerances[i] = param.md_tolerance;
 }
@@ -115,21 +115,21 @@ private:

 random(sRNG, rn_test);

-std::cout << GridLogMessage
+std::cout << GridLogHMC
 << "--------------------------------------------------\n";
-std::cout << GridLogMessage << "exp(-dH) = " << prob
+std::cout << GridLogHMC << "exp(-dH) = " << prob
 << " Random = " << rn_test << "\n";
-std::cout << GridLogMessage
+std::cout << GridLogHMC
 << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";

 if ((prob > 1.0) || (rn_test <= prob)) { // accepted
-std::cout << GridLogMessage << "Metropolis_test -- ACCEPTED\n";
-std::cout << GridLogMessage
+std::cout << GridLogHMC << "Metropolis_test -- ACCEPTED\n";
+std::cout << GridLogHMC
 << "--------------------------------------------------\n";
 return true;
 } else { // rejected
-std::cout << GridLogMessage << "Metropolis_test -- REJECTED\n";
-std::cout << GridLogMessage
+std::cout << GridLogHMC << "Metropolis_test -- REJECTED\n";
+std::cout << GridLogHMC
 << "--------------------------------------------------\n";
 return false;
 }
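For reference (standard HMC, not specific to this change), the branch above is the usual Metropolis step on the energy change \Delta H = H_1 - H_0:

    P_{\rm acc} = \min\left(1, e^{-\Delta H}\right),

i.e. accept unconditionally when e^{-\Delta H} \ge 1, and otherwise accept when the uniform draw rn_test satisfies rn_test \le e^{-\Delta H}.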
@@ -145,7 +145,7 @@ private:

 std::streamsize current_precision = std::cout.precision();
 std::cout.precision(15);
-std::cout << GridLogMessage << "Total H before trajectory = " << H0 << "\n";
+std::cout << GridLogHMC << "Total H before trajectory = " << H0 << "\n";
 std::cout.precision(current_precision);

 TheIntegrator.integrate(U);
@@ -165,7 +165,7 @@ private:

 std::cout.precision(15);
-std::cout << GridLogMessage << "Total H after trajectory = " << H1
+std::cout << GridLogHMC << "Total H after trajectory = " << H1
 << " dH = " << H1 - H0 << "\n";
 std::cout.precision(current_precision);

@@ -196,9 +196,9 @@ public:
 // Actual updates (evolve a copy Ucopy then copy back eventually)
 unsigned int FinalTrajectory = Params.Trajectories + Params.NoMetropolisUntil + Params.StartTrajectory;
 for (int traj = Params.StartTrajectory; traj < FinalTrajectory; ++traj) {
-std::cout << GridLogMessage << "-- # Trajectory = " << traj << "\n";
+std::cout << GridLogHMC << "-- # Trajectory = " << traj << "\n";
 if (traj < Params.StartTrajectory + Params.NoMetropolisUntil) {
-std::cout << GridLogMessage << "-- Thermalization" << std::endl;
+std::cout << GridLogHMC << "-- Thermalization" << std::endl;
 }

 double t0=usecond();
@@ -210,7 +210,7 @@ public:
 if (Params.MetropolisTest && traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
 accept = metropolis_test(DeltaH);
 } else {
-std::cout << GridLogMessage << "Skipping Metropolis test" << std::endl;
+std::cout << GridLogHMC << "Skipping Metropolis test" << std::endl;
 }

 if (accept)
@@ -219,7 +219,7 @@ public:

 double t1=usecond();
-std::cout << GridLogMessage << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
+std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;

 for (int obs = 0; obs < Observables.size(); obs++) {
@@ -228,7 +228,7 @@ public:
 std::cout << GridLogDebug << "Observables pointer " << Observables[obs] << std::endl;
 Observables[obs]->TrajectoryComplete(traj + 1, Ucur, sRNG, pRNG);
 }
-std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
+std::cout << GridLogHMC << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
 }
 }

@@ -125,7 +125,7 @@ protected:
 force = FieldImplementation::projectForce(force); // Ta for gauge fields
 double end_force = usecond();
 Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites());
-std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;
+std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << " Time step: " << ep << " Impulse average: " << force_abs * ep * HMC_MOMENTUM_DENOMINATOR << std::endl;
 Mom -= force * ep* HMC_MOMENTUM_DENOMINATOR;;
 double end_full = usecond();
 double time_full = (end_full - start_full) / 1e3;
@@ -94,6 +94,142 @@ bool fileExists(const std::string &fn){
 return f.good();
 }

+struct LanczosParameters: Serializable {
+GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
+double, alpha,
+double, beta,
+double, mu,
+int, ord,
+int, n_stop,
+int, n_want,
+int, n_use,
+double, tolerance);

+LanczosParameters() {
+alpha = 35;
+beta = 5;
+mu = 0;
+ord = 100;
+n_stop = 10;
+n_want = 10;
+n_use = 15;
+tolerance = 1e-6;
+}
+};

+template<typename FermionActionD, typename FermionFieldD>
+void computeEigenvalues(std::string param_file,
+GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
+FermionActionD &action, GridParallelRNG &rng){

+LanczosParameters params;
+if(fileExists(param_file)){
+std::cout << GridLogMessage << " Reading " << param_file << std::endl;
+Grid::XmlReader rd(param_file);
+read(rd, "LanczosParameters", params);
+}else if(!GlobalSharedMemory::WorldRank){
+std::cout << GridLogMessage << " File " << param_file << " does not exist" << std::endl;
+std::cout << GridLogMessage << " Writing xml template to " << param_file << ".templ" << std::endl;
+Grid::XmlWriter wr(param_file + ".templ");
+write(wr, "LanczosParameters", params);
+}

+FermionFieldD gauss_o(rbGrid);
+FermionFieldD gauss(Grid);
+gaussian(rng, gauss);
+pickCheckerboard(Odd, gauss_o, gauss);

+action.ImportGauge(latt);

+SchurDiagMooeeOperator<FermionActionD, FermionFieldD> hermop(action);
+PlainHermOp<FermionFieldD> hermop_wrap(hermop);
+//ChebyshevLanczos<FermionFieldD> Cheb(params.alpha, params.beta, params.mu, params.ord);
+assert(params.mu == 0.0);

+Chebyshev<FermionFieldD> Cheb(params.beta*params.beta, params.alpha*params.alpha, params.ord+1);
+FunctionHermOp<FermionFieldD> Cheb_wrap(Cheb, hermop);

+std::cout << "IRL: alpha=" << params.alpha << " beta=" << params.beta << " mu=" << params.mu << " ord=" << params.ord << std::endl;
+ImplicitlyRestartedLanczos<FermionFieldD> IRL(Cheb_wrap, hermop_wrap, params.n_stop, params.n_want, params.n_use, params.tolerance, 10000);

+std::vector<RealD> eval(params.n_use);
+std::vector<FermionFieldD> evec(params.n_use, rbGrid);
+int Nconv;
+IRL.calc(eval, evec, gauss_o, Nconv);

+std::cout << "Eigenvalues:" << std::endl;
+for(int i=0;i<params.n_want;i++){
+std::cout << i << " " << eval[i] << std::endl;
+}
+}

+//Check the quality of the RHMC approx
+template<typename FermionActionD, typename FermionFieldD, typename RHMCtype>
+void checkRHMC(GridCartesian* Grid, GridRedBlackCartesian* rbGrid, const LatticeGaugeFieldD &latt, //expect lattice to have been initialized to something
+FermionActionD &numOp, FermionActionD &denOp, RHMCtype &rhmc, GridParallelRNG &rng,
+int inv_pow, const std::string &quark_descr){

+FermionFieldD gauss_o(rbGrid);
+FermionFieldD gauss(Grid);
+gaussian(rng, gauss);
+pickCheckerboard(Odd, gauss_o, gauss);

+numOp.ImportGauge(latt);
+denOp.ImportGauge(latt);

+typedef typename FermionActionD::Impl_t FermionImplPolicyD;
+SchurDifferentiableOperator<FermionImplPolicyD> MdagM(numOp);
+SchurDifferentiableOperator<FermionImplPolicyD> VdagV(denOp);

+std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
+InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerAction); //use large tolerance to prevent exit on fail; we are trying to tune here!
+std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

+std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
+InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerAction);
+std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

+std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
+InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerAction);
+std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

+std::cout << "Starting: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
+InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerAction);
+std::cout << "Finished: Checking quality of RHMC action approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;

+std::cout << "-------------------------------------------------------------------------------" << std::endl;

+std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;
+InversePowerBoundsCheck(inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegPowerMD);
+std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << inv_pow << std::endl;

+std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;
+InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, MdagM,gauss_o, rhmc.ApproxNegHalfPowerMD);
+std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark numerator and power -1/" << 2*inv_pow << std::endl;

+std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;
+InversePowerBoundsCheck(inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegPowerMD);
+std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << inv_pow << std::endl;

+std::cout << "Starting: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
+InversePowerBoundsCheck(2*inv_pow, 10000, 1e16, VdagV,gauss_o, rhmc.ApproxNegHalfPowerMD);
+std::cout << "Finished: Checking quality of RHMC MD approx for " << quark_descr << " quark denominator and power -1/" << 2*inv_pow << std::endl;
+}

 int main(int argc, char **argv) {
 Grid_init(&argc, &argv);
 int threads = GridThread::GetThreads();
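Usage sketch (the binary name and XML file name are placeholders, and the exact invocation is my reading of the option parsing added near the end of this diff): the new tuning modes are requested on the command line alongside the usual Grid options, e.g.

    ./<hmc-binary> --eigenrange_l lanc_l.xml --tune_rhmc_l

If the XML file passed to --eigenrange_* does not exist, a LanczosParameters template is written to <file>.templ instead; when any of the tuning options is given, the program runs the requested eigenvalue and bounds checks and then finalizes without evolving any trajectories.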
@@ -152,6 +288,7 @@ int main(int argc, char **argv) {
 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
 IntegratorParameters MD;
 typedef ConjugateHMCRunnerD<MinimumNorm2> HMCWrapper; //NB: This is the "Omelyan integrator"
+typedef HMCWrapper::ImplPolicy GaugeImplPolicy;
 MD.name = std::string("MinimumNorm2");
 MD.MDsteps = 5; //5 steps of 0.2 for GP* ensembles
 MD.trajL = 1.0;
@@ -181,7 +318,7 @@ int main(int argc, char **argv) {
 RNGpar.parallel_seeds = "6 7 8 9 10";
 TheHMC.Resources.SetRNGSeeds(RNGpar);

-typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
+typedef PlaquetteMod<GaugeImplPolicy> PlaqObs;
 TheHMC.Resources.AddObservable<PlaqObs>();
 //////////////////////////////////////////////

@@ -208,8 +345,7 @@ int main(int argc, char **argv) {
 // temporarily need a gauge field
 LatticeGaugeFieldD Ud(GridPtrD);
 LatticeGaugeFieldF Uf(GridPtrF);

 //Setup the BCs
 FermionActionD::ImplParams Params;
 for(int i=0;i<Nd-1;i++) Params.twists[i] = user_params.GparityDirs[i]; //G-parity directions
@@ -219,64 +355,25 @@ int main(int argc, char **argv) {
 for(int i=0;i<Nd-1;i++) dirs4[i] = user_params.GparityDirs[i];
 dirs4[Nd-1] = 0; //periodic gauge BC in time

-ConjugateGimplD::setDirections(dirs4); //gauge BC
+GaugeImplPolicy::setDirections(dirs4); //gauge BC

 ////////////////////////////////////
 // Collect actions
 ////////////////////////////////////
-ActionLevel<HMCWrapper::Field> Level1(1); //light quark
-ActionLevel<HMCWrapper::Field> Level2(1); //strange quark
-ActionLevel<HMCWrapper::Field> Level3(8); //gauge (8 increments per step)
+ActionLevel<HMCWrapper::Field> Level1(1); //light quark + strange quark
+ActionLevel<HMCWrapper::Field> Level2(8); //gauge (8 increments per step)

-////////////////////////////////////
-// Strange action
-////////////////////////////////////

-//Use same parameters as used for 16GPX ensembles
-RationalActionParams rat_act_params_s;

-rat_act_params_s.inv_pow = 4; // (M^dag M)^{1/4}
-rat_act_params_s.precision= 60;
-rat_act_params_s.MaxIter = 10000;
-user_params.rat_quo_s.Export(rat_act_params_s);

-//For the 16GPX ensembles we used Hasenbusch mass splitting:
-// det[ (M^dag(0.032) M(0.032)) / (M^dag(1.0) M(1.0)) ]^{1/4} * det[ (M^dag(0.01) M(0.01)) / (M^dag(1.0) M(1.0)) ]^{1/2}
-//=
-// [ det[ (M^dag(0.032) M(0.032)) / (M^dag(1.0) M(1.0)) ]^{1/4} ]^3 * det[ (M^dag(0.01) M(0.01)) / (M^dag(0.032) M(0.032)) ]^{1/2}

-//I don't know if it's actually necessary for the action objects to be independent instances...
-int n_hasenbusch_s = 3;
-std::vector<FermionActionD*> Numerators_sD(n_hasenbusch_s);
-std::vector<FermionActionD*> Denominators_sD(n_hasenbusch_s);
-std::vector<FermionActionF*> Numerators_sF(n_hasenbusch_s);
-std::vector<FermionActionF*> Denominators_sF(n_hasenbusch_s);

-std::vector<MixedPrecRHMC*> Quotients_s(n_hasenbusch_s);

-for(int h=0;h<n_hasenbusch_s;h++){
-Numerators_sD[h] = new FermionActionD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,strange_mass,M5,Params);
-Denominators_sD[h] = new FermionActionD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,pv_mass, M5,Params);

-Numerators_sF[h] = new FermionActionF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,strange_mass,M5,Params);
-Denominators_sF[h] = new FermionActionF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,pv_mass, M5,Params);

-//note I define the numerator operator wrt how they appear in the determinant
-Quotients_s[h] = new MixedPrecRHMC(*Denominators_sD[h], *Numerators_sD[h], *Denominators_sF[h], *Numerators_sF[h], rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
-Level2.push_back(Quotients_s[h]);
-}

 /////////////////////////////////////////////////////////////
 // Light action
 /////////////////////////////////////////////////////////////

 //We don't Hasenbusch the light quark directly, instead the denominator mass is set equal to the strange mass; cf above

 FermionActionD Numerator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, light_mass,M5,Params);
-FermionActionD Denominator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, strange_mass,M5,Params);
+FermionActionD Denominator_lD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);

 FermionActionF Numerator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, light_mass,M5,Params);
-FermionActionF Denominator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, strange_mass,M5,Params);
+FermionActionF Denominator_lF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);

 RationalActionParams rat_act_params_l;
 rat_act_params_l.inv_pow = 2; // (M^dag M)^{1/2}
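A quick arithmetic check of the simplified layout (implied by parameters already in this diff, not new code, and ignoring the internal sub-steps of the MinimumNorm2/Omelyan scheme): with MD.trajL = 1.0 and MD.MDsteps = 5 set earlier, the nominal step sizes are

    \epsilon_{\rm fermion} = \frac{1.0}{5} = 0.2 \;\;\text{for Level1 (light + strange RHMC)}, \qquad \epsilon_{\rm gauge} = \frac{0.2}{8} = 0.025 \;\;\text{for Level2 (multiplier 8)}.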
@@ -287,36 +384,69 @@ int main(int argc, char **argv) {
 MixedPrecRHMC Quotient_l(Denominator_lD, Numerator_lD, Denominator_lF, Numerator_lF, rat_act_params_l, user_params.rat_quo_l.reliable_update_freq);
 Level1.push_back(&Quotient_l);

+////////////////////////////////////
+// Strange action
+////////////////////////////////////
+FermionActionD Numerator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD,strange_mass,M5,Params);
+FermionActionD Denominator_sD(Ud,*FGridD,*FrbGridD,*GridPtrD,*GridRBPtrD, pv_mass,M5,Params);

+FermionActionF Numerator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,strange_mass,M5,Params);
+FermionActionF Denominator_sF(Uf,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF, pv_mass,M5,Params);

+RationalActionParams rat_act_params_s;
+rat_act_params_s.inv_pow = 4; // (M^dag M)^{1/4}
+rat_act_params_s.precision= 60;
+rat_act_params_s.MaxIter = 10000;
+user_params.rat_quo_s.Export(rat_act_params_s);

+MixedPrecRHMC Quotient_s(Denominator_sD, Numerator_sD, Denominator_sF, Numerator_sF, rat_act_params_s, user_params.rat_quo_s.reliable_update_freq);
+Level1.push_back(&Quotient_s);

 /////////////////////////////////////////////////////////////
 // Gauge action
 /////////////////////////////////////////////////////////////
-Level3.push_back(&GaugeAction);
+Level2.push_back(&GaugeAction);
 TheHMC.TheAction.push_back(Level1);
 TheHMC.TheAction.push_back(Level2);
-TheHMC.TheAction.push_back(Level3);
 std::cout << GridLogMessage << " Action complete "<< std::endl;

 /////////////////////////////////////////////////////////////
 // HMC parameters are serialisable

-if(0){
-TheHMC.Resources.AddRNGs();
-ConjugateGimplR::HotConfiguration(TheHMC.Resources.GetParallelRNG(), Ud);
-Quotient_l.refresh(Ud, TheHMC.Resources.GetParallelRNG());
-LatticeGaugeFieldD out(Ud);
-std::cout << GridLogMessage << " Running the derivative "<< std::endl;
-Quotient_l.deriv(Ud,out);
-std::cout << GridLogMessage << " Finished running the derivative "<< std::endl;
-Numerator_lD.Report();
-Denominator_lD.Report();

+//Action tuning
+bool tune_rhmc_l=false, tune_rhmc_s=false, eigenrange_l=false, eigenrange_s=false;
+std::string lanc_params_l, lanc_params_s;
+for(int i=1;i<argc;i++){
+std::string sarg(argv[i]);
+if(sarg == "--tune_rhmc_l") tune_rhmc_l=true;
+else if(sarg == "--tune_rhmc_s") tune_rhmc_s=true;
+else if(sarg == "--eigenrange_l"){
+assert(i < argc-1);
+eigenrange_l=true;
+lanc_params_l = argv[i+1];
+}
+else if(sarg == "--eigenrange_s"){
+assert(i < argc-1);
+eigenrange_s=true;
+lanc_params_s = argv[i+1];
+}
+}
+if(tune_rhmc_l || tune_rhmc_s || eigenrange_l || eigenrange_s){
+TheHMC.initializeGaugeFieldAndRNGs(Ud);
+if(eigenrange_l) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_l, FGridD, FrbGridD, Ud, Numerator_lD, TheHMC.Resources.GetParallelRNG());
+if(eigenrange_s) computeEigenvalues<FermionActionD, FermionFieldD>(lanc_params_s, FGridD, FrbGridD, Ud, Numerator_sD, TheHMC.Resources.GetParallelRNG());
+if(tune_rhmc_l) checkRHMC<FermionActionD, FermionFieldD, MixedPrecRHMC>(FGridD, FrbGridD, Ud, Numerator_lD, Denominator_lD, Quotient_l, TheHMC.Resources.GetParallelRNG(), 2, "light");
+if(tune_rhmc_s) checkRHMC<FermionActionD, FermionFieldD, MixedPrecRHMC>(FGridD, FrbGridD, Ud, Numerator_sD, Denominator_sD, Quotient_s, TheHMC.Resources.GetParallelRNG(), 4, "strange");

+std::cout << GridLogMessage << " Done" << std::endl;
+Grid_finalize();
+return 0;
+}

-if(1){
-std::cout << GridLogMessage << " Running the HMC "<< std::endl;
-TheHMC.Run(); // no smearing
-}
+//Run the HMC
+std::cout << GridLogMessage << " Running the HMC "<< std::endl;
+TheHMC.Run();

 std::cout << GridLogMessage << " Done" << std::endl;
 Grid_finalize();