
Added support for HMC and binary I/O for a general field

Guido Cossu 2016-10-07 13:37:29 +01:00
parent c065e454c3
commit 11b4c80b27
8 changed files with 517 additions and 187 deletions

View File

@@ -83,7 +83,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #include <Grid/FFT.h>
 #include <Grid/qcd/hmc/NerscCheckpointer.h>
+#include <Grid/qcd/hmc/BinaryCheckpointer.h>
 #include <Grid/qcd/hmc/HmcRunner.h>
+#include <Grid/qcd/hmc/GenericHMCrunner.h>

View File

@@ -160,6 +160,47 @@ class BinaryIO {
     }
   }

+  // Simple classes for precision conversion
+  template <class fobj, class sobj>
+  struct BinarySimpleUnmunger {
+    typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
+    typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
+
+    void operator()(sobj &in, fobj &out, uint32_t &csum) {
+      // take word by word and transform according to the status
+      fobj_stype *out_buffer = (fobj_stype *)&out;
+      sobj_stype *in_buffer = (sobj_stype *)&in;
+      size_t fobj_words = sizeof(out) / sizeof(fobj_stype);
+      size_t sobj_words = sizeof(in) / sizeof(sobj_stype);
+      assert(fobj_words == sobj_words);
+
+      for (unsigned int word = 0; word < sobj_words; word++)
+        out_buffer[word] = in_buffer[word];  // type conversion on the fly
+
+      BinaryIO::Uint32Checksum((uint32_t *)&out, sizeof(out), csum);
+    }
+  };
+
+  template <class fobj, class sobj>
+  struct BinarySimpleMunger {
+    typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
+    typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
+
+    void operator()(fobj &in, sobj &out, uint32_t &csum) {
+      // take word by word and transform according to the status
+      fobj_stype *in_buffer = (fobj_stype *)&in;
+      sobj_stype *out_buffer = (sobj_stype *)&out;
+      size_t fobj_words = sizeof(in) / sizeof(fobj_stype);
+      size_t sobj_words = sizeof(out) / sizeof(sobj_stype);
+      assert(fobj_words == sobj_words);
+
+      for (unsigned int word = 0; word < sobj_words; word++)
+        out_buffer[word] = in_buffer[word];  // type conversion on the fly
+
+      BinaryIO::Uint32Checksum((uint32_t *)&in, sizeof(in), csum);
+    }
+  };
+
 template<class vobj,class fobj,class munger>
 static inline uint32_t readObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
 {
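A standalone sketch (not part of the commit) of the word-by-word precision conversion these mungers perform; SiteDouble and SiteFloat are hypothetical stand-ins for Grid's in-memory (sobj) and file-format (fobj) site tensors:

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    struct SiteFloat  { float  v[4]; };  // hypothetical fobj: 4 single-precision words
    struct SiteDouble { double v[4]; };  // hypothetical sobj: 4 double-precision words

    // Word-by-word narrowing conversion, as in BinarySimpleUnmunger.
    void simple_unmunge(SiteDouble &in, SiteFloat &out) {
      double *in_buffer = (double *)&in;
      float *out_buffer = (float *)&out;
      size_t in_words = sizeof(in) / sizeof(double);
      size_t out_words = sizeof(out) / sizeof(float);
      assert(in_words == out_words);  // same word count, different word width
      for (size_t w = 0; w < in_words; w++)
        out_buffer[w] = in_buffer[w];  // type conversion on the fly
    }

    int main() {
      SiteDouble d = {{1.0, 2.5, -3.25, 4.0}};
      SiteFloat f;
      simple_unmunge(d, f);
      std::cout << f.v[2] << std::endl;  // prints -3.25
    }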
@@ -272,40 +313,44 @@ class BinaryIO {
     return csum;
   }

-  static inline uint32_t writeRNGSerial(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file,int offset)
-  {
+  static inline uint32_t writeRNGSerial(GridSerialRNG &serial,
+                                        GridParallelRNG &parallel,
+                                        std::string file, int offset) {
     typedef typename GridSerialRNG::RngStateType RngStateType;
     const int RngStateCount = GridSerialRNG::RngStateCount;

     GridBase *grid = parallel._grid;
     int gsites = grid->_gsites;

     //////////////////////////////////////////////////
     // Serialise through node zero
     //////////////////////////////////////////////////
-    std::cout<< GridLogMessage<< "Serial RNG write I/O "<< file<<std::endl;
     std::ofstream fout;
     if (grid->IsBoss()) {
       fout.open(file, std::ios::binary | std::ios::out | std::ios::in);
+      if (!fout.is_open()) {
+        std::cout << GridLogMessage << "writeRNGSerial: Error opening file "
+                  << file << std::endl;
+        exit(0);
+      }
       fout.seekp(offset);
     }
+    std::cout << GridLogMessage << "Serial RNG write I/O " << file << std::endl;

     uint32_t csum = 0;
     std::vector<RngStateType> saved(RngStateCount);
     int bytes = sizeof(RngStateType) * saved.size();
     std::vector<int> gcoor;

     for (int gidx = 0; gidx < gsites; gidx++) {
       int rank, o_idx, i_idx;
       grid->GlobalIndexToGlobalCoor(gidx, gcoor);
       grid->GlobalCoorToRankIndex(rank, o_idx, i_idx, gcoor);
       int l_idx = parallel.generator_idx(o_idx, i_idx);

       if (rank == grid->ThisRank()) {
-        // std::cout << "rank" << rank<<" Getting state for index "<<l_idx<<std::endl;
+        // std::cout << "rank" << rank<<" Getting state for index
+        // "<<l_idx<<std::endl;
         parallel.GetState(saved, l_idx);
       }
@@ -315,7 +360,6 @@ class BinaryIO {
         Uint32Checksum((uint32_t *)&saved[0], bytes, csum);
         fout.write((char *)&saved[0], bytes);
       }
     }

     if (grid->IsBoss()) {
@@ -324,6 +368,10 @@ class BinaryIO {
       fout.write((char *)&saved[0], bytes);
     }

     grid->Broadcast(0, (void *)&csum, sizeof(csum));

+    if (grid->IsBoss())
+      fout.close();
+
     return csum;
   }

 static inline uint32_t readRNGSerial(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file,int offset)
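Both the RNG writer and the field writers thread a running uint32_t checksum through every block they touch. The diff does not show BinaryIO::Uint32Checksum itself; a plausible minimal sketch with the same accumulate-across-calls interface (the real implementation may differ) would be:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Accumulate a simple additive word sum into csum; callers pass the same
    // csum through successive blocks, as writeRNGSerial does per site.
    void uint32_checksum(uint32_t *buf, size_t bytes, uint32_t &csum) {
      for (size_t i = 0; i < bytes / sizeof(uint32_t); i++)
        csum += buf[i];
    }

    int main() {
      uint32_t block1[2] = {0x10, 0x20};
      uint32_t block2[2] = {0x01, 0x02};
      uint32_t csum = 0;
      uint32_checksum(block1, sizeof(block1), csum);  // first block
      uint32_checksum(block2, sizeof(block2), csum);  // then the second
      std::cout << std::hex << csum << std::endl;     // prints 33
    }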
@@ -529,8 +577,10 @@ class BinaryIO {
   // Parallel writer
   //////////////////////////////////////////////////////////
   template <class vobj, class fobj, class munger>
-  static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
-  {
+  static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,
+                                             std::string file, munger munge,
+                                             int offset,
+                                             const std::string &format) {
     typedef typename vobj::scalar_object sobj;
     GridBase *grid = Umu._grid;
@@ -540,8 +590,11 @@ class BinaryIO {
     int ieee64 = (format == std::string("IEEE64"));

     if (!(ieee32big || ieee32 || ieee64big || ieee64)) {
-      std::cout << GridLogError << "Unrecognized file format " << format << std::endl;
-      std::cout << GridLogError << "Allowed: IEEE32BIG | IEEE32 | IEEE64BIG | IEEE64" << std::endl;
+      std::cout << GridLogError << "Unrecognized file format " << format
+                << std::endl;
+      std::cout << GridLogError
+                << "Allowed: IEEE32BIG | IEEE32 | IEEE64BIG | IEEE64"
+                << std::endl;
       exit(0);
     }
@@ -560,7 +613,6 @@ class BinaryIO {
     int IOnode = 1;

     for (int d = 0; d < grid->_ndimension; d++) {
       if (d != grid->_ndimension - 1) parallel[d] = 0;

       if (parallel[d]) {
@@ -581,16 +633,17 @@ class BinaryIO {
     {
       uint32_t tmp = IOnode;
       grid->GlobalSum(tmp);
-      std::cout<< GridLogMessage<< "Parallel write I/O from "<< file << " with " <<tmp<< " IOnodes for subslice ";
+      std::cout << GridLogMessage << "Parallel write I/O from " << file
+                << " with " << tmp << " IOnodes for subslice ";
       for (int d = 0; d < grid->_ndimension; d++) {
         std::cout << range[d];
-        if( d< grid->_ndimension-1 )
-          std::cout<< " x ";
+        if (d < grid->_ndimension - 1) std::cout << " x ";
       }
       std::cout << std::endl;
     }

-    GridStopWatch timer; timer.Start();
+    GridStopWatch timer;
+    timer.Start();
     uint64_t bytes = 0;

     int myrank = grid->ThisRank();
@@ -601,22 +654,31 @@ class BinaryIO {
     // Ideally one reader/writer per xy plane and read these contiguously
     // with comms from nominated I/O nodes.
     std::ofstream fout;
-    if ( IOnode ) fout.open(file,std::ios::binary|std::ios::in|std::ios::out);
+    if (IOnode) {
+      fout.open(file, std::ios::binary | std::ios::in | std::ios::out);
+      if (!fout.is_open()) {
+        std::cout << GridLogMessage << "writeObjectParallel: Error opening file " << file
+                  << std::endl;
+        exit(0);
+      }
+    }

     //////////////////////////////////////////////////////////
     // Find the location of each site and send to primary node
-    // Take loop order from Chroma; defines loop order now that NERSC doc no longer
+    // Take loop order from Chroma; defines loop order now that NERSC doc no
+    // longer
     // available (how short sighted is that?)
     //////////////////////////////////////////////////////////

     uint32_t csum = 0;
     fobj fileObj;
-    static sobj siteObj; // static for SHMEM target; otherwise dynamic allocate with AlignedAllocator
+    static sobj siteObj;  // static for SHMEM target; otherwise dynamic allocate
+                          // with AlignedAllocator

     // should aggregate a whole chunk and then write.
-    // need to implement these loops in Nd independent way with a lexico conversion
+    // need to implement these loops in Nd independent way with a lexico
+    // conversion
     for (int tlex = 0; tlex < slice_vol; tlex++) {
       std::vector<int> tsite(nd);  // temporary mixed up site
       std::vector<int> gsite(nd);
       std::vector<int> lsite(nd);
@@ -629,7 +691,6 @@ class BinaryIO {
         gsite[d] = tsite[d] + start[d];  // global site
       }

       /////////////////////////
       // Get the rank of owner of data
       /////////////////////////
@@ -648,14 +709,14 @@ class BinaryIO {
       if (rank != iorank) {  // comms is necessary
         if ((myrank == rank) || (myrank == iorank)) {  // and we have to do it
           // Send to IOrank
-          grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj));
+          grid->SendRecvPacket((void *)&siteObj, (void *)&siteObj, rank, iorank,
+                               sizeof(siteObj));
         }
       }

       grid->Barrier();  // necessary?

       if (myrank == iorank) {
         munge(siteObj, fileObj, csum);

         if (ieee32big) htobe32_v((void *)&fileObj, sizeof(fileObj));
@@ -673,14 +734,19 @@ class BinaryIO {
     grid->GlobalSum(bytes);
     timer.Stop();
-    std::cout<<GridLogPerformance<<"writeObjectParallel: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
-             << (double)bytes / timer.useconds() << " MB/s " << std::endl;
+    std::cout << GridLogPerformance << "writeObjectParallel: wrote " << bytes
+              << " bytes in " << timer.Elapsed() << " "
+              << (double)bytes / timer.useconds() << " MB/s " << std::endl;

+    grid->Barrier();  // necessary?
+    if (IOnode)
+      fout.close();
+
     return csum;
   }
 };
 }
 #endif
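The tlex loop in writeObjectParallel walks the I/O slice with a single lexicographic index and unpacks it into Nd coordinates. A small sketch of that Nd-independent index-to-coordinate conversion (names and dimensions here are illustrative, not Grid's API):

    #include <iostream>
    #include <vector>

    // Unpack a lexicographic index into per-dimension coordinates,
    // fastest-varying dimension first.
    void coor_from_index(std::vector<int> &coor, int index,
                         const std::vector<int> &dims) {
      coor.resize(dims.size());
      for (size_t d = 0; d < dims.size(); d++) {
        coor[d] = index % dims[d];
        index /= dims[d];
      }
    }

    int main() {
      std::vector<int> dims = {4, 4, 4, 8};  // hypothetical local slice
      std::vector<int> coor;
      coor_from_index(coor, 37, dims);       // 37 = 1 + 4*(1 + 4*2)
      for (int c : coor) std::cout << c << " ";  // prints 1 1 2 0
      std::cout << std::endl;
    }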

View File

@@ -45,7 +45,7 @@ namespace QCD {
   static const int Zm = 6;
   static const int Tm = 7;

-  static const int Nc=3;
+  static const int Nc=2;
   static const int Ns=4;
   static const int Nd=4;
   static const int Nhs=2;  // half spinor

View File

@@ -75,6 +75,8 @@ public:
     }
   }

+  ///////////////////////////////////////////////////////////
+  // Move these to another class
   // HMC auxiliary functions
   static inline void generate_momenta(Field& P, GridParallelRNG& pRNG){
     // specific for SU gauge fields
@@ -108,6 +110,17 @@ public:
     return Hsum.real();
   }

+  static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
+    SU<Nc>::HotConfiguration(pRNG, U);
+  }
+
+  static inline void TepidConfiguration(GridParallelRNG &pRNG, Field &U) {
+    SU<Nc>::TepidConfiguration(pRNG, U);
+  }
+
+  static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
+    SU<Nc>::ColdConfiguration(pRNG, U);
+  }
 };

 // Composition with smeared link, bc's etc.. probably need multiple inheritance
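These wrappers let generic HMC code start a gauge configuration through the Implementation policy without ever naming SU<Nc>. A sketch (not in this commit) of the kind of caller they enable:

    // Templated start-up helper: the gauge group is hidden behind the policy.
    #include <Grid/Grid.h>
    #include <string>

    template <class Implementation>
    void initial_configuration(const std::string &start,
                               Grid::GridParallelRNG &pRNG,
                               typename Implementation::Field &U) {
      if (start == "Hot")
        Implementation::HotConfiguration(pRNG, U);    // random links
      else if (start == "Tepid")
        Implementation::TepidConfiguration(pRNG, U);  // small fluctuation around unit
      else
        Implementation::ColdConfiguration(pRNG, U);   // unit gauge
    }

This is exactly the dispatch the GenericHMCrunner below relies on in its StartType branches.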

View File

@@ -36,67 +36,32 @@ directory
 namespace Grid {
 namespace QCD {

-template <class fobj, class sobj>
-struct BinarySimpleUnmunger {
-  typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
-  typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
-
-  void operator()(sobj &in, fobj &out, uint32_t &csum) {
-    // take word by word and transform according to the status
-    fobj_stype* out_buffer = (fobj_stype*)&out;
-    sobj_stype* in_buffer = (sobj_stype*)&in;
-    size_t fobj_words = sizeof(out)/sizeof(fobj_stype);
-    size_t sobj_words = sizeof(in)/sizeof(sobj_stype);
-    assert(fobj_words == sobj_words);
-
-    for (unsigned int word = 0; word < sobj_words; word++)
-      out_buffer[word] = in_buffer[word]; // type conversion on the fly
-
-    BinaryIO::Uint32Checksum((uint32_t*)&out,sizeof(out),csum);
-  }
-};
-
-template <class fobj, class sobj>
-struct BinarySimpleMunger {
-  typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
-  typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
-
-  void operator()(sobj &out, fobj &in, uint32_t &csum) {
-    // take word by word and transform according to the status
-    fobj_stype* in_buffer = (fobj_stype*)&in;
-    sobj_stype* out_buffer = (sobj_stype*)&out;
-    size_t fobj_words = sizeof(in)/sizeof(fobj_stype);
-    size_t sobj_words = sizeof(out)/sizeof(sobj_stype);
-    assert(fobj_words == sobj_words);
-
-    for (unsigned int word = 0; word < sobj_words; word++)
-      out_buffer[word] = in_buffer[word]; // type conversion on the fly
-
-    BinaryIO::Uint32Checksum((uint32_t*)&in,sizeof(in),csum);
-  }
-};
-
+// Simple checkpointer, only binary file
+// Only for the main field in the hmc
 template <class Impl>
 class BinaryHmcCheckpointer : public HmcObservable<typename Impl::Field> {
  private:
   std::string configStem;
   std::string rngStem;
   int SaveInterval;
+  std::string format;

  public:
-  INHERIT_FIELD_TYPES(Impl);  // The Field is a Lattice object
+  INHERIT_FIELD_TYPES(Impl);  // Gets the Field type, a Lattice object

+  // Extract types from the Field
   typedef typename Field::vector_object vobj;
   typedef typename vobj::scalar_object sobj;
   typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
   typedef typename sobj::DoublePrecision sobj_double;

-  BinaryHmcCheckpointer(std::string cf, std::string rn, int savemodulo, const std::string &format)
-      : configStem(cf),
-        rngStem(rn),
-        SaveInterval(savemodulo){};
+  BinaryHmcCheckpointer(std::string cf, std::string rn, int savemodulo,
+                        const std::string &f)
+      : configStem(cf), rngStem(rn), SaveInterval(savemodulo), format(f){};

+  void truncate(std::string file) {
+    std::ofstream fout(file, std::ios::out);
+    fout.close();
+  }

   void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG,
                           GridParallelRNG &pRNG) {
@@ -114,10 +79,15 @@ struct BinarySimpleMunger {
       config = os.str();
     }

-    // Save always in double precision
-    BinarySimpleUnmunger<sobj_double, sobj> munge;
+    BinaryIO::BinarySimpleUnmunger<sobj_double, sobj> munge;
+    truncate(rng);
     BinaryIO::writeRNGSerial(sRNG, pRNG, rng, 0);
-    BinaryIO::writeObjectParallel<vobj, sobj_double>(U, config, munge, 0, format);
+    truncate(config);
+    uint32_t csum = BinaryIO::writeObjectParallel<vobj, sobj_double>(
+        U, config, munge, 0, format);
+
+    std::cout << GridLogMessage << "Written Binary Configuration " << config
+              << " checksum " << std::hex << csum << std::dec << std::endl;
   }
 };
@@ -136,9 +106,13 @@ struct BinarySimpleMunger {
       config = os.str();
     }

-    BinarySimpleMunger<sobj_double, sobj> munge;
-    BinaryIO::readRNGSerial(sRNG, pRNG, rng, header);
-    BinaryIO::readObjectParallel<vobj, sobj_double>(U, config, munge, 0, format);
+    BinaryIO::BinarySimpleMunger<sobj_double, sobj> munge;
+    BinaryIO::readRNGSerial(sRNG, pRNG, rng, 0);
+    uint32_t csum = BinaryIO::readObjectParallel<vobj, sobj_double>(
+        U, config, munge, 0, format);
+
+    std::cout << GridLogMessage << "Read Binary Configuration " << config
+              << " checksum " << std::hex << csum << std::dec << std::endl;
   };
 };
 }
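The new truncate() helper is not cosmetic: the binary writers above open their files with std::ios::in | std::ios::out, a mode that fails when the file does not yet exist, so the checkpointer first creates or empties each file. A minimal demonstration of the behaviour (the filename is illustrative):

    #include <fstream>
    #include <iostream>

    int main() {
      // Create (or empty) the file first, as truncate() does...
      std::ofstream create("ckpoint_demo.bin", std::ios::out);
      create.close();
      // ...so that the subsequent in|out open, which requires an existing
      // file, succeeds.
      std::fstream f("ckpoint_demo.bin",
                     std::ios::binary | std::ios::in | std::ios::out);
      std::cout << (f.is_open() ? "open ok" : "open failed") << std::endl;
    }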

View File

@@ -0,0 +1,173 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/hmc/GenericHmcRunner.h
Copyright (C) 2015
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GENERIC_HMC_RUNNER
#define GENERIC_HMC_RUNNER
namespace Grid {
namespace QCD {
// Virtual class for HMC, specific to gauge theories;
// implement a concrete theory by defining BuildTheAction
template <class Implementation, class RepresentationsPolicy = NoHirep>
class BinaryHmcRunnerTemplate {
public:
INHERIT_FIELD_TYPES(Implementation);
enum StartType_t { ColdStart, HotStart, TepidStart, CheckpointStart };
ActionSet<Field, RepresentationsPolicy> TheAction;
// Add here a vector of HmcObservable
// that can be injected from outside
GridCartesian *UGrid;
GridCartesian *FGrid;
GridRedBlackCartesian *UrbGrid;
GridRedBlackCartesian *FrbGrid;
virtual void BuildTheAction(int argc, char **argv) = 0; // necessary?
void Run(int argc, char **argv) {
StartType_t StartType = HotStart;
std::string arg;
if (GridCmdOptionExists(argv, argv + argc, "--StartType")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--StartType");
if (arg == "HotStart") {
StartType = HotStart;
} else if (arg == "ColdStart") {
StartType = ColdStart;
} else if (arg == "TepidStart") {
StartType = TepidStart;
} else if (arg == "CheckpointStart") {
StartType = CheckpointStart;
} else {
std::cout << GridLogError << "Unrecognized option in --StartType\n";
std::cout
<< GridLogError
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
assert(0);
}
}
int StartTraj = 0;
if (GridCmdOptionExists(argv, argv + argc, "--StartTrajectory")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--StartTrajectory");
std::vector<int> ivec(0);
GridCmdOptionIntVector(arg, ivec);
StartTraj = ivec[0];
}
int NumTraj = 1;
if (GridCmdOptionExists(argv, argv + argc, "--Trajectories")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--Trajectories");
std::vector<int> ivec(0);
GridCmdOptionIntVector(arg, ivec);
NumTraj = ivec[0];
}
int NumThermalizations = 10;
if (GridCmdOptionExists(argv, argv + argc, "--Thermalizations")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--Thermalizations");
std::vector<int> ivec(0);
GridCmdOptionIntVector(arg, ivec);
NumThermalizations = ivec[0];
}
GridSerialRNG sRNG;
GridParallelRNG pRNG(UGrid);
Field U(UGrid);
std::vector<int> SerSeed({1, 2, 3, 4, 5});
std::vector<int> ParSeed({6, 7, 8, 9, 10});
NoSmearing<Implementation> SmearingPolicy;
typedef MinimumNorm2<Implementation, NoSmearing<Implementation>,
RepresentationsPolicy>
IntegratorType; // change here to change the algorithm
IntegratorParameters MDpar(20, 1.0);
IntegratorType MDynamics(UGrid, MDpar, TheAction, SmearingPolicy);
// Checkpoint strategy
int SaveInterval = 1;
std::string format = std::string("IEEE64BIG");
std::string conf_prefix = std::string("ckpoint_lat");
std::string rng_prefix = std::string("ckpoint_rng");
BinaryHmcCheckpointer<Implementation> Checkpoint(conf_prefix, rng_prefix,
SaveInterval, format);
HMCparameters HMCpar;
HMCpar.StartTrajectory = StartTraj;
HMCpar.Trajectories = NumTraj;
HMCpar.NoMetropolisUntil = NumThermalizations;
if (StartType == HotStart) {
// Hot start
HMCpar.MetropolisTest = true;
sRNG.SeedFixedIntegers(SerSeed);
pRNG.SeedFixedIntegers(ParSeed);
Implementation::HotConfiguration(pRNG, U);
} else if (StartType == ColdStart) {
// Cold start
HMCpar.MetropolisTest = true;
sRNG.SeedFixedIntegers(SerSeed);
pRNG.SeedFixedIntegers(ParSeed);
Implementation::ColdConfiguration(pRNG, U);
} else if (StartType == TepidStart) {
// Tepid start
HMCpar.MetropolisTest = true;
sRNG.SeedFixedIntegers(SerSeed);
pRNG.SeedFixedIntegers(ParSeed);
Implementation::TepidConfiguration(pRNG, U);
} else if (StartType == CheckpointStart) {
HMCpar.MetropolisTest = true;
// CheckpointRestart
Checkpoint.CheckpointRestore(StartTraj, U, sRNG, pRNG);
}
SmearingPolicy.set_Field(U);
HybridMonteCarlo<IntegratorType> HMC(HMCpar, MDynamics, sRNG, pRNG, U);
HMC.AddObservable(&Checkpoint);
// Run it
HMC.evolve();
}
};
typedef BinaryHmcRunnerTemplate<PeriodicGimplR> BinaryHmcRunner;
typedef BinaryHmcRunnerTemplate<PeriodicGimplF> BinaryHmcRunnerF;
typedef BinaryHmcRunnerTemplate<PeriodicGimplD> BinaryHmcRunnerD;
template <class RepresentationsPolicy>
using BinaryHmcRunnerTemplateHirep =
BinaryHmcRunnerTemplate<PeriodicGimplR, RepresentationsPolicy>;
}
}
#endif
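Putting the pieces together: everything in Run() above is steered from the command line, so a binary built on this runner would be launched along the lines of (arguments as parsed in Run(); lattice/MPI geometry flags omitted):

    ./Test_hmc_WilsonFermionGauge_Binary --StartType CheckpointStart \
        --StartTrajectory 5 --Trajectories 100 --Thermalizations 10

with --StartType taking one of HotStart, ColdStart, TepidStart, or CheckpointStart.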

View File

@@ -1,5 +1,5 @@
-tests: Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
-EXTRA_PROGRAMS = Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
+tests: Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge_Binary Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
+EXTRA_PROGRAMS = Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge_Binary Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio

 Test_hmc_EODWFRatio_SOURCES=Test_hmc_EODWFRatio.cc
 Test_hmc_EODWFRatio_LDADD=-lGrid
@@ -28,6 +28,9 @@ Test_hmc_RectGauge_LDADD=-lGrid
 Test_hmc_WilsonAdjointFermionGauge_SOURCES=Test_hmc_WilsonAdjointFermionGauge.cc
 Test_hmc_WilsonAdjointFermionGauge_LDADD=-lGrid

+Test_hmc_WilsonFermionGauge_Binary_SOURCES=Test_hmc_WilsonFermionGauge_Binary.cc
+Test_hmc_WilsonFermionGauge_Binary_LDADD=-lGrid
+
 Test_hmc_WilsonFermionGauge_SOURCES=Test_hmc_WilsonFermionGauge.cc
 Test_hmc_WilsonFermionGauge_LDADD=-lGrid
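Since the new test sits in EXTRA_PROGRAMS, it is not built by default; under the generated Makefile it should be reachable explicitly, e.g.

    make Test_hmc_WilsonFermionGauge_Binary

or via make tests to build the whole list declared above.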

View File

@@ -0,0 +1,99 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_WilsonFermionGauge.cc
Copyright (C) 2015
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
namespace Grid {
namespace QCD {
class HmcRunner : public BinaryHmcRunner {
public:
void BuildTheAction(int argc, char **argv)
{
typedef WilsonImplR ImplPolicy;
typedef WilsonFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
UGrid = SpaceTimeGrid::makeFourDimGrid(
GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()),
GridDefaultMpi());
UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
FGrid = UGrid;
FrbGrid = UrbGrid;
// temporarily need a gauge field
LatticeGaugeField U(UGrid);
// Gauge action
WilsonGaugeActionR Waction(5.6);
Real mass = -0.77;
FermionAction FermOp(U, *FGrid, *FrbGrid, mass);
ConjugateGradient<FermionField> CG(1.0e-8, 10000);
TwoFlavourPseudoFermionAction<ImplPolicy> Nf2(FermOp, CG, CG);
// Set smearing (true/false), default: false
Nf2.is_smeared = true;
// Collect actions
ActionLevel<LatticeGaugeField> Level1(1);
Level1.push_back(&Nf2);
ActionLevel<LatticeGaugeField> Level2(4);
Level2.push_back(&Waction);
TheAction.push_back(Level1);
TheAction.push_back(Level2);
Run(argc, argv);
};
};
}
}
int main(int argc, char **argv) {
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads
<< " threads" << std::endl;
HmcRunner TheHMC;
TheHMC.BuildTheAction(argc, argv);
}