
Added support for HMC and binary I/O for a general field

Guido Cossu 2016-10-07 13:37:29 +01:00
parent c065e454c3
commit 11b4c80b27
8 changed files with 517 additions and 187 deletions

View File

@@ -83,7 +83,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #include <Grid/FFT.h>
 #include <Grid/qcd/hmc/NerscCheckpointer.h>
+#include <Grid/qcd/hmc/BinaryCheckpointer.h>
 #include <Grid/qcd/hmc/HmcRunner.h>
+#include <Grid/qcd/hmc/GenericHMCrunner.h>

View File

@@ -159,7 +159,48 @@ class BinaryIO {
       csum=csum+buf[i];
     }
   }
+
+  // Simple classes for precision conversion
+  template <class fobj, class sobj>
+  struct BinarySimpleUnmunger {
+    typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
+    typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
+
+    void operator()(sobj &in, fobj &out, uint32_t &csum) {
+      // take word by word and transform according to the status
+      fobj_stype *out_buffer = (fobj_stype *)&out;
+      sobj_stype *in_buffer = (sobj_stype *)&in;
+      size_t fobj_words = sizeof(out) / sizeof(fobj_stype);
+      size_t sobj_words = sizeof(in) / sizeof(sobj_stype);
+      assert(fobj_words == sobj_words);
+
+      for (unsigned int word = 0; word < sobj_words; word++)
+        out_buffer[word] = in_buffer[word];  // type conversion on the fly
+
+      BinaryIO::Uint32Checksum((uint32_t *)&out, sizeof(out), csum);
+    }
+  };
+
+  template <class fobj, class sobj>
+  struct BinarySimpleMunger {
+    typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
+    typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
+
+    void operator()(fobj &in, sobj &out, uint32_t &csum) {
+      // take word by word and transform according to the status
+      fobj_stype *in_buffer = (fobj_stype *)&in;
+      sobj_stype *out_buffer = (sobj_stype *)&out;
+      size_t fobj_words = sizeof(in) / sizeof(fobj_stype);
+      size_t sobj_words = sizeof(out) / sizeof(sobj_stype);
+      assert(fobj_words == sobj_words);
+
+      for (unsigned int word = 0; word < sobj_words; word++)
+        out_buffer[word] = in_buffer[word];  // type conversion on the fly
+
+      BinaryIO::Uint32Checksum((uint32_t *)&in, sizeof(in), csum);
+    }
+  };
+
   template<class vobj,class fobj,class munger>
   static inline uint32_t readObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
   {
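The BinarySimpleUnmunger / BinarySimpleMunger pair added above converts field data word by word between working precision (sobj) and file precision (fobj), checksumming the file-side bytes as it goes. The following is a self-contained sketch of the same pattern; Payload64/Payload32 and the helper names are illustrative stand-ins, not Grid types:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Payload64 { double v[4]; };  // stand-in for the working-precision sobj
struct Payload32 { float v[4]; };   // stand-in for the file-precision fobj

// Additive 32-bit checksum over a buffer, in the style of Uint32Checksum above.
static void uint32Checksum(const void *buf, size_t bytes, uint32_t &csum) {
  const uint32_t *words = (const uint32_t *)buf;
  for (size_t i = 0; i < bytes / sizeof(uint32_t); i++) csum += words[i];
}

// Word-by-word narrowing conversion, mirroring BinarySimpleUnmunger.
static void unmunge(const Payload64 &in, Payload32 &out, uint32_t &csum) {
  size_t words = sizeof(in.v) / sizeof(double);
  assert(words == sizeof(out.v) / sizeof(float));
  for (size_t w = 0; w < words; w++)
    out.v[w] = (float)in.v[w];  // type conversion on the fly
  uint32Checksum(&out, sizeof(out), csum);  // checksum the file-side object
}

int main() {
  Payload64 in = {{1.0, 2.5, -3.0, 4.25}};
  Payload32 out;
  uint32_t csum = 0;
  unmunge(in, out, csum);
  printf("out.v[3] = %f, csum = %08x\n", out.v[3], csum);
  return 0;
}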
@@ -272,58 +313,65 @@ class BinaryIO {
     return csum;
   }

-  static inline uint32_t writeRNGSerial(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file,int offset)
-  {
+  static inline uint32_t writeRNGSerial(GridSerialRNG &serial,
+                                        GridParallelRNG &parallel,
+                                        std::string file, int offset) {
     typedef typename GridSerialRNG::RngStateType RngStateType;
     const int RngStateCount = GridSerialRNG::RngStateCount;
     GridBase *grid = parallel._grid;
     int gsites = grid->_gsites;

     //////////////////////////////////////////////////
     // Serialise through node zero
     //////////////////////////////////////////////////
-    std::cout<< GridLogMessage<< "Serial RNG write I/O "<< file<<std::endl;
     std::ofstream fout;
-    if ( grid->IsBoss() ) {
-      fout.open(file,std::ios::binary|std::ios::out|std::ios::in);
+    if (grid->IsBoss()) {
+      fout.open(file, std::ios::binary | std::ios::out | std::ios::in);
+      if (!fout.is_open()) {
+        std::cout << GridLogMessage << "writeRNGSerial: Error opening file "
+                  << file << std::endl;
+        exit(0);
+      }
       fout.seekp(offset);
     }

-    uint32_t csum=0;
+    std::cout << GridLogMessage << "Serial RNG write I/O " << file << std::endl;
+    uint32_t csum = 0;
     std::vector<RngStateType> saved(RngStateCount);
-    int bytes = sizeof(RngStateType)*saved.size();
+    int bytes = sizeof(RngStateType) * saved.size();
     std::vector<int> gcoor;

-    for(int gidx=0;gidx<gsites;gidx++){
-      int rank,o_idx,i_idx;
-      grid->GlobalIndexToGlobalCoor(gidx,gcoor);
-      grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
-      int l_idx=parallel.generator_idx(o_idx,i_idx);
+    for (int gidx = 0; gidx < gsites; gidx++) {
+      int rank, o_idx, i_idx;
+      grid->GlobalIndexToGlobalCoor(gidx, gcoor);
+      grid->GlobalCoorToRankIndex(rank, o_idx, i_idx, gcoor);
+      int l_idx = parallel.generator_idx(o_idx, i_idx);

-      if( rank == grid->ThisRank() ){
-        // std::cout << "rank" << rank<<" Getting state for index "<<l_idx<<std::endl;
-        parallel.GetState(saved,l_idx);
+      if (rank == grid->ThisRank()) {
+        // std::cout << "rank" << rank<<" Getting state for index
+        // "<<l_idx<<std::endl;
+        parallel.GetState(saved, l_idx);
       }

-      grid->Broadcast(rank,(void *)&saved[0],bytes);
+      grid->Broadcast(rank, (void *)&saved[0], bytes);

-      if ( grid->IsBoss() ) {
-        Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
-        fout.write((char *)&saved[0],bytes);
+      if (grid->IsBoss()) {
+        Uint32Checksum((uint32_t *)&saved[0], bytes, csum);
+        fout.write((char *)&saved[0], bytes);
       }
     }

-    if ( grid->IsBoss() ) {
-      serial.GetState(saved,0);
-      Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
-      fout.write((char *)&saved[0],bytes);
+    if (grid->IsBoss()) {
+      serial.GetState(saved, 0);
+      Uint32Checksum((uint32_t *)&saved[0], bytes, csum);
+      fout.write((char *)&saved[0], bytes);
     }

-    grid->Broadcast(0,(void *)&csum,sizeof(csum));
+    grid->Broadcast(0, (void *)&csum, sizeof(csum));
+
+    if (grid->IsBoss())
+      fout.close();

     return csum;
   }

   static inline uint32_t readRNGSerial(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file,int offset)
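For orientation, writeRNGSerial funnels every generator state through the boss node and lays them out as one fixed-size record per global site, followed by the serial generator's state. A standalone sketch of the resulting layout arithmetic (illustrative, not Grid code):

#include <cstdint>
#include <cstdio>

// Layout written above, starting at byte `offset`:
//   state(site 0) | state(site 1) | ... | state(site gsites-1) | serial state
// with each state being RngStateCount words of sizeof(RngStateType) bytes.
static uint64_t rngRecordBytes(uint64_t gsites, uint64_t rngStateCount,
                               uint64_t wordBytes) {
  return (gsites + 1) * rngStateCount * wordBytes;  // +1 for the serial RNG
}

int main() {
  // Example: an 8^4 lattice with a hypothetical 10-word state of 4-byte words.
  uint64_t gsites = 8ULL * 8 * 8 * 8;
  printf("RNG record: %llu bytes\n",
         (unsigned long long)rngRecordBytes(gsites, 10, 4));
  return 0;
}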
@@ -528,30 +576,35 @@ class BinaryIO {
   //////////////////////////////////////////////////////////
   // Parallel writer
   //////////////////////////////////////////////////////////
-  template<class vobj,class fobj,class munger>
-  static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
-  {
+  template <class vobj, class fobj, class munger>
+  static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,
+                                             std::string file, munger munge,
+                                             int offset,
+                                             const std::string &format) {
     typedef typename vobj::scalar_object sobj;
     GridBase *grid = Umu._grid;

     int ieee32big = (format == std::string("IEEE32BIG"));
     int ieee32 = (format == std::string("IEEE32"));
     int ieee64big = (format == std::string("IEEE64BIG"));
     int ieee64 = (format == std::string("IEEE64"));

-    if(!(ieee32big || ieee32 || ieee64big || ieee64)){
-      std::cout << GridLogError << "Unrecognized file format " << format << std::endl;
-      std::cout << GridLogError << "Allowed: IEEE32BIG | IEEE32 | IEEE64BIG | IEEE64" << std::endl;
+    if (!(ieee32big || ieee32 || ieee64big || ieee64)) {
+      std::cout << GridLogError << "Unrecognized file format " << format
+                << std::endl;
+      std::cout << GridLogError
+                << "Allowed: IEEE32BIG | IEEE32 | IEEE64BIG | IEEE64"
+                << std::endl;
       exit(0);
     }

     int nd = grid->_ndimension;
-    for(int d=0;d<nd;d++){
+    for (int d = 0; d < nd; d++) {
       assert(grid->CheckerBoarded(d) == 0);
     }

-    std::vector<int> parallel(nd,1);
-    std::vector<int> ioproc (nd);
+    std::vector<int> parallel(nd, 1);
+    std::vector<int> ioproc(nd);
     std::vector<int> start(nd);
     std::vector<int> range(nd);
@@ -559,39 +612,39 @@ class BinaryIO {
     int IOnode = 1;

-    for(int d=0;d<grid->_ndimension;d++) {
-      if ( d!= grid->_ndimension-1 ) parallel[d] = 0;
+    for (int d = 0; d < grid->_ndimension; d++) {
+      if (d != grid->_ndimension - 1) parallel[d] = 0;

       if (parallel[d]) {
         range[d] = grid->_ldimensions[d];
-        start[d] = grid->_processor_coor[d]*range[d];
-        ioproc[d]= grid->_processor_coor[d];
+        start[d] = grid->_processor_coor[d] * range[d];
+        ioproc[d] = grid->_processor_coor[d];
       } else {
         range[d] = grid->_gdimensions[d];
         start[d] = 0;
-        ioproc[d]= 0;
-        if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
+        ioproc[d] = 0;
+        if (grid->_processor_coor[d] != 0) IOnode = 0;
       }
       slice_vol = slice_vol * range[d];
     }

     {
       uint32_t tmp = IOnode;
       grid->GlobalSum(tmp);
-      std::cout<< GridLogMessage<< "Parallel write I/O from "<< file << " with " <<tmp<< " IOnodes for subslice ";
-      for(int d=0;d<grid->_ndimension;d++){
-        std::cout<< range[d];
-        if( d< grid->_ndimension-1 )
-          std::cout<< " x ";
+      std::cout << GridLogMessage << "Parallel write I/O from " << file
+                << " with " << tmp << " IOnodes for subslice ";
+      for (int d = 0; d < grid->_ndimension; d++) {
+        std::cout << range[d];
+        if (d < grid->_ndimension - 1) std::cout << " x ";
       }
       std::cout << std::endl;
     }

-    GridStopWatch timer; timer.Start();
-    uint64_t bytes=0;
+    GridStopWatch timer;
+    timer.Start();
+    uint64_t bytes = 0;

     int myrank = grid->ThisRank();
     int iorank = grid->RankFromProcessorCoor(ioproc);
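The decomposition above keeps only the last dimension parallel, so each I/O node owns a full global subslice of the remaining dimensions. A self-contained sketch of the same bookkeeping with toy numbers (illustrative, not Grid code):

#include <cstdio>
#include <vector>

int main() {
  // Toy 4-d grid: global 8^4, 2 ranks along the last dimension only,
  // mirroring the "parallel only in the last dimension" choice above.
  int nd = 4;
  std::vector<int> gdims(nd, 8), ldims = {8, 8, 8, 4};
  std::vector<int> pcoor = {0, 0, 0, 1};  // this rank's processor coordinate
  std::vector<int> parallel(nd, 1), range(nd), start(nd), ioproc(nd);
  int IOnode = 1, slice_vol = 1;
  for (int d = 0; d < nd; d++) {
    if (d != nd - 1) parallel[d] = 0;
    if (parallel[d]) {
      range[d] = ldims[d];
      start[d] = pcoor[d] * range[d];
      ioproc[d] = pcoor[d];
    } else {
      range[d] = gdims[d];
      start[d] = 0;
      ioproc[d] = 0;
      if (pcoor[d] != 0) IOnode = 0;
    }
    slice_vol *= range[d];
  }
  printf("IOnode=%d slice_vol=%d start[3]=%d\n", IOnode, slice_vol, start[3]);
  // -> IOnode=1 slice_vol=2048 start[3]=4 : this rank writes sites t=4..7
  return 0;
}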
@@ -601,71 +654,79 @@ class BinaryIO {
     // Ideally one reader/writer per xy plane and read these contiguously
     // with comms from nominated I/O nodes.

     std::ofstream fout;
-    if ( IOnode ) fout.open(file,std::ios::binary|std::ios::in|std::ios::out);
+    if (IOnode) {
+      fout.open(file, std::ios::binary | std::ios::in | std::ios::out);
+      if (!fout.is_open()) {
+        std::cout << GridLogMessage << "writeObjectParallel: Error opening file "
+                  << file << std::endl;
+        exit(0);
+      }
+    }

     //////////////////////////////////////////////////////////
     // Find the location of each site and send to primary node
-    // Take loop order from Chroma; defines loop order now that NERSC doc no longer
+    // Take loop order from Chroma; defines loop order now that NERSC doc no
+    // longer
     // available (how short sighted is that?)
     //////////////////////////////////////////////////////////

-    uint32_t csum=0;
+    uint32_t csum = 0;
     fobj fileObj;
-    static sobj siteObj;  // static for SHMEM target; otherwise dynamic allocate with AlignedAllocator
+    static sobj siteObj;  // static for SHMEM target; otherwise dynamic allocate
+                          // with AlignedAllocator

     // should aggregate a whole chunk and then write.
-    // need to implement these loops in Nd independent way with a lexico conversion
-    for(int tlex=0;tlex<slice_vol;tlex++){
+    // need to implement these loops in Nd independent way with a lexico
+    // conversion
+    for (int tlex = 0; tlex < slice_vol; tlex++) {
       std::vector<int> tsite(nd);  // temporary mixed up site
       std::vector<int> gsite(nd);
       std::vector<int> lsite(nd);
       std::vector<int> iosite(nd);

-      Lexicographic::CoorFromIndex(tsite,tlex,range);
+      Lexicographic::CoorFromIndex(tsite, tlex, range);

-      for(int d=0;d<nd;d++){
-        lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
-        gsite[d] = tsite[d]+start[d];               // global site
+      for (int d = 0; d < nd; d++) {
+        lsite[d] = tsite[d] % grid->_ldimensions[d];  // local site
+        gsite[d] = tsite[d] + start[d];               // global site
       }

       /////////////////////////
       // Get the rank of owner of data
       /////////////////////////
-      int rank, o_idx,i_idx, g_idx;
-      grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gsite);
-      grid->GlobalCoorToGlobalIndex(gsite,g_idx);
+      int rank, o_idx, i_idx, g_idx;
+      grid->GlobalCoorToRankIndex(rank, o_idx, i_idx, gsite);
+      grid->GlobalCoorToGlobalIndex(gsite, g_idx);

       ////////////////////////////////
       // iorank writes from the seek
       ////////////////////////////////

       // Owner of data peeks it
-      peekLocalSite(siteObj,Umu,lsite);
+      peekLocalSite(siteObj, Umu, lsite);

       // Pair of nodes may need to do pt2pt send
-      if ( rank != iorank ) {  // comms is necessary
-        if ( (myrank == rank) || (myrank==iorank) ) {  // and we have to do it
-          // Send to IOrank
-          grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj));
-        }
+      if (rank != iorank) {  // comms is necessary
+        if ((myrank == rank) || (myrank == iorank)) {  // and we have to do it
+          // Send to IOrank
+          grid->SendRecvPacket((void *)&siteObj, (void *)&siteObj, rank, iorank,
+                               sizeof(siteObj));
+        }
       }

       grid->Barrier();  // necessary?

       if (myrank == iorank) {
-        munge(siteObj,fileObj,csum);
+        munge(siteObj, fileObj, csum);

-        if(ieee32big) htobe32_v((void *)&fileObj,sizeof(fileObj));
-        if(ieee32) htole32_v((void *)&fileObj,sizeof(fileObj));
-        if(ieee64big) htobe64_v((void *)&fileObj,sizeof(fileObj));
-        if(ieee64) htole64_v((void *)&fileObj,sizeof(fileObj));
-
-        fout.seekp(offset+g_idx*sizeof(fileObj));
-        fout.write((char *)&fileObj,sizeof(fileObj));
-        bytes+=sizeof(fileObj);
+        if (ieee32big) htobe32_v((void *)&fileObj, sizeof(fileObj));
+        if (ieee32) htole32_v((void *)&fileObj, sizeof(fileObj));
+        if (ieee64big) htobe64_v((void *)&fileObj, sizeof(fileObj));
+        if (ieee64) htole64_v((void *)&fileObj, sizeof(fileObj));

+        fout.seekp(offset + g_idx * sizeof(fileObj));
+        fout.write((char *)&fileObj, sizeof(fileObj));
+        bytes += sizeof(fileObj);
       }
     }
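The lexicographic index-to-coordinate conversion used in that loop is the usual mixed-radix decode, fastest-varying dimension first. A standalone sketch (illustrative, not Grid's Lexicographic::CoorFromIndex itself):

#include <cstdio>
#include <vector>

// Decode a flat index into a coordinate given per-dimension extents.
static void coorFromIndex(std::vector<int> &coor, int index,
                          const std::vector<int> &dims) {
  coor.resize(dims.size());
  for (size_t d = 0; d < dims.size(); d++) {
    coor[d] = index % dims[d];
    index /= dims[d];
  }
}

int main() {
  std::vector<int> coor, dims = {8, 8, 8, 4};
  coorFromIndex(coor, 100, dims);  // 100 = 4 + 8*(4 + 8*1) -> {4, 4, 1, 0}
  printf("{%d, %d, %d, %d}\n", coor[0], coor[1], coor[2], coor[3]);
  return 0;
}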
@@ -673,14 +734,19 @@ class BinaryIO {
     grid->GlobalSum(bytes);
     timer.Stop();
-    std::cout<<GridLogPerformance<<"writeObjectParallel: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
-             << (double)bytes/timer.useconds() <<" MB/s " <<std::endl;
+    std::cout << GridLogPerformance << "writeObjectParallel: wrote " << bytes
+              << " bytes in " << timer.Elapsed() << " "
+              << (double)bytes / timer.useconds() << " MB/s " << std::endl;

     grid->Barrier();  // necessary?

+    if (IOnode)
+      fout.close();
+
     return csum;
   }
 };
 }
 #endif
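The htobe32_v / htole32_v style calls in the writer byte-swap an entire buffer, word by word, to the requested file endianness. A self-contained sketch of a 32-bit swap to big-endian (illustrative, not Grid's helpers):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Rewrite every 32-bit word of a buffer in big-endian byte order, in place,
// regardless of the host's native endianness (illustrative sketch).
static void toBigEndian32(void *buf, size_t bytes) {
  uint8_t *p = (uint8_t *)buf;
  for (size_t i = 0; i + 4 <= bytes; i += 4) {
    uint32_t w;
    memcpy(&w, p + i, 4);
    uint8_t be[4] = {(uint8_t)(w >> 24), (uint8_t)(w >> 16),
                     (uint8_t)(w >> 8), (uint8_t)w};
    memcpy(p + i, be, 4);
  }
}

int main() {
  uint32_t x = 0x01020304;
  toBigEndian32(&x, sizeof(x));
  uint8_t b[4];
  memcpy(b, &x, 4);
  printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);  // 01 02 03 04
  return 0;
}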

View File

@@ -45,7 +45,7 @@ namespace QCD {
 static const int Zm = 6;
 static const int Tm = 7;

-static const int Nc=3;
+static const int Nc=2;
 static const int Ns=4;
 static const int Nd=4;
 static const int Nhs=2;  // half spinor

View File

@@ -75,6 +75,8 @@ public:
     }
   }
   ///////////////////////////////////////////////////////////
+  // Move these to another class
+  // HMC auxiliary functions
   static inline void generate_momenta(Field& P, GridParallelRNG& pRNG){
     // specific for SU gauge fields
@@ -108,6 +110,17 @@ public:
     return Hsum.real();
   }

+  static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
+    SU<Nc>::HotConfiguration(pRNG, U);
+  }
+
+  static inline void TepidConfiguration(GridParallelRNG &pRNG, Field &U) {
+    SU<Nc>::TepidConfiguration(pRNG, U);
+  }
+
+  static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
+    SU<Nc>::ColdConfiguration(pRNG, U);
+  }
 };

 // Composition with smeared link, bc's etc.. probably need multiple inheritance
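For intuition, the three start types delegated to SU<Nc> above differ only in how far the initial links sit from the identity: cold is the identity, hot is fully random, tepid is a small random fluctuation. A toy U(1)-phase analogue, illustrative only:

#include <complex>
#include <cstdio>
#include <random>
#include <vector>

// Toy analogue of Cold/Hot/Tepid configurations using U(1) phases:
// spread 0 -> identity, small spread -> tepid, spread pi -> hot.
static std::vector<std::complex<double>> makeConfig(size_t sites, double spread) {
  std::mt19937 rng(42);
  std::uniform_real_distribution<double> u(-spread, spread);
  std::vector<std::complex<double>> U(sites);
  for (auto &link : U) {
    double phase = (spread > 0.0) ? u(rng) : 0.0;
    link = std::polar(1.0, phase);
  }
  return U;
}

int main() {
  const double pi = 3.141592653589793;
  auto cold = makeConfig(16, 0.0);   // all links exactly 1
  auto tepid = makeConfig(16, 0.1);  // close to the identity
  auto hot = makeConfig(16, pi);     // anywhere on the circle
  printf("cold[0]=%.2f tepid[0]=%.2f hot[0]=%.2f (real parts)\n",
         cold[0].real(), tepid[0].real(), hot[0].real());
  return 0;
}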

View File

@@ -36,93 +36,36 @@ directory
 namespace Grid {
 namespace QCD {

-template <class fobj, class sobj>
-struct BinarySimpleUnmunger {
-  typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
-  typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
-
-  void operator()(sobj &in, fobj &out, uint32_t &csum) {
-    // take word by word and transform accoding to the status
-    fobj_stype* out_buffer = (fobj_stype*)&out;
-    sobj_stype* in_buffer = (sobj_stype*)&in;
-    size_t fobj_words = sizeof(out)/sizeof(fobj_stype);
-    size_t sobj_words = sizeof(in)/sizeof(sobj_stype);
-    assert(fobj_words == sobj_words);
-
-    for (unsigned int word = 0; word < sobj_words; word++)
-      out_buffer[word] = in_buffer[word];  // type conversion on the fly
-
-    BinaryIO::Uint32Checksum((uint32_t*)&out,sizeof(out),csum);
-  }
-};
-
-template <class fobj, class sobj>
-struct BinarySimpleMunger {
-  typedef typename getPrecision<fobj>::real_scalar_type fobj_stype;
-  typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
-
-  void operator()(sobj &out, fobj &in, uint32_t &csum) {
-    // take word by word and transform accoding to the status
-    fobj_stype* in_buffer = (fobj_stype*)&in;
-    sobj_stype* out_buffer = (sobj_stype*)&out;
-    size_t fobj_words = sizeof(in)/sizeof(fobj_stype);
-    size_t sobj_words = sizeof(out)/sizeof(sobj_stype);
-    assert(fobj_words == sobj_words);
-
-    for (unsigned int word = 0; word < sobj_words; word++)
-      out_buffer[word] = in_buffer[word];  // type conversion on the fly
-
-    BinaryIO::Uint32Checksum((uint32_t*)&in,sizeof(in),csum);
-  }
-};
-
-// Only for the main field in the hmc
-template <class Impl>
-class BinaryHmcCheckpointer : public HmcObservable<typename Impl::Field> {
- private:
-  std::string configStem;
-  std::string rngStem;
-  int SaveInterval;
-
- public:
-  INHERIT_FIELD_TYPES(Impl);  // The Field is a Lattice object
-
-  typedef typename Field::vector_object vobj;
-  typedef typename vobj::scalar_object sobj;
-  typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
-  typedef typename sobj::DoublePrecision sobj_double;
-
-  BinaryHmcCheckpointer(std::string cf, std::string rn, int savemodulo, const std::string &format)
-      : configStem(cf),
-        rngStem(rn),
-        SaveInterval(savemodulo){};
-
-  void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG,
-                          GridParallelRNG &pRNG) {
-    if ((traj % SaveInterval) == 0) {
-      std::string rng;
-      {
-        std::ostringstream os;
-        os << rngStem << "." << traj;
-        rng = os.str();
-      }
-      std::string config;
-      {
-        std::ostringstream os;
-        os << configStem << "." << traj;
-        config = os.str();
-      }
-
-      // Save always in double precision
-      BinarySimpleUnmunger<sobj_double, sobj> munge;
-      BinaryIO::writeRNGSerial(sRNG, pRNG, rng, 0);
-      BinaryIO::writeObjectParallel<vobj, sobj_double>(U, config, munge, 0, format);
-    }
-  };
-
-  void CheckpointRestore(int traj, Field &U, GridSerialRNG &sRNG,
-                         GridParallelRNG &pRNG) {
+// Simple checkpointer, only binary file
+template <class Impl>
+class BinaryHmcCheckpointer : public HmcObservable<typename Impl::Field> {
+ private:
+  std::string configStem;
+  std::string rngStem;
+  int SaveInterval;
+  std::string format;

+ public:
+  INHERIT_FIELD_TYPES(Impl);  // Gets the Field type, a Lattice object
+  // Extract types from the Field
+  typedef typename Field::vector_object vobj;
+  typedef typename vobj::scalar_object sobj;
+  typedef typename getPrecision<sobj>::real_scalar_type sobj_stype;
+  typedef typename sobj::DoublePrecision sobj_double;

+  BinaryHmcCheckpointer(std::string cf, std::string rn, int savemodulo,
+                        const std::string &f)
+      : configStem(cf), rngStem(rn), SaveInterval(savemodulo), format(f){};

+  void truncate(std::string file) {
+    std::ofstream fout(file, std::ios::out);
+    fout.close();
+  }

+  void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG,
+                          GridParallelRNG &pRNG) {
+    if ((traj % SaveInterval) == 0) {
      std::string rng;
      {
        std::ostringstream os;
@@ -136,11 +79,42 @@ struct BinarySimpleMunger {
       config = os.str();
     }

-    BinarySimpleMunger<sobj_double, sobj> munge;
-    BinaryIO::readRNGSerial(sRNG, pRNG, rng, header);
-    BinaryIO::readObjectParallel<vobj, sobj_double>(U, config, munge, 0, format);
-  };
+      BinaryIO::BinarySimpleUnmunger<sobj_double, sobj> munge;
+      truncate(rng);
+      BinaryIO::writeRNGSerial(sRNG, pRNG, rng, 0);
+      truncate(config);
+      uint32_t csum = BinaryIO::writeObjectParallel<vobj, sobj_double>(
+          U, config, munge, 0, format);
+
+      std::cout << GridLogMessage << "Written Binary Configuration " << config
+                << " checksum " << std::hex << csum << std::dec << std::endl;
+    }
+  };
+
+  void CheckpointRestore(int traj, Field &U, GridSerialRNG &sRNG,
+                         GridParallelRNG &pRNG) {
+    std::string rng;
+    {
+      std::ostringstream os;
+      os << rngStem << "." << traj;
+      rng = os.str();
+    }
+    std::string config;
+    {
+      std::ostringstream os;
+      os << configStem << "." << traj;
+      config = os.str();
+    }
+
+    BinaryIO::BinarySimpleMunger<sobj_double, sobj> munge;
+    BinaryIO::readRNGSerial(sRNG, pRNG, rng, 0);
+    uint32_t csum = BinaryIO::readObjectParallel<vobj, sobj_double>(
+        U, config, munge, 0, format);
+
+    std::cout << GridLogMessage << "Read Binary Configuration " << config
+              << " checksum " << std::hex << csum << std::dec << std::endl;
+  };
 };
 }
 }
 #endif
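The checkpointer names its outputs by appending the trajectory number to each stem and only fires every SaveInterval trajectories. A standalone sketch of that bookkeeping (illustrative helper names, not the Grid class itself):

#include <cstdio>
#include <sstream>
#include <string>

// Filename scheme used above: "<stem>.<traj>", e.g. "ckpoint_lat.120".
static std::string checkpointName(const std::string &stem, int traj) {
  std::ostringstream os;
  os << stem << "." << traj;
  return os.str();
}

// Save only when the trajectory number is a multiple of the interval.
static bool shouldSave(int traj, int saveInterval) {
  return (traj % saveInterval) == 0;
}

int main() {
  for (int traj = 0; traj < 10; traj++)
    if (shouldSave(traj, 5))
      printf("would write %s and %s\n",
             checkpointName("ckpoint_lat", traj).c_str(),
             checkpointName("ckpoint_rng", traj).c_str());
  return 0;
}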

View File

@@ -0,0 +1,173 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/hmc/GenericHmcRunner.h
Copyright (C) 2015
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GENERIC_HMC_RUNNER
#define GENERIC_HMC_RUNNER
namespace Grid {
namespace QCD {
// Virtual Class for HMC specific for gauge theories
// implement a specific theory by defining the BuildTheAction
template <class Implementation, class RepresentationsPolicy = NoHirep>
class BinaryHmcRunnerTemplate {
public:
INHERIT_FIELD_TYPES(Implementation);
enum StartType_t { ColdStart, HotStart, TepidStart, CheckpointStart };
ActionSet<Field, RepresentationsPolicy> TheAction;
// Add here a vector of HmcObservable
// that can be injected from outside
GridCartesian *UGrid;
GridCartesian *FGrid;
GridRedBlackCartesian *UrbGrid;
GridRedBlackCartesian *FrbGrid;
virtual void BuildTheAction(int argc, char **argv) = 0; // necessary?
void Run(int argc, char **argv) {
StartType_t StartType = HotStart;
std::string arg;
if (GridCmdOptionExists(argv, argv + argc, "--StartType")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--StartType");
if (arg == "HotStart") {
StartType = HotStart;
} else if (arg == "ColdStart") {
StartType = ColdStart;
} else if (arg == "TepidStart") {
StartType = TepidStart;
} else if (arg == "CheckpointStart") {
StartType = CheckpointStart;
} else {
std::cout << GridLogError << "Unrecognized option in --StartType\n";
std::cout
<< GridLogError
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
assert(0);
}
}
int StartTraj = 0;
if (GridCmdOptionExists(argv, argv + argc, "--StartTrajectory")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--StartTrajectory");
std::vector<int> ivec(0);
GridCmdOptionIntVector(arg, ivec);
StartTraj = ivec[0];
}
int NumTraj = 1;
if (GridCmdOptionExists(argv, argv + argc, "--Trajectories")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--Trajectories");
std::vector<int> ivec(0);
GridCmdOptionIntVector(arg, ivec);
NumTraj = ivec[0];
}
int NumThermalizations = 10;
if (GridCmdOptionExists(argv, argv + argc, "--Thermalizations")) {
arg = GridCmdOptionPayload(argv, argv + argc, "--Thermalizations");
std::vector<int> ivec(0);
GridCmdOptionIntVector(arg, ivec);
NumThermalizations = ivec[0];
}
GridSerialRNG sRNG;
GridParallelRNG pRNG(UGrid);
Field U(UGrid);
std::vector<int> SerSeed({1, 2, 3, 4, 5});
std::vector<int> ParSeed({6, 7, 8, 9, 10});
NoSmearing<Implementation> SmearingPolicy;
typedef MinimumNorm2<Implementation, NoSmearing<Implementation>,
RepresentationsPolicy>
IntegratorType; // change here to change the algorithm
IntegratorParameters MDpar(20, 1.0);
IntegratorType MDynamics(UGrid, MDpar, TheAction, SmearingPolicy);
// Checkpoint strategy
int SaveInterval = 1;
std::string format = std::string("IEEE64BIG");
std::string conf_prefix = std::string("ckpoint_lat");
std::string rng_prefix = std::string("ckpoint_rng");
BinaryHmcCheckpointer<Implementation> Checkpoint(conf_prefix, rng_prefix,
SaveInterval, format);
HMCparameters HMCpar;
HMCpar.StartTrajectory = StartTraj;
HMCpar.Trajectories = NumTraj;
HMCpar.NoMetropolisUntil = NumThermalizations;
if (StartType == HotStart) {
// Hot start
HMCpar.MetropolisTest = true;
sRNG.SeedFixedIntegers(SerSeed);
pRNG.SeedFixedIntegers(ParSeed);
Implementation::HotConfiguration(pRNG, U);
} else if (StartType == ColdStart) {
// Cold start
HMCpar.MetropolisTest = true;
sRNG.SeedFixedIntegers(SerSeed);
pRNG.SeedFixedIntegers(ParSeed);
Implementation::ColdConfiguration(pRNG, U);
} else if (StartType == TepidStart) {
// Tepid start
HMCpar.MetropolisTest = true;
sRNG.SeedFixedIntegers(SerSeed);
pRNG.SeedFixedIntegers(ParSeed);
Implementation::TepidConfiguration(pRNG, U);
} else if (StartType == CheckpointStart) {
HMCpar.MetropolisTest = true;
// CheckpointRestart
Checkpoint.CheckpointRestore(StartTraj, U, sRNG, pRNG);
}
SmearingPolicy.set_Field(U);
HybridMonteCarlo<IntegratorType> HMC(HMCpar, MDynamics, sRNG, pRNG, U);
HMC.AddObservable(&Checkpoint);
// Run it
HMC.evolve();
}
};
typedef BinaryHmcRunnerTemplate<PeriodicGimplR> BinaryHmcRunner;
typedef BinaryHmcRunnerTemplate<PeriodicGimplF> BinaryHmcRunnerF;
typedef BinaryHmcRunnerTemplate<PeriodicGimplD> BinaryHmcRunnerD;
template <class RepresentationsPolicy>
using BinaryHmcRunnerTemplateHirep =
BinaryHmcRunnerTemplate<PeriodicGimplR, RepresentationsPolicy>;
}
}
#endif
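Run() above drives everything from the command line (--StartType, --StartTrajectory, --Trajectories, --Thermalizations). A minimal standalone sketch of that flag handling, with illustrative helper names standing in for Grid's GridCmdOptionExists / GridCmdOptionPayload:

#include <algorithm>
#include <cstdio>
#include <string>

// Illustrative stand-ins, not Grid's implementations.
static bool optionExists(char **begin, char **end, const std::string &flag) {
  return std::find(begin, end, flag) != end;
}
static std::string optionPayload(char **begin, char **end,
                                 const std::string &flag) {
  char **it = std::find(begin, end, flag);
  if (it != end && ++it != end) return std::string(*it);
  return "";
}

int main(int argc, char **argv) {
  // e.g. ./Test_hmc --StartType CheckpointStart --StartTrajectory 120
  std::string startType = "HotStart";  // same default as Run() above
  if (optionExists(argv, argv + argc, "--StartType"))
    startType = optionPayload(argv, argv + argc, "--StartType");
  printf("StartType = %s\n", startType.c_str());
  return 0;
}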

View File

@@ -1,5 +1,5 @@
-tests: Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
-EXTRA_PROGRAMS = Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
+tests: Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge_Binary Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
+EXTRA_PROGRAMS = Test_hmc_EODWFRatio Test_hmc_EODWFRatio_Gparity Test_hmc_EOWilsonFermionGauge Test_hmc_EOWilsonRatio Test_hmc_GparityIwasakiGauge Test_hmc_GparityWilsonGauge Test_hmc_IwasakiGauge Test_hmc_RectGauge Test_hmc_WilsonAdjointFermionGauge Test_hmc_WilsonFermionGauge_Binary Test_hmc_WilsonFermionGauge Test_hmc_WilsonGauge Test_hmc_WilsonMixedRepresentationsFermionGauge Test_hmc_WilsonRatio Test_hmc_WilsonTwoIndexSymmetricFermionGauge Test_multishift_sqrt Test_remez Test_rhmc_EOWilson1p1 Test_rhmc_EOWilsonRatio Test_rhmc_Wilson1p1 Test_rhmc_WilsonRatio
Test_hmc_EODWFRatio_SOURCES=Test_hmc_EODWFRatio.cc
Test_hmc_EODWFRatio_LDADD=-lGrid
@@ -28,6 +28,9 @@ Test_hmc_RectGauge_LDADD=-lGrid
 Test_hmc_WilsonAdjointFermionGauge_SOURCES=Test_hmc_WilsonAdjointFermionGauge.cc
 Test_hmc_WilsonAdjointFermionGauge_LDADD=-lGrid

+Test_hmc_WilsonFermionGauge_Binary_SOURCES=Test_hmc_WilsonFermionGauge_Binary.cc
+Test_hmc_WilsonFermionGauge_Binary_LDADD=-lGrid
+
 Test_hmc_WilsonFermionGauge_SOURCES=Test_hmc_WilsonFermionGauge.cc
 Test_hmc_WilsonFermionGauge_LDADD=-lGrid

View File

@@ -0,0 +1,99 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_WilsonFermionGauge_Binary.cc
Copyright (C) 2015
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
namespace Grid {
namespace QCD {
class HmcRunner : public BinaryHmcRunner {
public:
void BuildTheAction(int argc, char **argv)
{
typedef WilsonImplR ImplPolicy;
typedef WilsonFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
UGrid = SpaceTimeGrid::makeFourDimGrid(
GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()),
GridDefaultMpi());
UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
FGrid = UGrid;
FrbGrid = UrbGrid;
// temporarily need a gauge field
LatticeGaugeField U(UGrid);
// Gauge action
WilsonGaugeActionR Waction(5.6);
Real mass = -0.77;
FermionAction FermOp(U, *FGrid, *FrbGrid, mass);
ConjugateGradient<FermionField> CG(1.0e-8, 10000);
TwoFlavourPseudoFermionAction<ImplPolicy> Nf2(FermOp, CG, CG);
// Set smearing (true/false), default: false
Nf2.is_smeared = true;
// Collect actions
ActionLevel<LatticeGaugeField> Level1(1);
Level1.push_back(&Nf2);
ActionLevel<LatticeGaugeField> Level2(4);
Level2.push_back(&Waction);
TheAction.push_back(Level1);
TheAction.push_back(Level2);
Run(argc, argv);
};
};
}
}
int main(int argc, char **argv) {
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads
<< " threads" << std::endl;
HmcRunner TheHMC;
TheHMC.BuildTheAction(argc, argv);
}
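The two action levels registered above put the fermion determinant on the outermost, least often integrated level and the gauge action on an inner level with multiplier 4, so the cheap gauge force is evaluated four times per fermion force evaluation. A toy sketch of that nested-stepping arithmetic (illustrative numbers, not the integrator itself):

#include <cstdio>

int main() {
  int outer_steps = 20;      // MD steps per trajectory, as in MDpar(20, 1.0)
  int inner_multiplier = 4;  // as in ActionLevel<LatticeGaugeField> Level2(4)
  int fermion_force_evals = outer_steps;
  int gauge_force_evals = outer_steps * inner_multiplier;
  printf("per trajectory: %d fermion force evals, %d gauge force evals\n",
         fermion_force_evals, gauge_force_evals);
  return 0;
}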