
Some fix for the GenericHMCrunner

Guido Cossu 2016-10-10 09:43:05 +01:00
parent 6eb873dd96
commit 26b9740d53
5 changed files with 80 additions and 60 deletions

View File

@@ -38,51 +38,56 @@ namespace Grid {
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Deterministic Reduction operations
 ////////////////////////////////////////////////////////////////////////////////////////////////////
-template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
-  ComplexD nrm = innerProduct(arg,arg);
-  return std::real(nrm);
-}
-
-template<class vobj>
-inline ComplexD innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right)
-{
-  typedef typename vobj::scalar_type scalar_type;
-  typedef typename vobj::vector_type vector_type;
-  scalar_type nrm;
-
-  GridBase *grid = left._grid;
-
-  std::vector<vector_type,alignedAllocator<vector_type> > sumarray(grid->SumArraySize());
-  for(int i=0;i<grid->SumArraySize();i++){
-    sumarray[i]=zero;
-  }
-
-PARALLEL_FOR_LOOP
-  for(int thr=0;thr<grid->SumArraySize();thr++){
-    int nwork, mywork, myoff;
-    GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff);
-
-    decltype(innerProduct(left._odata[0],right._odata[0])) vnrm=zero; // private to thread; sub summation
-    for(int ss=myoff;ss<mywork+myoff; ss++){
-      vnrm = vnrm + innerProduct(left._odata[ss],right._odata[ss]);
-    }
-    sumarray[thr]=TensorRemove(vnrm) ;
-  }
-
-  vector_type vvnrm; vvnrm=zero;  // sum across threads
-  for(int i=0;i<grid->SumArraySize();i++){
-    vvnrm = vvnrm+sumarray[i];
-  }
-  nrm = Reduce(vvnrm);// sum across simd
-  right._grid->GlobalSum(nrm);
-  return nrm;
-}
-
-template<class Op,class T1>
-inline auto sum(const LatticeUnaryExpression<Op,T1> & expr)
-  ->typename decltype(expr.first.func(eval(0,std::get<0>(expr.second))))::scalar_object
-{
-  return sum(closure(expr));
-}
+template <class vobj>
+inline RealD norm2(const Lattice<vobj> &arg) {
+  ComplexD nrm = innerProduct(arg, arg);
+  return std::real(nrm);
+}
+
+template <class vobj>
+inline ComplexD innerProduct(const Lattice<vobj> &left,
+                             const Lattice<vobj> &right) {
+  typedef typename vobj::scalar_type scalar_type;
+  typedef typename vobj::vector_type vector_type;
+  scalar_type nrm;
+
+  GridBase *grid = left._grid;
+
+  std::vector<vector_type, alignedAllocator<vector_type> > sumarray(
+      grid->SumArraySize());
+  for (int i = 0; i < grid->SumArraySize(); i++) {
+    sumarray[i] = zero;
+  }
+
+PARALLEL_FOR_LOOP
+  for (int thr = 0; thr < grid->SumArraySize(); thr++) {
+    int nwork, mywork, myoff;
+    GridThread::GetWork(left._grid->oSites(), thr, mywork, myoff);
+
+    decltype(innerProduct(left._odata[0], right._odata[0])) vnrm =
+        zero;  // private to thread; sub summation
+    for (int ss = myoff; ss < mywork + myoff; ss++) {
+      vnrm = vnrm + innerProduct(left._odata[ss], right._odata[ss]);
+    }
+    sumarray[thr] = TensorRemove(vnrm);
+  }
+
+  vector_type vvnrm;
+  vvnrm = zero;  // sum across threads
+  for (int i = 0; i < grid->SumArraySize(); i++) {
+    vvnrm = vvnrm + sumarray[i];
+  }
+  nrm = Reduce(vvnrm);  // sum across simd
+  right._grid->GlobalSum(nrm);
+  return nrm;
+}
+
+template <class Op, class T1>
+inline auto sum(const LatticeUnaryExpression<Op, T1> &expr) ->
+    typename decltype(
+        expr.first.func(eval(0, std::get<0>(expr.second))))::scalar_object {
+  return sum(closure(expr));
+}
 
 template<class Op,class T1,class T2>
@@ -96,9 +101,9 @@ PARALLEL_FOR_LOOP
(whitespace-only changes in this hunk; the code is shown once)
 template<class Op,class T1,class T2,class T3>
 inline auto sum(const LatticeTrinaryExpression<Op,T1,T2,T3> & expr)
   ->typename decltype(expr.first.func(eval(0,std::get<0>(expr.second)),
                                       eval(0,std::get<1>(expr.second)),
                                       eval(0,std::get<2>(expr.second))
                                       ))::scalar_object
 {
   return sum(closure(expr));
 }
@@ -111,24 +116,24 @@ PARALLEL_FOR_LOOP
(whitespace-only changes in this hunk; the code is shown once)
   std::vector<vobj,alignedAllocator<vobj> > sumarray(grid->SumArraySize());
   for(int i=0;i<grid->SumArraySize();i++){
     sumarray[i]=zero;
   }
 PARALLEL_FOR_LOOP
   for(int thr=0;thr<grid->SumArraySize();thr++){
     int nwork, mywork, myoff;
     GridThread::GetWork(grid->oSites(),thr,mywork,myoff);
     vobj vvsum=zero;
     for(int ss=myoff;ss<mywork+myoff; ss++){
       vvsum = vvsum + arg._odata[ss];
     }
     sumarray[thr]=vvsum;
   }
   vobj vsum=zero;  // sum across threads
   for(int i=0;i<grid->SumArraySize();i++){
     vsum = vsum+sumarray[i];
   }
   typedef typename vobj::scalar_object sobj;
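
The innerProduct reformatted above is a three-stage reduction: each thread accumulates a private partial sum over its share of the outer sites, the per-thread partials are summed serially, Reduce collapses the SIMD lanes, and GlobalSum combines the result across MPI ranks. Below is a minimal standalone sketch of the thread-level part of that pattern, using plain std::thread instead of Grid's PARALLEL_FOR_LOOP; all names are hypothetical stand-ins, not Grid API.

// Sketch: thread-private partial sums, then a serial combine, mirroring
// the structure of Grid's innerProduct (without the SIMD/MPI stages).
#include <complex>
#include <numeric>
#include <thread>
#include <vector>

std::complex<double> inner_product(const std::vector<std::complex<double> > &left,
                                   const std::vector<std::complex<double> > &right,
                                   int nthreads) {
  std::vector<std::complex<double> > partial(nthreads, 0.0);
  std::vector<std::thread> pool;
  const std::size_t n = left.size();
  for (int t = 0; t < nthreads; t++) {
    pool.emplace_back([&, t] {
      // Each thread owns a contiguous slice, like GridThread::GetWork.
      std::size_t lo = n * t / nthreads, hi = n * (t + 1) / nthreads;
      std::complex<double> acc = 0.0;  // private to thread; sub summation
      for (std::size_t i = lo; i < hi; i++) acc += std::conj(left[i]) * right[i];
      partial[t] = acc;
    });
  }
  for (auto &th : pool) th.join();
  // Serial sum across threads; Grid then also reduces across SIMD lanes
  // (Reduce) and across ranks (GlobalSum).
  return std::accumulate(partial.begin(), partial.end(), std::complex<double>(0.0));
}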

View File

@@ -47,7 +47,7 @@ class BinaryHmcRunnerTemplate {
   ActionSet<Field, RepresentationsPolicy> TheAction;
 
   // Add here a vector of HmcObservable
   // that can be injected from outside
-  std::vector< HmcObservable<typename Implementation::Field> > ObservablesList;
+  std::vector< HmcObservable<typename Implementation::Field>* > ObservablesList;
 
   GridCartesian *UGrid;
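
Storing the observables by pointer rather than by value is the substantive fix here: HmcObservable is a polymorphic base class, and keeping derived observables such as PlaquetteLogger by value in the vector would slice them down to the base and discard their virtual overrides. A minimal sketch of the slicing hazard, with hypothetical stand-in types:

#include <iostream>
#include <vector>

struct Observable {  // stand-in for HmcObservable
  virtual void TrajectoryComplete(int traj) { std::cout << "base observable\n"; }
  virtual ~Observable() {}
};

struct PlaqLogger : Observable {  // stand-in for PlaquetteLogger
  void TrajectoryComplete(int traj) override {
    std::cout << "plaquette measured at trajectory " << traj << "\n";
  }
};

int main() {
  PlaqLogger plaq;
  std::vector<Observable *> observables;  // pointers preserve virtual dispatch
  observables.push_back(&plaq);
  for (auto *obs : observables) obs->TrajectoryComplete(1);  // derived override runs
  // A std::vector<Observable> would have copy-sliced PlaqLogger to Observable,
  // and the base TrajectoryComplete would have run instead.
}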
@@ -119,12 +119,13 @@ class BinaryHmcRunnerTemplate {
(the single added line appears to be blank; the code is shown once)
   IntegratorParameters MDpar(20, 1.0);
   IntegratorType MDynamics(UGrid, MDpar, TheAction, SmearingPolicy);
 
   // Checkpoint strategy
   int SaveInterval = 1;
   std::string format = std::string("IEEE64BIG");
   std::string conf_prefix = std::string("ckpoint_lat");
   std::string rng_prefix = std::string("ckpoint_rng");
   IOCheckpointer Checkpoint(conf_prefix, rng_prefix, SaveInterval, format);
 
   HMCparameters HMCpar;
   HMCpar.StartTrajectory = StartTraj;
@@ -158,10 +159,10 @@ class BinaryHmcRunnerTemplate {
   SmearingPolicy.set_Field(U);
 
   HybridMonteCarlo<IntegratorType> HMC(HMCpar, MDynamics, sRNG, pRNG, U);
 
-  //HMC.AddObservable(&Checkpoint);
+  HMC.AddObservable(&Checkpoint);
   for (int obs = 0; obs < ObservablesList.size(); obs++)
-    HMC.AddObservable(&ObservablesList[obs]);
+    HMC.AddObservable(ObservablesList[obs]);
 
   // Run it
   HMC.evolve();
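
Two related fixes land in this hunk: the checkpointer is registered again as an observable (the AddObservable(&Checkpoint) call implies it shares the HmcObservable interface, so it fires at trajectory boundaries like any other observer), and the registration loop now forwards the stored pointers directly instead of taking the address of a by-value vector element, matching the container change above.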

View File

@@ -143,6 +143,7 @@ int main(int argc, char **argv) {
(the single added line appears to be blank; context shown once)
   random(FineRNG, Foo);
   gaussian(FineRNG, Bar);
 
   random(FineRNG, scFoo);
   random(FineRNG, scBar);
@@ -169,6 +170,18 @@ int main(int argc, char **argv) {
     abort();
   }
 
+  // Norm2 check
+  LatticeReal BarReal(&Fine);
+  LatticeComplex BarComplex(&Fine);
+  BarReal = 1.0;
+  BarComplex = 1.0;
+  std::cout << "Norm2 LatticeReal    : " << norm2(BarReal) << std::endl;
+  std::cout << "Norm2 LatticeComplex : " << norm2(BarComplex) << std::endl;
+  exit(0);
+
   TComplex tr = trace(cmat);
 
   cVec = cMat * cVec;  // LatticeColourVector = LatticeColourMatrix
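
For reference, the arithmetic behind the new check (an inference from the definition of norm2, not output recorded in the commit): norm2 sums |phi(x)|^2 over every lattice site, so a field set to 1.0 everywhere should return exactly the global volume, identically for the real and complex variants. A tiny standalone version of that expectation:

#include <cassert>
#include <complex>
#include <vector>

int main() {
  const int V = 8 * 8 * 8 * 8;                     // sites of a hypothetical 8^4 lattice
  std::vector<std::complex<double> > phi(V, 1.0);  // field set to 1.0 everywhere
  double nrm = 0.0;
  for (const auto &v : phi) nrm += std::norm(v);   // |phi(x)|^2 summed over sites
  assert(nrm == V);                                // norm2 equals the global volume
}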

View File

@@ -43,12 +43,13 @@ class HmcRunner : public ScalarBinaryHmcRunner {
   void BuildTheAction(int argc, char **argv)
   {
+    // Notice that the Grid is for reals now
     UGrid = SpaceTimeGrid::makeFourDimGrid(
-        GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()),
+        GridDefaultLatt(), GridDefaultSimd(Nd, vReal::Nsimd()),
         GridDefaultMpi());
     UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 
-    // Scalar action
+    // Real Scalar action
     ScalarActionR Saction(0.11,0.);
 
     // Collect actions
@@ -59,7 +60,7 @@ class HmcRunner : public ScalarBinaryHmcRunner {
(whitespace-only changes in this hunk; the code is shown once)
     Run(argc, argv);
   };
 };
 }
 }
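
The vComplex to vReal switch matters because GridDefaultSimd distributes the SIMD lanes of the vectorised type over the lattice layout, and a real-valued field packs twice as many lanes into a register as a complex one, so the grid decomposition must match the field type. An illustrative lane count, assuming 256-bit registers in double precision (numbers are illustrative, not taken from the source):

#include <complex>

// Lane counts for a 256-bit SIMD register in double precision (illustrative).
constexpr int vector_bits   = 256;
constexpr int real_lanes    = vector_bits / (8 * sizeof(double));                // 4
constexpr int complex_lanes = vector_bits / (8 * sizeof(std::complex<double>));  // 2
static_assert(real_lanes == 2 * complex_lanes,
              "a real field exposes twice the SIMD lanes of a complex one");

int main() {}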

View File

@@ -82,8 +82,8 @@ class HmcRunner : public BinaryHmcRunner {
   TheAction.push_back(Level2);
 
   // Add observables
-  //PlaquetteLogger<BinaryHmcRunner::ImplPolicy> PlaqLog(std::string("plaq"));
-  //ObservablesList.push_back(PlaqLog);
+  PlaquetteLogger<BinaryHmcRunner::ImplPolicy> PlaqLog(std::string("plaq"));
+  ObservablesList.push_back(&PlaqLog);
 
   Run(argc, argv);
 };
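
One caveat worth keeping in mind (an observation about the pattern, not a statement from the commit): PlaqLog is a stack object and ObservablesList now holds raw pointers, so pushing &PlaqLog is safe only because Run(argc, argv) executes, and the evolution completes, before PlaqLog goes out of scope at the end of BuildTheAction.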