mirror of https://github.com/paboyle/Grid.git synced 2024-09-20 01:05:38 +01:00

Merge branch 'develop' into feature/gpu-port

commit fa9cd50c5b
Peter Boyle, 2019-07-16 11:55:17 +01:00
274 changed files with 7120 additions and 4663 deletions

.gitignore vendored
View File

@ -114,3 +114,4 @@ gh-pages/
#####################
Grid/qcd/spin/gamma-gen/*.h
Grid/qcd/spin/gamma-gen/*.cc
Grid/util/Version.h

View File

@ -0,0 +1,5 @@
Version : 0.8.0
- Clang 3.5 and above, ICPC v16 and above, GCC 6.3 and above recommended
- MPI and MPI3 comms optimisations for KNL and OPA finished
- Half precision comms

View File

@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -25,8 +25,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
//
// Grid.h
// simd
@ -42,9 +42,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/Action.h>
#include <Grid/qcd/utils/GaugeFix.h>
NAMESPACE_CHECK(GaugeFix);
#include <Grid/qcd/utils/CovariantSmearing.h>
#include <Grid/qcd/smearing/Smearing.h>
NAMESPACE_CHECK(Smearing);
#include <Grid/parallelIO/MetaData.h>
#include <Grid/qcd/hmc/HMC_aggregate.h>

View File

@ -55,13 +55,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/PowerMethod.h>
#include <Grid/algorithms/CoarsenedMatrix.h>
#include <Grid/algorithms/FFT.h>
// EigCg
// Pcg
// Hdcg
// GCR
// etc..
#endif

View File

@ -171,144 +171,142 @@ public:
}
};
//////////////////////////////////////////////////////////
// Even Odd Schur decomp operators; there are several
// ways to introduce the even odd checkerboarding
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
// Even Odd Schur decomp operators; there are several
// ways to introduce the even odd checkerboarding
//////////////////////////////////////////////////////////
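For reference, a sketch of the even-odd (red-black) Schur decomposition these operators encode, read off from the implementations below (standard conventions, with M_ee, M_eo, M_oe, M_oo the checkerboard blocks of the Dirac matrix):

$$ M = \begin{pmatrix} M_{ee} & M_{eo} \\ M_{oe} & M_{oo} \end{pmatrix}, \qquad
M_{\rm pc}^{\rm DiagMooee} = M_{oo} - M_{oe}\,M_{ee}^{-1}\,M_{eo}, $$
$$ M_{\rm pc}^{\rm DiagOne} = 1 - M_{oo}^{-1}\,M_{oe}\,M_{ee}^{-1}\,M_{eo}, \qquad
M_{\rm pc}^{\rm DiagTwo} = 1 - M_{oe}\,M_{ee}^{-1}\,M_{eo}\,M_{oo}^{-1}. $$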
template<class Field>
class SchurOperatorBase : public LinearOperatorBase<Field> {
public:
virtual RealD Mpc (const Field &in, Field &out) =0;
virtual RealD MpcDag (const Field &in, Field &out) =0;
virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no)
{
Field tmp(in.Grid());
tmp.Checkerboard() = in.Checkerboard();
ni=Mpc(in,tmp);
no=MpcDag(tmp,out);
}
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
out.Checkerboard() = in.Checkerboard();
MpcDagMpc(in,out,n1,n2);
}
virtual void HermOp(const Field &in, Field &out){
RealD n1,n2;
HermOpAndNorm(in,out,n1,n2);
}
void Op (const Field &in, Field &out){
Mpc(in,out);
}
void AdjOp (const Field &in, Field &out){
MpcDag(in,out);
}
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
assert(0); // must coarsen the unpreconditioned system
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
}
};
template<class Matrix,class Field>
class SchurDiagMooeeOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagMooeeOperator (Matrix &Mat): _Mat(Mat){};
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
// std::cout <<"grid pointers: in.Grid()="<< in.Grid() << " out.Grid()=" << out.Grid() << " _Mat.Grid=" << _Mat.Grid() << " _Mat.RedBlackGrid=" << _Mat.RedBlackGrid() << std::endl;
tmp.Checkerboard() = !in.Checkerboard();
template<class Field>
class SchurOperatorBase : public LinearOperatorBase<Field> {
public:
virtual RealD Mpc (const Field &in, Field &out) =0;
virtual RealD MpcDag (const Field &in, Field &out) =0;
virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
Field tmp(in.Grid());
tmp.Checkerboard() = in.Checkerboard();
ni=Mpc(in,tmp);
no=MpcDag(tmp,out);
}
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
out.Checkerboard() = in.Checkerboard();
MpcDagMpc(in,out,n1,n2);
}
virtual void HermOp(const Field &in, Field &out){
RealD n1,n2;
HermOpAndNorm(in,out,n1,n2);
}
void Op (const Field &in, Field &out){
Mpc(in,out);
}
void AdjOp (const Field &in, Field &out){
MpcDag(in,out);
}
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
assert(0); // must coarsen the unpreconditioned system
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
}
};
template<class Matrix,class Field>
class SchurDiagMooeeOperator : public SchurOperatorBase<Field> {
public:
Matrix &_Mat;
SchurDiagMooeeOperator (Matrix &Mat): _Mat(Mat){};
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
tmp.Checkerboard() = !in.Checkerboard();
//std::cout <<"grid pointers: in._grid="<< in._grid << " out._grid=" << out._grid << " _Mat.Grid=" << _Mat.Grid() << " _Mat.RedBlackGrid=" << _Mat.RedBlackGrid() << std::endl;
_Mat.Meooe(in,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
_Mat.Meooe(in,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
//std::cout << "cb in " << in.Checkerboard() << " cb out " << out.Checkerboard() << std::endl;
_Mat.Mooee(in,out);
return axpy_norm(out,-1.0,tmp,out);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.Mooee(in,out);
return axpy_norm(out,-1.0,tmp,out);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.MeooeDag(in,tmp);
_Mat.MooeeInvDag(tmp,out);
_Mat.MeooeDag(out,tmp);
_Mat.MeooeDag(in,tmp);
_Mat.MooeeInvDag(tmp,out);
_Mat.MeooeDag(out,tmp);
_Mat.MooeeDag(in,out);
return axpy_norm(out,-1.0,tmp,out);
}
};
template<class Matrix,class Field>
class SchurDiagOneOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagOneOperator (Matrix &Mat): _Mat(Mat){};
_Mat.MooeeDag(in,out);
return axpy_norm(out,-1.0,tmp,out);
}
};
template<class Matrix,class Field>
class SchurDiagOneOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagOneOperator (Matrix &Mat): _Mat(Mat){};
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
_Mat.Meooe(in,out);
_Mat.MooeeInv(out,tmp);
_Mat.Meooe(tmp,out);
_Mat.MooeeInv(out,tmp);
_Mat.Meooe(in,out);
_Mat.MooeeInv(out,tmp);
_Mat.Meooe(tmp,out);
_Mat.MooeeInv(out,tmp);
return axpy_norm(out,-1.0,tmp,in);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
return axpy_norm(out,-1.0,tmp,in);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.MooeeInvDag(in,out);
_Mat.MeooeDag(out,tmp);
_Mat.MooeeInvDag(tmp,out);
_Mat.MeooeDag(out,tmp);
_Mat.MooeeInvDag(in,out);
_Mat.MeooeDag(out,tmp);
_Mat.MooeeInvDag(tmp,out);
_Mat.MeooeDag(out,tmp);
return axpy_norm(out,-1.0,tmp,in);
}
};
template<class Matrix,class Field>
class SchurDiagTwoOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagTwoOperator (Matrix &Mat): _Mat(Mat){};
return axpy_norm(out,-1.0,tmp,in);
}
};
template<class Matrix,class Field>
class SchurDiagTwoOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagTwoOperator (Matrix &Mat): _Mat(Mat){};
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in.Grid());
_Mat.MooeeInv(in,out);
_Mat.Meooe(out,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
_Mat.MooeeInv(in,out);
_Mat.Meooe(out,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
return axpy_norm(out,-1.0,tmp,in);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
return axpy_norm(out,-1.0,tmp,in);
}
virtual RealD MpcDag (const Field &in, Field &out){
Field tmp(in.Grid());
_Mat.MeooeDag(in,out);
_Mat.MooeeInvDag(out,tmp);
_Mat.MeooeDag(tmp,out);
_Mat.MooeeInvDag(out,tmp);
_Mat.MeooeDag(in,out);
_Mat.MooeeInvDag(out,tmp);
_Mat.MeooeDag(tmp,out);
_Mat.MooeeInvDag(out,tmp);
return axpy_norm(out,-1.0,tmp,in);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Left handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta --> ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta
// Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta --> ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ;
template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Staggered use
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class Matrix,class Field>
class SchurStaggeredOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
return axpy_norm(out,-1.0,tmp,in);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Left handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta --> ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta
// Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta --> ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ;
template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Staggered use
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class Matrix,class Field>
class SchurStaggeredOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
Field tmp;
RealD mass;
double tMpc;

View File

@ -60,7 +60,7 @@ public:
// Query the even even properties to make algorithmic decisions
//////////////////////////////////////////////////////////////////////
virtual RealD Mass(void) { return 0.0; };
virtual int ConstEE(void) { return 0; }; // Disable assumptions unless overridden
virtual int ConstEE(void) { return 1; }; // Disable assumptions unless overridden
virtual int isTrivialEE(void) { return 0; }; // by a derived class that knows better
// half checkerboard operations

View File

@ -93,6 +93,8 @@ public:
// Check if guess is really REALLY good :)
if (cp <= rsq) {
std::cout << GridLogMessage << "ConjugateGradient guess is converged already " << std::endl;
IterationsToComplete = 0;
return;
}
@ -108,7 +110,7 @@ public:
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations*1000; k++) {
for (k = 1; k <= MaxIterations; k++) {
c = cp;
MatrixTimer.Start();
@ -172,8 +174,7 @@ public:
return;
}
}
std::cout << GridLogMessage << "ConjugateGradient did NOT converge"
<< std::endl;
std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations<< std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;

View File

@ -30,36 +30,41 @@ Author: Christopher Kelly <ckelly@phys.columbia.edu>
NAMESPACE_BEGIN(Grid);
//Mixed precision restarted defect correction CG
template<class FieldD,class FieldF,
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
LinearOperatorBase<FieldF> &Linop_f;
LinearOperatorBase<FieldD> &Linop_d;
//Mixed precision restarted defect correction CG
template<class FieldD,class FieldF,
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
LinearOperatorBase<FieldF> &Linop_f;
LinearOperatorBase<FieldD> &Linop_d;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
//Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
LinearFunction<FieldF> *guesser;
//Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
LinearFunction<FieldF> *guesser;
MixedPrecisionConjugateGradient(RealD tol, Integer maxinnerit, Integer maxouterit, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d) :
Linop_f(_Linop_f), Linop_d(_Linop_d),
Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
OuterLoopNormMult(100.), guesser(NULL){ };
MixedPrecisionConjugateGradient(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid,
LinearOperatorBase<FieldF> &_Linop_f,
LinearOperatorBase<FieldD> &_Linop_d) :
Linop_f(_Linop_f), Linop_d(_Linop_d),
Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
OuterLoopNormMult(100.), guesser(NULL){ };
void useGuesser(LinearFunction<FieldF> &g){
guesser = &g;
}
void useGuesser(LinearFunction<FieldF> &g){
guesser = &g;
}
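A hedged construction sketch based on the constructor signature above; the field typedefs, the single-precision grid and the two operator wrappers (FrbGridF, LinOpF, LinOpD) are illustrative placeholders, not names from this diff:

MixedPrecisionConjugateGradient<LatticeFermionD,LatticeFermionF>
  mCG(1.0e-8,    // Tolerance (outer, double precision)
      10000,     // MaxInnerIterations
      50,        // MaxOuterIterations (restarts)
      FrbGridF,  // SinglePrecGrid
      LinOpF,    // LinearOperatorBase<LatticeFermionF>
      LinOpD);   // LinearOperatorBase<LatticeFermionD>
mCG.InnerTolerance = 1.0e-5; // optionally loosen the inner single-precision solves
mCG(src_d, sol_d);           // restarted defect-correction solve; result in double precision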
void operator() (const FieldD &src_d_in, FieldD &sol_d){
TotalInnerIterations = 0;

View File

@ -35,7 +35,11 @@ class ZeroGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { guess = Zero(); };
};
template<class Field>
class DoNothingGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { };
};
template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:

View File

@ -0,0 +1,45 @@
#pragma once
namespace Grid {
template<class Field> class PowerMethod
{
public:
template<typename T> static RealD normalise(T& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
RealD operator()(LinearOperatorBase<Field> &HermOp, const Field &src)
{
GridBase *grid = src._grid;
// quickly get an idea of the largest eigenvalue to more properly normalize the residuum
RealD evalMaxApprox = 0.0;
auto src_n = src;
auto tmp = src;
const int _MAX_ITER_EST_ = 50;
for (int i=0;i<_MAX_ITER_EST_;i++) {
normalise(src_n);
HermOp.HermOp(src_n,tmp);
RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
RealD vden = norm2(src_n);
RealD na = vnum/vden;
if ( (fabs(evalMaxApprox/na - 1.0) < 0.01) || (i==_MAX_ITER_EST_-1) ) {
evalMaxApprox = na;
return evalMaxApprox;
}
evalMaxApprox = na;
std::cout << GridLogMessage << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
src_n = tmp;
}
assert(0);
return 0;
}
};
}
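A short usage sketch for the new PowerMethod class; the Hermitian operator HermOp and the source field src are illustrative placeholders:

PowerMethod<LatticeFermion> power;
RealD lambda_max = power(HermOp, src); // iterates up to 50 times, stops at ~1% relative change
std::cout << GridLogMessage << "largest eigenvalue estimate " << lambda_max << std::endl;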

View File

@ -99,10 +99,13 @@ namespace Grid {
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
bool useSolnAsInitGuess; // if true, the user-supplied solution vector is used as the initial guess for the solver
public:
SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) :
_HermitianRBSolver(HermitianRBSolver)
SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false) :
_HermitianRBSolver(HermitianRBSolver),
useSolnAsInitGuess(_solnAsInitGuess)
{
CBfactorise = 0;
subtractGuess(initSubGuess);
@ -156,7 +159,11 @@ namespace Grid {
if ( subGuess ) guess_save.resize(nblock,grid);
for(int b=0;b<nblock;b++){
guess(src_o[b],sol_o[b]);
if(useSolnAsInitGuess) {
pickCheckerboard(Odd, sol_o[b], out[b]);
} else {
guess(src_o[b],sol_o[b]);
}
if ( subGuess ) {
guess_save[b] = sol_o[b];
@ -216,8 +223,11 @@ namespace Grid {
////////////////////////////////
// Construct the guess
////////////////////////////////
Field tmp(grid);
guess(src_o,sol_o);
if(useSolnAsInitGuess) {
pickCheckerboard(Odd, sol_o, out);
} else {
guess(src_o,sol_o);
}
Field guess_save(grid);
guess_save = sol_o;
@ -251,7 +261,7 @@ namespace Grid {
}
/////////////////////////////////////////////////////////////
// Override in derived. Not virtual as template methods
// Override in derived.
/////////////////////////////////////////////////////////////
virtual void RedBlackSource (Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o) =0;
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol) =0;
@ -264,8 +274,9 @@ namespace Grid {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess)
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess)
{
}
@ -333,8 +344,9 @@ namespace Grid {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess) {};
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
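A hedged example of the extended constructor: passing the new third argument as true makes the solver take the caller-supplied solution field as the initial guess for the odd-checkerboard solve instead of calling the guesser. The fermion matrix Dirac and the fields src, sol are illustrative placeholders:

ConjugateGradient<LatticeFermion> CG(1.0e-8, 10000);
SchurRedBlackDiagMooeeSolve<LatticeFermion> SchurSolver(CG, false, true); // initSubGuess=false, _solnAsInitGuess=true
SchurSolver(Dirac, src, sol); // 'sol' now seeds the odd-checkerboard CG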
//////////////////////////////////////////////////////
@ -405,8 +417,9 @@ namespace Grid {
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess) {};
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{

View File

@ -44,10 +44,13 @@ void CartesianCommunicator::Init(int *argc, char ***argv)
MPI_Initialized(&flag); // needed to coexist with other libs apparently
if ( !flag ) {
MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
// assert (provided == MPI_THREAD_MULTIPLE);
//If there is only 1 comms thread we require any threading mode other than SINGLE; for multiple comms threads we need MULTIPLE
if( (nCommThreads == 1 && provided == MPI_THREAD_SINGLE) ||
(nCommThreads > 1 && provided != MPI_THREAD_MULTIPLE) ) {
if( (nCommThreads == 1) && (provided == MPI_THREAD_SINGLE) ) {
assert(0);
}
if( (nCommThreads > 1) && (provided != MPI_THREAD_MULTIPLE) ) {
assert(0);
}
}
@ -55,9 +58,12 @@ void CartesianCommunicator::Init(int *argc, char ***argv)
// Never clean up as done once.
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
Grid_quiesce_nodes();
GlobalSharedMemory::Init(communicator_world);
GlobalSharedMemory::SharedMemoryAllocate(GlobalSharedMemory::MAX_MPI_SHM_BYTES,
GlobalSharedMemory::Hugepages);
GlobalSharedMemory::SharedMemoryAllocate(
GlobalSharedMemory::MAX_MPI_SHM_BYTES,
GlobalSharedMemory::Hugepages);
Grid_unquiesce_nodes();
}
///////////////////////////////////////////////////////////////////////////
@ -106,8 +112,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
{
_ndimension = processors.size();
_ndimension = processors.size(); assert(_ndimension>=1);
int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
Coordinate parent_processor_coor(_ndimension,0);
Coordinate parent_processors (_ndimension,1);

View File

@ -52,7 +52,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
_processors = processors;
_ndimension = processors.size();
_ndimension = processors.size(); assert(_ndimension>=1);
_processor_coor.resize(_ndimension);
// Require 1^N processor grid for fake

View File

@ -90,7 +90,9 @@ public:
// Create an optimal reordered communicator that makes MPI_Cart_create get it right
//////////////////////////////////////////////////////////////////////////////////////
static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
static void OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicator (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorHypercube (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
///////////////////////////////////////////////////
// Provide shared memory facilities off comm world
///////////////////////////////////////////////////

View File

@ -141,8 +141,22 @@ int Log2Size(int TwoToPower,int MAXLOG2)
}
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
#ifdef HYPERCUBE
#warning "HPE 8600 Hypercube optimisations enabled"
//////////////////////////////////////////////////////////////////////////////
// Look and see if it looks like an HPE 8600 based on hostname conventions
//////////////////////////////////////////////////////////////////////////////
const int namelen = _POSIX_HOST_NAME_MAX;
char name[namelen];
int R;
int I;
int N;
gethostname(name,namelen);
int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
if(nscan==3) OptimalCommunicatorHypercube(processors,optimal_comm);
else OptimalCommunicatorSharedMemory(processors,optimal_comm);
}
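A minimal, self-contained illustration of the hostname test above; the node name "r2i3n7" is a hypothetical HPE 8600 style name of the form r<rack>i<icu>n<node>:

#include <cstdio>
int main(void) {
  char name[] = "r2i3n7";
  int R, I, N;
  int nscan = std::sscanf(name, "r%di%dn%d", &R, &I, &N);
  // nscan == 3 here, so the hypercube-aware layout would be chosen;
  // any other naming scheme falls back to OptimalCommunicatorSharedMemory.
  std::printf("nscan=%d R=%d I=%d N=%d\n", nscan, R, I, N);
  return 0;
}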
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
@ -208,7 +222,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
////////////////////////////////////////////////////////////////
int ndimension = processors.size();
std::vector<int> processor_coor(ndimension);
std::vector<int> WorldDims = processors; std::vector<int> ShmDims (ndimension,1); std::vector<int> NodeDims (ndimension);
std::vector<int> WorldDims = processors.toVector();
std::vector<int> ShmDims (ndimension,1); std::vector<int> NodeDims (ndimension);
std::vector<int> ShmCoor (ndimension); std::vector<int> NodeCoor (ndimension); std::vector<int> WorldCoor(ndimension);
std::vector<int> HyperCoor(ndimension);
int dim = 0;
@ -263,7 +278,9 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
#else
}
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
{
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
@ -316,7 +333,6 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////
// SHMGET
@ -347,7 +363,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
int errsv = errno;
printf("Errno %d\n",errsv);
printf("key %d\n",key);
printf("size %lld\n",size);
printf("size %ld\n",size);
printf("flags %d\n",flags);
perror("shmget");
exit(1);

View File

@ -77,19 +77,18 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogIterative.Active(0);
GridLogDebug.Active(0);
GridLogPerformance.Active(0);
GridLogIntegrator.Active(0);
GridLogIntegrator.Active(1);
GridLogColours.Active(0);
for (int i = 0; i < logstreams.size(); i++) {
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance"))
GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
}
}
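A minimal usage sketch of the routine above; in Grid this is normally driven from the command line during initialisation, so the direct call here is only for illustration:

std::vector<std::string> streams = { "Error", "Warning", "Iterative" };
GridLogConfigure(streams); // activates only the named channels; the new default above also leaves Integrator on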

View File

@ -204,10 +204,10 @@ class BinaryIO {
static inline void le32toh_v(void *file_object,uint64_t bytes)
{
uint32_t *fp = (uint32_t *)file_object;
uint32_t f;
uint64_t count = bytes/sizeof(uint32_t);
thread_for(i,count,{
uint32_t f;
f = fp[i];
// data is in network byte order; convert network to host
f = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
@ -229,10 +229,9 @@ class BinaryIO {
static inline void le64toh_v(void *file_object,uint64_t bytes)
{
uint64_t *fp = (uint64_t *)file_object;
uint64_t f,g;
uint64_t count = bytes/sizeof(uint64_t);
thread_for( i, count, {
uint64_t f,g;
f = fp[i];
// data is in network byte order; convert network to host
g = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
@ -343,7 +342,8 @@ class BinaryIO {
int ieee32 = (format == std::string("IEEE32"));
int ieee64big = (format == std::string("IEEE64BIG"));
int ieee64 = (format == std::string("IEEE64"));
assert(ieee64||ieee32|ieee64big||ieee32big);
assert((ieee64+ieee32+ieee64big+ieee32big)==1);
//////////////////////////////////////////////////////////////////////////////
// Do the I/O
//////////////////////////////////////////////////////////////////////////////
@ -613,6 +613,7 @@ class BinaryIO {
{
std::cout << GridLogMessage << "writeLatticeObject: read test checksum failure, re-writing (" << attemptsLeft << " attempt(s) remaining)" << std::endl;
offset = offsetCopy;
thread_for(x,lsites, { munge(scalardata[x],iodata[x]); });
}
else
{

View File

@ -44,6 +44,12 @@ extern "C" {
NAMESPACE_BEGIN(Grid);
#define GRID_FIELD_NORM "FieldNormMetaData"
#define GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) \
0.5*fabs(FieldNormMetaData_.norm2 - n2ck)/(FieldNormMetaData_.norm2 + n2ck)
#define GRID_FIELD_NORM_CHECK(FieldNormMetaData_, n2ck) \
assert(GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) < 1.0e-5);
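Written out, the macro pair above accepts a configuration when the symmetric relative difference between the stored and recomputed field norms is small (a restatement of the code, not new behaviour):

$$ \mathrm{rdiff} \;=\; \frac{1}{2}\,
\frac{\big|\,\|U\|^2_{\rm metadata} - \|U\|^2_{\rm field}\,\big|}
     {\|U\|^2_{\rm metadata} + \|U\|^2_{\rm field}} \;<\; 10^{-5}. $$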
/////////////////////////////////
// Encode word types as strings
/////////////////////////////////
@ -203,6 +209,7 @@ class GridLimeReader : public BinaryIO {
{
typedef typename vobj::scalar_object sobj;
scidacChecksum scidacChecksum_;
FieldNormMetaData FieldNormMetaData_;
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
std::string format = getFormatString<vobj>();
@ -231,21 +238,52 @@ class GridLimeReader : public BinaryIO {
// std::cout << " ReadLatticeObject from offset "<<offset << std::endl;
BinarySimpleMunger<sobj,sobj> munge;
BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
/////////////////////////////////////////////
// Insist checksum is next record
/////////////////////////////////////////////
readLimeObject(scidacChecksum_,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));
readScidacChecksum(scidacChecksum_,FieldNormMetaData_);
/////////////////////////////////////////////
// Verify checksums
/////////////////////////////////////////////
if(FieldNormMetaData_.norm2 != 0.0){
RealD n2ck = norm2(field);
std::cout << GridLogMessage << "Field norm: metadata= " << FieldNormMetaData_.norm2
<< " / field= " << n2ck << " / rdiff= " << GRID_FIELD_NORM_CALC(FieldNormMetaData_,n2ck) << std::endl;
GRID_FIELD_NORM_CHECK(FieldNormMetaData_,n2ck);
}
assert(scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb)==1);
// find out if next field is a GridFieldNorm
return;
}
}
}
void readScidacChecksum(scidacChecksum &scidacChecksum_,
FieldNormMetaData &FieldNormMetaData_)
{
FieldNormMetaData_.norm2 =0.0;
std::string scidac_str(SCIDAC_CHECKSUM);
std::string field_norm_str(GRID_FIELD_NORM);
while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration)
std::vector<char> xmlc(nbytes+1,'\0');
limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);
std::string xmlstring = std::string(&xmlc[0]);
XmlReader RD(xmlstring, true, "");
if ( !strncmp(limeReaderType(LimeR), field_norm_str.c_str(),strlen(field_norm_str.c_str()) ) ) {
// std::cout << "FieldNormMetaData "<<xmlstring<<std::endl;
read(RD,field_norm_str,FieldNormMetaData_);
}
if ( !strncmp(limeReaderType(LimeR), scidac_str.c_str(),strlen(scidac_str.c_str()) ) ) {
// std::cout << SCIDAC_CHECKSUM << " " <<xmlstring<<std::endl;
read(RD,std::string("scidacChecksum"),scidacChecksum_);
return;
}
}
assert(0);
}
////////////////////////////////////////////
// Read a generic serialisable object
////////////////////////////////////////////
@ -264,7 +302,7 @@ class GridLimeReader : public BinaryIO {
limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);
// std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <<std::endl;
xmlstring = std::string(&xmlc[0]);
xmlstring = std::string(&xmlc[0]);
return;
}
@ -278,8 +316,8 @@ class GridLimeReader : public BinaryIO {
std::string xmlstring;
readLimeObject(xmlstring, record_name);
XmlReader RD(xmlstring, true, "");
read(RD,object_name,object);
XmlReader RD(xmlstring, true, "");
read(RD,object_name,object);
}
};
@ -388,6 +426,8 @@ class GridLimeWriter : public BinaryIO
GridBase *grid = field.Grid();
assert(boss_node == field.Grid()->IsBoss() );
FieldNormMetaData FNMD; FNMD.norm2 = norm2(field);
////////////////////////////////////////////
// Create record header
////////////////////////////////////////////
@ -446,6 +486,7 @@ class GridLimeWriter : public BinaryIO
checksum.suma= streama.str();
checksum.sumb= streamb.str();
if ( boss_node ) {
writeLimeObject(0,0,FNMD,std::string(GRID_FIELD_NORM),std::string(GRID_FIELD_NORM));
writeLimeObject(0,1,checksum,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));
}
}
@ -623,6 +664,12 @@ class IldgWriter : public ScidacWriter {
assert(header.nd==4);
assert(header.nd==header.dimension.size());
//////////////////////////////////////////////////////////////////////////////
// Field norm tests
//////////////////////////////////////////////////////////////////////////////
FieldNormMetaData FieldNormMetaData_;
FieldNormMetaData_.norm2 = norm2(Umu);
//////////////////////////////////////////////////////////////////////////////
// Fill the USQCD info field
//////////////////////////////////////////////////////////////////////////////
@ -631,11 +678,12 @@ class IldgWriter : public ScidacWriter {
info.plaq = header.plaquette;
info.linktr = header.link_trace;
std::cout << GridLogMessage << " Writing config; IldgIO "<<std::endl;
// std::cout << GridLogMessage << " Writing config; IldgIO n2 "<< FieldNormMetaData_.norm2<<std::endl;
//////////////////////////////////////////////
// Fill the Lime file record by record
//////////////////////////////////////////////
writeLimeObject(1,0,header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message
writeLimeObject(0,0,FieldNormMetaData_,FieldNormMetaData_.SerialisableClassName(),std::string(GRID_FIELD_NORM));
writeLimeObject(0,0,_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML));
writeLimeObject(0,1,info,info.SerialisableClassName(),std::string(SCIDAC_FILE_XML));
writeLimeObject(1,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
@ -678,6 +726,7 @@ class IldgReader : public GridLimeReader {
std::string ildgLFN_ ;
scidacChecksum scidacChecksum_;
usqcdInfo usqcdInfo_ ;
FieldNormMetaData FieldNormMetaData_;
// track what we read from file
int found_ildgFormat =0;
@ -686,7 +735,7 @@ class IldgReader : public GridLimeReader {
int found_usqcdInfo =0;
int found_ildgBinary =0;
int found_FieldMetaData =0;
int found_FieldNormMetaData =0;
uint32_t nersc_csum;
uint32_t scidac_csuma;
uint32_t scidac_csumb;
@ -773,11 +822,17 @@ class IldgReader : public GridLimeReader {
found_scidacChecksum = 1;
}
if ( !strncmp(limeReaderType(LimeR), GRID_FIELD_NORM,strlen(GRID_FIELD_NORM)) ) {
XmlReader RD(xmlstring, true, "");
read(RD,GRID_FIELD_NORM,FieldNormMetaData_);
found_FieldNormMetaData = 1;
}
} else {
/////////////////////////////////
// Binary data
/////////////////////////////////
std::cout << GridLogMessage << "ILDG Binary record found : " ILDG_BINARY_DATA << std::endl;
// std::cout << GridLogMessage << "ILDG Binary record found : " ILDG_BINARY_DATA << std::endl;
uint64_t offset= ftello(File);
if ( format == std::string("IEEE64BIG") ) {
GaugeSimpleMunger<dobj, sobj> munge;
@ -845,6 +900,13 @@ class IldgReader : public GridLimeReader {
////////////////////////////////////////////////////////////
// Really really want to mandate a scidac checksum
////////////////////////////////////////////////////////////
if ( found_FieldNormMetaData ) {
RealD nn = norm2(Umu);
GRID_FIELD_NORM_CHECK(FieldNormMetaData_,nn);
std::cout << GridLogMessage<<"FieldNormMetaData matches " << std::endl;
} else {
std::cout << GridLogWarning<<"FieldNormMetaData not found. " << std::endl;
}
if ( found_scidacChecksum ) {
FieldMetaData_.scidac_checksuma = stoull(scidacChecksum_.suma,0,16);
FieldMetaData_.scidac_checksumb = stoull(scidacChecksum_.sumb,0,16);

View File

@ -52,35 +52,40 @@ template<class vobj> static std::string getFormatString (void)
format = std::string("IEEE64BIG");
}
return format;
}
////////////////////////////////////////////////////////////////////////////////
// header specification/interpretation
////////////////////////////////////////////////////////////////////////////////
class FieldMetaData : Serializable {
public:
};
GRID_SERIALIZABLE_CLASS_MEMBERS(FieldMetaData,
int, nd,
std::vector<int>, dimension,
std::vector<std::string>, boundary,
int, data_start,
std::string, hdr_version,
std::string, storage_format,
double, link_trace,
double, plaquette,
uint32_t, checksum,
uint32_t, scidac_checksuma,
uint32_t, scidac_checksumb,
unsigned int, sequence_number,
std::string, data_type,
std::string, ensemble_id,
std::string, ensemble_label,
std::string, ildg_lfn,
std::string, creator,
std::string, creator_hardware,
std::string, creation_date,
std::string, archive_date,
std::string, floating_point);
////////////////////////////////////////////////////////////////////////////////
// header specification/interpretation
////////////////////////////////////////////////////////////////////////////////
class FieldNormMetaData : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(FieldNormMetaData, double, norm2);
};
class FieldMetaData : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(FieldMetaData,
int, nd,
std::vector<int>, dimension,
std::vector<std::string>, boundary,
int, data_start,
std::string, hdr_version,
std::string, storage_format,
double, link_trace,
double, plaquette,
uint32_t, checksum,
uint32_t, scidac_checksuma,
uint32_t, scidac_checksumb,
unsigned int, sequence_number,
std::string, data_type,
std::string, ensemble_id,
std::string, ensemble_label,
std::string, ildg_lfn,
std::string, creator,
std::string, creator_hardware,
std::string, creation_date,
std::string, archive_date,
std::string, floating_point);
// WARNING: non-initialised values might lead to twisted parallel IO
// issues; std::string members are fine because they initialise to size 0
// as per the C++ standard.
@ -89,7 +94,7 @@ public:
link_trace(0.), plaquette(0.), checksum(0),
scidac_checksuma(0), scidac_checksumb(0), sequence_number(0)
{}
};
};
// PB: disable 'using namespace' - this is a header and forces namespace visibility for all
// files that include it

View File

@ -57,14 +57,15 @@ struct StaggeredImplParams {
StaggeredImplParams() {};
};
struct OneFlavourRationalParams : Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(OneFlavourRationalParams,
RealD, lo,
RealD, hi,
int, MaxIter,
RealD, tolerance,
int, degree,
int, precision);
struct OneFlavourRationalParams : Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(OneFlavourRationalParams,
RealD, lo,
RealD, hi,
int, MaxIter,
RealD, tolerance,
int, degree,
int, precision,
int, BoundsCheckFreq);
// MaxIter and tolerance, vectors??
@ -74,15 +75,17 @@ struct OneFlavourRationalParams : Serializable {
int _maxit = 1000,
RealD tol = 1.0e-8,
int _degree = 10,
int _precision = 64)
: lo(_lo),
hi(_hi),
MaxIter(_maxit),
tolerance(tol),
degree(_degree),
precision(_precision){};
};
int _precision = 64,
int _BoundsCheckFreq=20)
: lo(_lo),
hi(_hi),
MaxIter(_maxit),
tolerance(tol),
degree(_degree),
precision(_precision),
BoundsCheckFreq(_BoundsCheckFreq){};
};
NAMESPACE_END(Grid);
#endif

View File

@ -41,7 +41,7 @@ public:
INHERIT_IMPL_TYPES(Impl);
public:
void FreePropagator(const FermionField &in,FermionField &out,RealD mass, std::vector<double> twist, bool fiveD) {
void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary, std::vector<double> twist, bool fiveD) {
FermionField in_k(in.Grid());
FermionField prop_k(in.Grid());
@ -54,15 +54,19 @@ public:
typedef typename Simd::scalar_type Scalar;
Scalar ci(0.0,1.0);
assert(twist.size() == Nd);//check that twist has Nd entries
assert(boundary.size() == Nd);//check that the boundary condition vector has Nd entries
int shift = 0;
if(fiveD) shift = 1;
for(unsigned int nu = 0; nu < Nd; nu++)
{
// Shift coordinate lattice index by 1 to account for 5th dimension.
LatticeCoordinate(coor, nu + shift);
ph = ph + twist[nu]*coor*((1./(in.Grid()->FullDimensions()[nu+shift])));
double boundary_phase = ::acos(real(boundary[nu]));
ph = ph + boundary_phase*coor*((1./(in.Grid()->_fdimensions[nu+shift])));
//momenta for propagator shifted by twist+boundary
twist[nu] = twist[nu] + boundary_phase/((2.0*M_PI));
}
in_buf = exp(Scalar(2.0*M_PI)*ci*ph*(-1.0))*in;
in_buf = exp(ci*ph*(-1.0))*in;
if(fiveD){//FFT only on temporal and spatial dimensions
std::vector<int> mask(Nd+1,1); mask[0] = 0;
@ -75,26 +79,29 @@ public:
this->MomentumSpacePropagatorHt(prop_k,in_k,mass,twist);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
}
//phase for boundary condition
out = out * exp(Scalar(2.0*M_PI)*ci*ph);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<double> twist) {
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary,std::vector<double> twist) {
bool fiveD = true; //5d propagator by default
FreePropagator(in,out,mass,twist,fiveD);
FreePropagator(in,out,mass,boundary,twist,fiveD);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass, bool fiveD) {
std::vector<double> twist(Nd,0.0); //default: periodic boundaries in all directions
FreePropagator(in,out,mass,twist,fiveD);
std::vector<Complex> boundary;
for(int i=0;i<Nd;i++) boundary.push_back(1);//default: periodic boundary conditions
FreePropagator(in,out,mass,boundary,twist,fiveD);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
bool fiveD = true; //5d propagator by default
std::vector<double> twist(Nd,0.0); //default: periodic boundaries in all directions
FreePropagator(in,out,mass,twist,fiveD);
};
std::vector<double> twist(Nd,0.0); //default: twist angle 0
std::vector<Complex> boundary;
for(int i=0;i<Nd;i++) boundary.push_back(1); //default: periodic boundary conditions
FreePropagator(in,out,mass,boundary,twist,fiveD);
};
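A sketch of the phase convention introduced here, as read from the code above: each boundary condition $b_\nu$ is converted to a phase angle and folded into the momentum twist,

$$ \theta_\nu = \arccos\big(\mathrm{Re}\, b_\nu\big), \qquad
   \mathrm{twist}_\nu \;\to\; \mathrm{twist}_\nu + \frac{\theta_\nu}{2\pi}, $$

so antiperiodic boundaries ($b_\nu = -1$, $\theta_\nu = \pi$) shift the allowed momenta by half a unit; a compensating plane-wave phase is applied to the source before the forward FFT and to the result after the backward FFT. Only the real part of $b_\nu$ is inspected, so this assumes a phase in $[0,\pi]$.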
virtual void Instantiatable(void) {};
// Constructors

View File

@ -93,7 +93,7 @@ public:
virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m,std::vector<double> twist) { assert(0);};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<double> twist)
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary,std::vector<double> twist)
{
FFT theFFT((GridCartesian *) in.Grid());
@ -106,27 +106,35 @@ public:
ComplexField coor(in.Grid());
ComplexField ph(in.Grid()); ph = Zero();
FermionField in_buf(in.Grid()); in_buf = Zero();
Scalar ci(0.0,1.0);
assert(twist.size() == Nd);//check that twist has Nd entries
assert(boundary.size() == Nd);//check that the boundary condition vector has Nd entries
for(unsigned int nu = 0; nu < Nd; nu++)
{
LatticeCoordinate(coor, nu);
ph = ph + twist[nu]*coor*((1./(in.Grid()->_fdimensions[nu])));
double boundary_phase = ::acos(real(boundary[nu]));
ph = ph + boundary_phase*coor*((1./(in.Grid()->_fdimensions[nu])));
//momenta for propagator shifted by twist+boundary
twist[nu] = twist[nu] + boundary_phase/((2.0*M_PI));
}
in_buf = (exp(Scalar(2.0*M_PI)*ci*ph*(-1.0)))*in;
in_buf = exp(ci*ph*(-1.0))*in;
theFFT.FFT_all_dim(in_k,in_buf,FFT::forward);
this->MomentumSpacePropagator(prop_k,in_k,mass,twist);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
//phase for boundary condition
out = out * exp(Scalar(2.0*M_PI)*ci*ph);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
std::vector<double> twist(Nd,0.0); //default: periodic boundaries in all directions
FreePropagator(in,out,mass,twist);
};
std::vector<Complex> boundary;
for(int i=0;i<Nd;i++) boundary.push_back(1);//default: periodic boundary conditions
std::vector<double> twist(Nd,0.0); //default: periodic boundaries in all directions
FreePropagator(in,out,mass,boundary,twist);
};
///////////////////////////////////////////////
// Updates gauge field during HMC
@ -146,8 +154,14 @@ public:
Current curr_type,
unsigned int mu,
unsigned int tmin,
unsigned int tmax,
ComplexField &lattice_cmplx)=0;
unsigned int tmax,
ComplexField &lattice_cmplx)=0;
// Only reimplemented in Wilson5D
// Default to just a zero correlation function
virtual void ContractJ5q(FermionField &q_in ,ComplexField &J5q) { J5q=Zero(); };
virtual void ContractJ5q(PropagatorField &q_in,ComplexField &J5q) { J5q=Zero(); };
///////////////////////////////////////////////
// Physical field import/export
///////////////////////////////////////////////

View File

@ -63,6 +63,7 @@ public:
public:
typedef WilsonFermion<Impl> WilsonBase;
virtual int ConstEE(void) { return 0; };
virtual void Instantiatable(void){};
// Constructors
WilsonCloverFermion(GaugeField &_Umu, GridCartesian &Fgrid,

View File

@ -229,6 +229,10 @@ public:
unsigned int tmin,
unsigned int tmax,
ComplexField &lattice_cmplx);
void ContractJ5q(PropagatorField &q_in,ComplexField &J5q);
void ContractJ5q(FermionField &q_in,ComplexField &J5q);
};
NAMESPACE_END(Grid);

View File

@ -898,6 +898,79 @@ void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const Fe
merge(qSiteRev, qSiteVec); \
}
// psi = chiralProjectPlus(Result_s[Ls/2-1]);
// psi+= chiralProjectMinus(Result_s[Ls/2]);
// PJ5q+=localInnerProduct(psi,psi);
template<class vobj>
Lattice<vobj> spProj5p(const Lattice<vobj> & in)
{
GridBase *grid=in.Grid();
Gamma G5(Gamma::Algebra::Gamma5);
Lattice<vobj> ret(grid);
auto ret_v = ret.View();
auto in_v = in.View();
thread_for(ss,grid->oSites(),{
ret_v[ss] = in_v[ss] + G5*in_v[ss];
});
return ret;
}
template<class vobj>
Lattice<vobj> spProj5m(const Lattice<vobj> & in)
{
Gamma G5(Gamma::Algebra::Gamma5);
GridBase *grid=in.Grid();
Lattice<vobj> ret(grid);
auto ret_v = ret.View();
auto in_v = in.View();
thread_for(ss,grid->oSites(),{
ret_v[ss] = in_v[ss] - G5*in_v[ss];
});
return ret;
}
template <class Impl>
void WilsonFermion5D<Impl>::ContractJ5q(FermionField &q_in,ComplexField &J5q)
{
conformable(GaugeGrid(), J5q.Grid());
conformable(q_in.Grid(), FermionGrid());
// 4d field
int Ls = this->Ls;
FermionField psi(GaugeGrid());
FermionField p_plus (GaugeGrid());
FermionField p_minus(GaugeGrid());
FermionField p(GaugeGrid());
ExtractSlice(p_plus , q_in, Ls/2 , 0);
ExtractSlice(p_minus, q_in, Ls/2-1 , 0);
p_plus = spProj5p(p_plus );
p_minus= spProj5m(p_minus);
p=p_plus+p_minus;
J5q = localInnerProduct(p,p);
}
template <class Impl>
void WilsonFermion5D<Impl>::ContractJ5q(PropagatorField &q_in,ComplexField &J5q)
{
conformable(GaugeGrid(), J5q.Grid());
conformable(q_in.Grid(), FermionGrid());
// 4d field
int Ls = this->Ls;
PropagatorField psi(GaugeGrid());
PropagatorField p_plus (GaugeGrid());
PropagatorField p_minus(GaugeGrid());
PropagatorField p(GaugeGrid());
ExtractSlice(p_plus , q_in, Ls/2 , 0);
ExtractSlice(p_minus, q_in, Ls/2-1 , 0);
p_plus = spProj5p(p_plus );
p_minus= spProj5m(p_minus);
p=p_plus+p_minus;
J5q = localInnerProduct(p,p);
}
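As read from the two implementations above, the midpoint density being formed is (a sketch; spProj5p and spProj5m apply the unnormalised projectors $1\pm\gamma_5$):

$$ J_{5q}(x) \;=\; \big|\,(1+\gamma_5)\, q(x,\, s = L_s/2) \;+\; (1-\gamma_5)\, q(x,\, s = L_s/2 - 1)\,\big|^2 . $$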
template <class Impl>
void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
@ -908,6 +981,7 @@ void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
conformable(q_in_1.Grid(), FermionGrid());
conformable(q_in_1.Grid(), q_in_2.Grid());
conformable(_FourDimGrid, q_out.Grid());
PropagatorField tmp1(FermionGrid()), tmp2(FermionGrid());
unsigned int LLs = q_in_1.Grid()->_rdimensions[0];
q_out = Zero();
@ -960,7 +1034,6 @@ void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
}
template <class Impl>
void WilsonFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,

View File

@ -29,14 +29,24 @@ directory
#ifndef GRID_GAUGE_IMPL_TYPES_H
#define GRID_GAUGE_IMPL_TYPES_H
NAMESPACE_BEGIN(Grid);
#define CPS_MD_TIME
#ifdef CPS_MD_TIME
#define HMC_MOMENTUM_DENOMINATOR (2.0)
#else
#define HMC_MOMENTUM_DENOMINATOR (1.0)
#endif
////////////////////////////////////////////////////////////////////////
// Implementation dependent gauge types
////////////////////////////////////////////////////////////////////////
#define INHERIT_GIMPL_TYPES(GImpl) \
typedef typename GImpl::Simd Simd; \
typedef typename GImpl::Scalar Scalar; \
typedef typename GImpl::LinkField GaugeLinkField; \
typedef typename GImpl::Field GaugeField; \
typedef typename GImpl::ComplexField ComplexField;\
@ -54,7 +64,8 @@ NAMESPACE_BEGIN(Grid);
template <class S, int Nrepresentation = Nc, int Nexp = 12 > class GaugeImplTypes {
public:
typedef S Simd;
typedef typename Simd::scalar_type scalar_type;
typedef scalar_type Scalar;
template <typename vtype> using iImplScalar = iScalar<iScalar<iScalar<vtype> > >;
template <typename vtype> using iImplGaugeLink = iScalar<iScalar<iMatrix<vtype, Nrepresentation> > >;
template <typename vtype> using iImplGaugeField = iVector<iScalar<iMatrix<vtype, Nrepresentation> >, Nd>;
@ -85,12 +96,10 @@ public:
///////////////////////////////////////////////////////////
// Move these to another class
// HMC auxiliary functions
static inline void generate_momenta(Field &P, GridParallelRNG &pRNG) {
// specific for SU gauge fields
LinkField Pmu(P.Grid());
Pmu = Zero();
//
static inline void generate_momenta(Field &P, GridParallelRNG &pRNG)
{
// Zbigniew Srocinsky thesis:
//
// P(p) = N \Prod_{x\mu}e^-{1/2 Tr (p^2_mux)}
//
// p_x,mu = c_x,mu,a T_a
@ -101,26 +110,16 @@ public:
//
// = N \Prod_{x,\mu,a} e^-{1/2 (c_xmua/sqrt{2})^2 }
//
// Expect cx' = cxmua/sqrt(2) to be a unit variance gaussian.
//
// Expect cxmua_new variance sqrt(2).
// Was variance cxmua_old variance 1
//
// tau_old * Pold = 1 = tau_old/sqrt(2) * [Pold * sqrt(2)]
// = tau_new * Pnew
// Expect cxmua variance sqrt(2).
//
// Hence tau_new = tau_cps = tau_guido/sqrt(2).
// Must scale the momentum by sqrt(2) to invoke CPS and UKQCD conventions
//
//
// Must scale the momentum by sqrt(2) up to invoke CPS and UKQCD conventions
//
//
// Hence expect cxmua = cx'*sqrt(2).
//
// Seek the scale parameter to be
LinkField Pmu(P.Grid());
Pmu = Zero();
for (int mu = 0; mu < Nd; mu++) {
SU<Nrepresentation>::GaussianFundamentalLieAlgebraMatrix(pRNG, Pmu);
RealD scale = ::sqrt(2) ;
RealD scale = ::sqrt(HMC_MOMENTUM_DENOMINATOR) ;
Pmu = Pmu*scale;
PokeIndex<LorentzIndex>(P, Pmu, mu);
}
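A sketch of the normalisation the HMC_MOMENTUM_DENOMINATOR factor encodes, assuming the usual generator normalisation $\mathrm{Tr}(T^a T^b) = \tfrac{1}{2}\delta^{ab}$:

$$ P(p) \;\propto\; e^{-\frac{1}{2}\,\mathrm{Tr}\, p_{x,\mu}^2}
       \;=\; e^{-\frac{1}{4}\sum_a c_{x,\mu,a}^2},
   \qquad p_{x,\mu} = c_{x,\mu,a}\, T^a , $$

so each coefficient $c_{x,\mu,a}$ is Gaussian with variance 2. GaussianFundamentalLieAlgebraMatrix draws unit-variance components, hence the rescale by $\sqrt{\rm HMC\_MOMENTUM\_DENOMINATOR} = \sqrt{2}$ in the CPS/UKQCD convention.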

View File

@ -36,6 +36,7 @@ NAMESPACE_BEGIN(Grid);
{
public:
typedef S Simd;
typedef typename Simd::scalar_type Scalar;
template <typename vtype>
using iImplGaugeLink = iScalar<iScalar<iScalar<vtype>>>;

View File

@ -74,7 +74,7 @@ public:
virtual void deriv(const GaugeField &Umu,GaugeField & dSdU) {
//extend Ta to include Lorentz indexes
RealD factor_p = c_plaq/RealD(Nc)*0.5;
RealD factor_r = c_rect/RealD(Nc)*0.5;
RealD factor_r = c_rect/RealD(Nc)*0.5;
GridBase *grid = Umu.Grid();

View File

@ -0,0 +1,53 @@
#pragma once
namespace Grid{
namespace QCD{
template<class Field>
void HighBoundCheck(LinearOperatorBase<Field> &HermOp,
Field &Phi,
RealD hi)
{
// Eigenvalue bound check at high end
PowerMethod<Field> power_method;
auto lambda_max = power_method(HermOp,Phi);
std::cout << GridLogMessage << "Pseudofermion action lamda_max "<<lambda_max<<"( bound "<<hi<<")"<<std::endl;
assert( (lambda_max < hi) && " High Bounds Check on operator failed" );
}
template<class Field> void InverseSqrtBoundsCheck(int MaxIter,double tol,
LinearOperatorBase<Field> &HermOp,
Field &GaussNoise,
MultiShiftFunction &PowerNegHalf)
{
GridBase *FermionGrid = GaussNoise._grid;
Field X(FermionGrid);
Field Y(FermionGrid);
Field Z(FermionGrid);
X=GaussNoise;
RealD Nx = norm2(X);
ConjugateGradientMultiShift<Field> msCG(MaxIter,PowerNegHalf);
msCG(HermOp,X,Y);
msCG(HermOp,Y,Z);
RealD Nz = norm2(Z);
HermOp.HermOp(Z,Y);
RealD Ny = norm2(Y);
X=X-Y;
RealD Nd = norm2(X);
std::cout << "************************* "<<std::endl;
std::cout << " noise = "<<Nx<<std::endl;
std::cout << " (MdagM^-1/2)^2 noise = "<<Nz<<std::endl;
std::cout << " MdagM (MdagM^-1/2)^2 noise = "<<Ny<<std::endl;
std::cout << " noise - MdagM (MdagM^-1/2)^2 noise = "<<Nd<<std::endl;
std::cout << "************************* "<<std::endl;
assert( (std::sqrt(Nd/Nx)<tol) && " InverseSqrtBoundsCheck ");
}
}
}
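A sketch of the identity the InverseSqrtBoundsCheck above relies on: if $R(M)$ is a good rational approximation to $M^{-1/2}$ over the spectral range, then applying it twice and multiplying back by $M$ must reproduce the Gaussian noise vector,

$$ X \;\approx\; M\,R(M)^2\, X
   \quad\Longrightarrow\quad
   \sqrt{\frac{\|X - M\,R(M)^2 X\|^2}{\|X\|^2}} \;=\; \sqrt{N_d/N_x} \;<\; \mathrm{tol}, $$

with $N_d$ and $N_x$ the norms printed in the code.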

View File

@ -25,240 +25,302 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
/* END LEGAL */
/////////////////////////////////////////////////////////////////
// Implementation of exact one flavour algorithm (EOFA) //
// using fermion classes defined in: //
// Grid/qcd/action/fermion/DomainWallEOFAFermion.h (Shamir) //
// Grid/qcd/action/fermion/MobiusEOFAFermion.h (Mobius) //
// arXiv: 1403.1683, 1706.05843 //
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
// Implementation of exact one flavour algorithm (EOFA) //
// using fermion classes defined in: //
// Grid/qcd/action/fermion/DomainWallEOFAFermion.h (Shamir) //
// Grid/qcd/action/fermion/MobiusEOFAFermion.h (Mobius) //
// arXiv: 1403.1683, 1706.05843 //
/////////////////////////////////////////////////////////////////
#ifndef QCD_PSEUDOFERMION_EXACT_ONE_FLAVOUR_RATIO_H
#define QCD_PSEUDOFERMION_EXACT_ONE_FLAVOUR_RATIO_H
NAMESPACE_BEGIN(Grid);
namespace Grid{
namespace QCD{
///////////////////////////////////////////////////////////////
// Exact one flavour implementation of DWF determinant ratio //
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
// Exact one flavour implementation of DWF determinant ratio //
///////////////////////////////////////////////////////////////
template<class Impl>
class ExactOneFlavourRatioPseudoFermionAction : public Action<typename Impl::GaugeField>
{
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerNegHalf;
private:
bool use_heatbath_forecasting;
AbstractEOFAFermion<Impl>& Lop; // the basic LH operator
AbstractEOFAFermion<Impl>& Rop; // the basic RH operator
SchurRedBlackDiagMooeeSolve<FermionField> Solver;
FermionField Phi; // the pseudofermion field for this trajectory
public:
ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop, AbstractEOFAFermion<Impl>& _Rop,
OperatorFunction<FermionField>& S, Params& p, bool use_fc=false) : Lop(_Lop), Rop(_Rop), Solver(S),
Phi(_Lop.FermionGrid()), param(p), use_heatbath_forecasting(use_fc)
template<class Impl>
class ExactOneFlavourRatioPseudoFermionAction : public Action<typename Impl::GaugeField>
{
AlgRemez remez(param.lo, param.hi, param.precision);
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerNegHalf;
// MdagM^(+- 1/2)
std::cout << GridLogMessage << "Generating degree " << param.degree << " for x^(-1/2)" << std::endl;
remez.generateApprox(param.degree, 1, 2);
PowerNegHalf.Init(remez, param.tolerance, true);
};
private:
bool use_heatbath_forecasting;
AbstractEOFAFermion<Impl>& Lop; // the basic LH operator
AbstractEOFAFermion<Impl>& Rop; // the basic RH operator
SchurRedBlackDiagMooeeSolve<FermionField> SolverHB;
SchurRedBlackDiagMooeeSolve<FermionField> SolverL;
SchurRedBlackDiagMooeeSolve<FermionField> SolverR;
SchurRedBlackDiagMooeeSolve<FermionField> DerivativeSolverL;
SchurRedBlackDiagMooeeSolve<FermionField> DerivativeSolverR;
FermionField Phi; // the pseudofermion field for this trajectory
virtual std::string action_name() { return "ExactOneFlavourRatioPseudoFermionAction"; }
public:
ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop,
AbstractEOFAFermion<Impl>& _Rop,
OperatorFunction<FermionField>& HeatbathCG,
OperatorFunction<FermionField>& ActionCGL, OperatorFunction<FermionField>& ActionCGR,
OperatorFunction<FermionField>& DerivCGL , OperatorFunction<FermionField>& DerivCGR,
Params& p,
bool use_fc=false) :
Lop(_Lop),
Rop(_Rop),
SolverHB(HeatbathCG,false,true),
SolverL(ActionCGL, false, true), SolverR(ActionCGR, false, true),
DerivativeSolverL(DerivCGL, false, true), DerivativeSolverR(DerivCGR, false, true),
Phi(_Lop.FermionGrid()),
param(p),
use_heatbath_forecasting(use_fc)
{
AlgRemez remez(param.lo, param.hi, param.precision);
virtual std::string LogParameters() {
std::stringstream sstream;
sstream << GridLogMessage << "[" << action_name() << "] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] High :" << param.hi << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Precision :" << param.precision << std::endl;
return sstream.str();
}
// MdagM^(+- 1/2)
std::cout << GridLogMessage << "Generating degree " << param.degree << " for x^(-1/2)" << std::endl;
remez.generateApprox(param.degree, 1, 2);
PowerNegHalf.Init(remez, param.tolerance, true);
};
// Spin projection
void spProj(const FermionField& in, FermionField& out, int sign, int Ls)
{
if(sign == 1){ for(int s=0; s<Ls; ++s){ axpby_ssp_pplus(out, 0.0, in, 1.0, in, s, s); } }
else{ for(int s=0; s<Ls; ++s){ axpby_ssp_pminus(out, 0.0, in, 1.0, in, s, s); } }
}
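// --- Illustrative sketch (not part of Grid) -----------------------------------
// spProj applies the chiral projectors P_{+-} = (1 +- gamma5)/2 slice by slice in
// the fifth dimension via axpby_ssp_pplus / axpby_ssp_pminus. Written out for a
// toy two-component "spinor" in a chiral basis where gamma5 = diag(+1,-1), the
// projectors simply keep one component and kill the other, and satisfy
// P_+ + P_- = 1, P_{+-}^2 = P_{+-}, P_+ P_- = 0.
#include <cstdio>
int main() {
  double psi[2]    = { 0.7, -1.3 };        // (gamma5 = +1 component, gamma5 = -1 component)
  double Pplus[2]  = { psi[0], 0.0 };      // P_+ psi
  double Pminus[2] = { 0.0, psi[1] };      // P_- psi
  std::printf("P+psi + P-psi = (%g, %g)  ==  psi = (%g, %g)\n",
              Pplus[0] + Pminus[0], Pplus[1] + Pminus[1], psi[0], psi[1]);
  return 0;
}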
virtual std::string action_name() { return "ExactOneFlavourRatioPseudoFermionAction"; }
// EOFA heatbath: see Eqn. (29) of arXiv:1706.05843
// We generate a Gaussian noise vector \eta, and then compute
// \Phi = M_{\rm EOFA}^{-1/2} * \eta
// using a rational approximation to the inverse square root
virtual void refresh(const GaugeField& U, GridParallelRNG& pRNG)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField eta (Lop.FermionGrid());
FermionField CG_src (Lop.FermionGrid());
FermionField CG_soln (Lop.FermionGrid());
FermionField Forecast_src(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());
// Use chronological inverter to forecast solutions across poles
std::vector<FermionField> prev_solns;
if(use_heatbath_forecasting){ prev_solns.reserve(param.degree); }
ChronoForecast<AbstractEOFAFermion<Impl>, FermionField> Forecast;
// Seed with Gaussian noise vector (var = 0.5)
RealD scale = std::sqrt(0.5);
gaussian(pRNG,eta);
eta = eta * scale;
printf("Heatbath source vector: <\\eta|\\eta> = %1.15e\n", norm2(eta));
// \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{l} * \gamma_{l} ) * \eta
RealD N(PowerNegHalf.norm);
for(int k=0; k<param.degree; ++k){ N += PowerNegHalf.residues[k] / ( 1.0 + PowerNegHalf.poles[k] ); }
Phi = eta * N;
// LH terms:
// \Phi = \Phi + k \sum_{k=1}^{N_{p}} P_{-} \Omega_{-}^{\dagger} ( H(mf)
// - \gamma_{l} \Delta_{-}(mf,mb) P_{-} )^{-1} \Omega_{-} P_{-} \eta
RealD gamma_l(0.0);
spProj(eta, tmp[0], -1, Lop.Ls);
Lop.Omega(tmp[0], tmp[1], -1, 0);
G5R5(CG_src, tmp[1]);
tmp[1] = Zero();
for(int k=0; k<param.degree; ++k){
gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
Lop.RefreshShiftCoefficients(-gamma_l);
if(use_heatbath_forecasting){ // Forecast CG guess using solutions from previous poles
Lop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Lop, Forecast_src, prev_solns);
Solver(Lop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = Zero(); // Just use zero as the initial guess
Solver(Lop, CG_src, CG_soln);
virtual std::string LogParameters() {
std::stringstream sstream;
sstream << GridLogMessage << "[" << action_name() << "] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] High :" << param.hi << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Precision :" << param.precision << std::endl;
return sstream.str();
}
Lop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] + ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Lop.k ) * tmp[0];
}
Lop.Omega(tmp[1], tmp[0], -1, 1);
spProj(tmp[0], tmp[1], -1, Lop.Ls);
Phi = Phi + tmp[1];
// RH terms:
// \Phi = \Phi - k \sum_{k=1}^{N_{p}} P_{+} \Omega_{+}^{\dagger} ( H(mb)
// + \gamma_{l} \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \eta
spProj(eta, tmp[0], 1, Rop.Ls);
Rop.Omega(tmp[0], tmp[1], 1, 0);
G5R5(CG_src, tmp[1]);
tmp[1] = Zero();
if(use_heatbath_forecasting){ prev_solns.clear(); } // empirically, LH solns don't help for RH solves
for(int k=0; k<param.degree; ++k){
gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
Rop.RefreshShiftCoefficients(-gamma_l*PowerNegHalf.poles[k]);
if(use_heatbath_forecasting){
Rop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Rop, Forecast_src, prev_solns);
Solver(Rop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = Zero();
Solver(Rop, CG_src, CG_soln);
// Spin projection
void spProj(const FermionField& in, FermionField& out, int sign, int Ls)
{
if(sign == 1){ for(int s=0; s<Ls; ++s){ axpby_ssp_pplus(out, 0.0, in, 1.0, in, s, s); } }
else{ for(int s=0; s<Ls; ++s){ axpby_ssp_pminus(out, 0.0, in, 1.0, in, s, s); } }
}
Rop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] - ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Rop.k ) * tmp[0];
}
Rop.Omega(tmp[1], tmp[0], 1, 1);
spProj(tmp[0], tmp[1], 1, Rop.Ls);
Phi = Phi + tmp[1];
// Reset shift coefficients for energy and force evals
Lop.RefreshShiftCoefficients(0.0);
Rop.RefreshShiftCoefficients(-1.0);
// EOFA heatbath: see Eqn. (29) of arXiv:1706.05843
// We generate a Gaussian noise vector \eta, and then compute
// \Phi = M_{\rm EOFA}^{-1/2} * \eta
// using a rational approximation to the inverse square root
//
// As a check of rational require \Phi^dag M_{EOFA} \Phi == eta^dag M^-1/2^dag M M^-1/2 eta = eta^dag eta
//
virtual void refresh(const GaugeField& U, GridParallelRNG& pRNG)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField eta (Lop.FermionGrid());
FermionField CG_src (Lop.FermionGrid());
FermionField CG_soln (Lop.FermionGrid());
FermionField Forecast_src(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());
// Use chronological inverter to forecast solutions across poles
std::vector<FermionField> prev_solns;
if(use_heatbath_forecasting){ prev_solns.reserve(param.degree); }
ChronoForecast<AbstractEOFAFermion<Impl>, FermionField> Forecast;
// Seed with Gaussian noise vector (var = 0.5)
RealD scale = std::sqrt(0.5);
gaussian(pRNG,eta);
eta = eta * scale;
// \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{l} * \gamma_{l} ) * \eta
RealD N(PowerNegHalf.norm);
for(int k=0; k<param.degree; ++k){ N += PowerNegHalf.residues[k] / ( 1.0 + PowerNegHalf.poles[k] ); }
Phi = eta * N;
// LH terms:
// \Phi = \Phi + k \sum_{k=1}^{N_{p}} P_{-} \Omega_{-}^{\dagger} ( H(mf)
// - \gamma_{l} \Delta_{-}(mf,mb) P_{-} )^{-1} \Omega_{-} P_{-} \eta
RealD gamma_l(0.0);
spProj(eta, tmp[0], -1, Lop.Ls);
Lop.Omega(tmp[0], tmp[1], -1, 0);
G5R5(CG_src, tmp[1]);
tmp[1] = zero;
for(int k=0; k<param.degree; ++k){
gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
Lop.RefreshShiftCoefficients(-gamma_l);
if(use_heatbath_forecasting){ // Forecast CG guess using solutions from previous poles
Lop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Lop, Forecast_src, prev_solns);
SolverHB(Lop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = zero; // Just use zero as the initial guess
SolverHB(Lop, CG_src, CG_soln);
}
Lop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] + ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Lop.k ) * tmp[0];
}
Lop.Omega(tmp[1], tmp[0], -1, 1);
spProj(tmp[0], tmp[1], -1, Lop.Ls);
Phi = Phi + tmp[1];
// RH terms:
// \Phi = \Phi - k \sum_{k=1}^{N_{p}} P_{+} \Omega_{+}^{\dagger} ( H(mb)
// + \gamma_{l} \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \eta
spProj(eta, tmp[0], 1, Rop.Ls);
Rop.Omega(tmp[0], tmp[1], 1, 0);
G5R5(CG_src, tmp[1]);
tmp[1] = zero;
if(use_heatbath_forecasting){ prev_solns.clear(); } // empirically, LH solns don't help for RH solves
for(int k=0; k<param.degree; ++k){
gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
Rop.RefreshShiftCoefficients(-gamma_l*PowerNegHalf.poles[k]);
if(use_heatbath_forecasting){
Rop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Rop, Forecast_src, prev_solns);
SolverHB(Rop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = zero;
SolverHB(Rop, CG_src, CG_soln);
}
Rop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] - ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Rop.k ) * tmp[0];
}
Rop.Omega(tmp[1], tmp[0], 1, 1);
spProj(tmp[0], tmp[1], 1, Rop.Ls);
Phi = Phi + tmp[1];
// Reset shift coefficients for energy and force evals
Lop.RefreshShiftCoefficients(0.0);
Rop.RefreshShiftCoefficients(-1.0);
// Bounds check
RealD EtaDagEta = norm2(eta);
// RealD PhiDagMPhi= norm2(eta);
};
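// --- Illustrative sketch (not part of Grid) -----------------------------------
// The consistency check mentioned in the refresh() comment, Phi^dag M_EOFA Phi ==
// eta^dag eta, reduced to a single eigenvalue m of the EOFA operator: if r(m) is
// an accurate approximation to m^(-1/2), then phi = r(m)*eta reproduces the noise
// norm up to the Remez tolerance. Here r is taken to be the exact inverse square
// root; the rational approximation matches it to param.tolerance on [lo,hi].
#include <cmath>
#include <cstdio>
int main() {
  double m   = 3.7;                 // stand-in for an eigenvalue of M_EOFA
  double eta = 0.42;                // stand-in for one noise component
  double r   = 1.0/std::sqrt(m);    // what PowerNegHalf approximates
  double phi = r*eta;               // Phi = M_EOFA^(-1/2) eta
  std::printf("phi*m*phi = %.15e   vs   eta*eta = %.15e\n", phi*m*phi, eta*eta);
  return 0;
}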
void Meofa(const GaugeField& U,const FermionField &phi, FermionField & Mphi)
{
#if 0
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField spProj_Phi(Lop.FermionGrid());
FermionField mPhi(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());
mPhi = phi;
// LH term: S = S - k <\Phi| P_{-} \Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, tmp[0], -1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverL(Lop, tmp[1], tmp[0]);
Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
Lop.Omega(tmp[1], tmp[0], -1, 1);
mPhi = mPhi - Lop.k * innerProduct(spProj_Phi, tmp[0]).real();
// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
// - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, tmp[0], 1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverR(Rop, tmp[1], tmp[0]);
Rop.Dtilde(tmp[0], tmp[1]);
Rop.Omega(tmp[1], tmp[0], 1, 1);
action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();
#endif
}
// EOFA action: see Eqn. (10) of arXiv:1706.05843
virtual RealD S(const GaugeField& U)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField spProj_Phi(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());
// S = <\Phi|\Phi>
RealD action(norm2(Phi));
// LH term: S = S - k <\Phi| P_{-} \Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, tmp[0], -1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverL(Lop, tmp[1], tmp[0]);
Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
Lop.Omega(tmp[1], tmp[0], -1, 1);
action -= Lop.k * innerProduct(spProj_Phi, tmp[0]).real();
// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
// - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, tmp[0], 1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverR(Rop, tmp[1], tmp[0]);
Rop.Dtilde(tmp[0], tmp[1]);
Rop.Omega(tmp[1], tmp[0], 1, 1);
action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();
return action;
};
// EOFA pseudofermion force: see Eqns. (34)-(36) of arXiv:1706.05843
virtual void deriv(const GaugeField& U, GaugeField& dSdU)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField spProj_Phi (Lop.FermionGrid());
FermionField Omega_spProj_Phi(Lop.FermionGrid());
FermionField CG_src (Lop.FermionGrid());
FermionField Chi (Lop.FermionGrid());
FermionField g5_R5_Chi (Lop.FermionGrid());
GaugeField force(Lop.GaugeGrid());
/////////////////////////////////////////////
// PAB:
// Optional single precision derivative ?
/////////////////////////////////////////////
// LH: dSdU = k \chi_{L}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{L}
// \chi_{L} = H(mf)^{-1} \Omega_{-} P_{-} \Phi
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, Omega_spProj_Phi, -1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = zero;
DerivativeSolverL(Lop, CG_src, spProj_Phi);
Lop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = -Lop.k * force;
// RH: dSdU = dSdU - k \chi_{R}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{R}
// \chi_{R} = ( H(mb) - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \Phi
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, Omega_spProj_Phi, 1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = zero;
DerivativeSolverR(Rop, CG_src, spProj_Phi);
Rop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = dSdU + Rop.k * force;
};
};
// EOFA action: see Eqn. (10) of arXiv:1706.05843
virtual RealD S(const GaugeField& U)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField spProj_Phi(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());
// S = <\Phi|\Phi>
RealD action(norm2(Phi));
// LH term: S = S - k <\Phi| P_{-} \Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, tmp[0], -1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = Zero();
Solver(Lop, tmp[1], tmp[0]);
Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
Lop.Omega(tmp[1], tmp[0], -1, 1);
action -= Lop.k * innerProduct(spProj_Phi, tmp[0]).real();
// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
// - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, tmp[0], 1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = Zero();
Solver(Rop, tmp[1], tmp[0]);
Rop.Dtilde(tmp[0], tmp[1]);
Rop.Omega(tmp[1], tmp[0], 1, 1);
action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();
return action;
};
// EOFA pseudofermion force: see Eqns. (34)-(36) of arXiv:1706.05843
virtual void deriv(const GaugeField& U, GaugeField& dSdU)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField spProj_Phi (Lop.FermionGrid());
FermionField Omega_spProj_Phi(Lop.FermionGrid());
FermionField CG_src (Lop.FermionGrid());
FermionField Chi (Lop.FermionGrid());
FermionField g5_R5_Chi (Lop.FermionGrid());
GaugeField force(Lop.GaugeGrid());
// LH: dSdU = k \chi_{L}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{L}
// \chi_{L} = H(mf)^{-1} \Omega_{-} P_{-} \Phi
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, Omega_spProj_Phi, -1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = Zero();
Solver(Lop, CG_src, spProj_Phi);
Lop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = Lop.k * force;
// RH: dSdU = dSdU - k \chi_{R}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{R}
// \chi_{R} = ( H(mb) - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \Phi
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, Omega_spProj_Phi, 1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = Zero();
Solver(Rop, CG_src, spProj_Phi);
Rop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = dSdU - Rop.k * force;
};
};
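// --- Illustrative usage sketch (not part of Grid) ------------------------------
// One way the five-solver constructor above might be wired up inside an HMC
// driver. 'wireUpEOFA' is a hypothetical helper; Lop, Rop and eofa_params are
// assumed to be built elsewhere (their construction needs grids and a gauge field
// and is not shown), and the usual Grid headers are assumed to be in scope.
template<class FermionImplPolicy>
void wireUpEOFA(AbstractEOFAFermion<FermionImplPolicy>& Lop,
                AbstractEOFAFermion<FermionImplPolicy>& Rop,
                OneFlavourRationalParams& eofa_params)
{
  typedef typename FermionImplPolicy::FermionField FermionField;
  ConjugateGradient<FermionField> ActionCG(1.0e-10, 30000);      // tolerance, max iterations
  ConjugateGradient<FermionField> DerivativeCG(1.0e-8, 30000);   // force solves may be looser
  ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
    EOFA(Lop, Rop,
         ActionCG,                     // heatbath solve
         ActionCG, ActionCG,           // LH / RH action solves
         DerivativeCG, DerivativeCG,   // LH / RH force solves
         eofa_params);
  // ... register &EOFA with the HMC action levels as usual (not shown here)
}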
NAMESPACE_END(Grid);
}}
#endif

View File

@ -156,6 +156,13 @@ public:
msCG(Mpc, PhiOdd, Y);
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(FermOp.FermionRedBlackGrid());
gauss = PhiOdd;
HighBoundCheck(Mpc,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,Mpc,gauss,PowerNegHalf);
}
RealD action = norm2(Y);
std::cout << GridLogMessage << "Pseudofermion action FIXME -- is -1/4 "
"solve or -1/2 solve faster??? "

View File

@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -23,257 +23,267 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_ONE_FLAVOUR_EVEN_ODD_RATIONAL_RATIO_H
#define QCD_PSEUDOFERMION_ONE_FLAVOUR_EVEN_ODD_RATIONAL_RATIO_H
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// One flavour rational
///////////////////////////////////////
namespace Grid{
namespace QCD{
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here P/Q \sim R_{1/4} ~ (V^dagV)^{1/4}
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
template<class Impl>
class OneFlavourEvenOddRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
FermionField PhiEven; // the pseudo fermion field for this trajectory
FermionField PhiOdd; // the pseudo fermion field for this trajectory
public:
OneFlavourEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
Params & p
) :
NumOp(_NumOp),
DenOp(_DenOp),
PhiOdd (_NumOp.FermionRedBlackGrid()),
PhiEven(_NumOp.FermionRedBlackGrid()),
param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
///////////////////////////////////////
// One flavour rational
///////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// P(phi) = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/2 (VdagV)^1/4 phi}
// = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/4 (MdagM)^-1/4 (VdagV)^1/4 phi}
//
// Phi = (VdagV)^{-1/4} (MdagM)^{1/4} eta
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
// Here P/Q \sim R_{1/4} ~ (V^dagV)^{1/4}
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
template<class Impl>
class OneFlavourEvenOddRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
RealD scale = std::sqrt(0.5);
INHERIT_IMPL_TYPES(Impl);
FermionField eta(NumOp.FermionGrid());
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp(NumOp.FermionRedBlackGrid());
typedef OneFlavourRationalParams Params;
Params param;
gaussian(pRNG,eta); eta=eta*scale;
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
pickCheckerboard(Even,etaEven,eta);
pickCheckerboard(Odd,etaOdd,eta);
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
FermionField PhiEven; // the pseudo fermion field for this trajectory
FermionField PhiOdd; // the pseudo fermion field for this trajectory
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
public:
OneFlavourEvenOddRatioRationalPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
Params & p
) :
NumOp(_NumOp),
DenOp(_DenOp),
PhiOdd (_NumOp.FermionRedBlackGrid()),
PhiEven(_NumOp.FermionRedBlackGrid()),
param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// P(phi) = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/2 (VdagV)^1/4 phi}
// = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/4 (MdagM)^-1/4 (VdagV)^1/4 phi}
//
// Phi = (VdagV)^{-1/4} (MdagM)^{1/4} eta
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
RealD scale = std::sqrt(0.5);
FermionField eta(NumOp.FermionGrid());
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp(NumOp.FermionRedBlackGrid());
gaussian(pRNG,eta); eta=eta*scale;
pickCheckerboard(Even,etaEven,eta);
pickCheckerboard(Odd,etaOdd,eta);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
// MdagM^1/4 eta
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerQuarter);
msCG_M(MdagM,etaOdd,tmp);
// MdagM^1/4 eta
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerQuarter);
msCG_M(MdagM,etaOdd,tmp);
// VdagV^-1/4 MdagM^1/4 eta
SchurDifferentiableOperator<Impl> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerNegQuarter);
msCG_V(VdagV,tmp,PhiOdd);
// VdagV^-1/4 MdagM^1/4 eta
SchurDifferentiableOperator<Impl> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerNegQuarter);
msCG_V(VdagV,tmp,PhiOdd);
assert(NumOp.ConstEE() == 1);
assert(DenOp.ConstEE() == 1);
PhiEven = Zero();
assert(NumOp.ConstEE() == 1);
assert(DenOp.ConstEE() == 1);
PhiEven = zero;
};
};
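// --- Illustrative sketch (not part of Grid) -----------------------------------
// The change of variables behind the refresh step above,
// Phi = (VdagV)^(-1/4) (MdagM)^(1/4) eta, reduced to commuting scalars v and m
// standing in for eigenvalues of VdagV and MdagM. With this Phi the exponent
// Phi (VdagV)^{1/4} (MdagM)^{-1/2} (VdagV)^{1/4} Phi collapses back to eta*eta,
// i.e. exactly the Gaussian weight the noise was drawn with.
#include <cmath>
#include <cstdio>
int main() {
  double v = 2.3, m = 0.9, eta = 0.61;                        // stand-in eigenvalues and noise
  double phi = std::pow(v,-0.25) * std::pow(m,0.25) * eta;    // refresh: Phi = V^{-1/4} M^{1/4} eta
  double S   = phi * std::pow(v,0.25) * std::pow(m,-0.5) * std::pow(v,0.25) * phi;
  std::printf("action exponent = %.15e   vs   eta*eta = %.15e\n", S, eta*eta);
  return 0;
}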
//////////////////////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
//////////////////////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField X(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
FermionField X(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
// VdagV^1/4 Phi
SchurDifferentiableOperator<Impl> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
msCG_V(VdagV,PhiOdd,X);
// VdagV^1/4 Phi
SchurDifferentiableOperator<Impl> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
msCG_V(VdagV,PhiOdd,X);
// MdagM^-1/4 VdagV^1/4 Phi
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// MdagM^-1/4 VdagV^1/4 Phi
SchurDifferentiableOperator<Impl> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);
// Randomly apply rational bounds checks.
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(NumOp.FermionRedBlackGrid());
gauss = PhiOdd;
HighBoundCheck(MdagM,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagM,gauss,PowerNegHalf);
}
return action;
};
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here, M is some 5D operator and V is the Pauli-Villars field
// N and D make up the rat. poly of the M term and P and Q make up the rat. poly of the denominator term
//
// Need
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
// + chi^dag P/Q d[N/D] P/Q chi
// + chi^dag P/Q N/D d[P/Q] chi
//
// P/Q is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(V^dagV + bk)
//
// d[P/Q] is then
//
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
//
// and similar for N/D.
//
// Need
// MpvPhi_k = [Vdag V + bk]^{-1} chi
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
//
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
//
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
return action;
};
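// --- Illustrative sketch (not part of Grid) -----------------------------------
// The randomly-triggered bounds check above only makes sense if the spectrum of
// MdagM really sits inside the Remez interval [param.lo, param.hi]. HighBoundCheck
// tests the upper end; a power-method style estimate of the largest eigenvalue is
// one way to do that (whether Grid does exactly this is an assumption -- the point
// is the bound being verified). Here the operator is a toy 2x2 SPD matrix.
#include <cmath>
#include <cstdio>
int main() {
  const double A[2][2] = {{4.0, 1.0}, {1.0, 3.0}};   // stand-in for MdagM
  const double hi = 64.0;                            // stand-in for param.hi
  double v[2] = {1.0, 0.0}, lambda = 0.0;
  for (int it = 0; it < 50; ++it) {                  // power iteration
    double w[2] = { A[0][0]*v[0] + A[0][1]*v[1],
                    A[1][0]*v[0] + A[1][1]*v[1] };
    lambda = std::sqrt(w[0]*w[0] + w[1]*w[1]);
    v[0] = w[0]/lambda; v[1] = w[1]/lambda;
  }
  std::printf("largest eigenvalue ~ %.6f, param.hi = %.1f -> %s\n",
              lambda, hi, (lambda < hi) ? "OK" : "BOUND VIOLATED");
  return 0;
}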
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here, M is some 5D operator and V is the Pauli-Villars field
// N and D make up the rat. poly of the M term and P and Q make up the rat. poly of the denominator term
//
// Need
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
// + chi^dag P/Q d[N/D] P/Q chi
// + chi^dag P/Q N/D d[P/Q] chi
//
// P/Q is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(V^dagV + bk)
//
// d[P/Q] is then
//
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
//
// and similar for N/D.
//
// Need
// MpvPhi_k = [Vdag V + bk]^{-1} chi
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
//
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
//
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
const int n_f = PowerNegHalf.poles.size();
const int n_pv = PowerQuarter.poles.size();
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
std::vector<FermionField> MpvPhi_k (n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MfMpvPhi_k (n_f ,NumOp.FermionRedBlackGrid());
const int n_f = PowerNegHalf.poles.size();
const int n_pv = PowerQuarter.poles.size();
FermionField MpvPhi(NumOp.FermionRedBlackGrid());
FermionField MfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField MpvMfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
std::vector<FermionField> MpvPhi_k (n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionRedBlackGrid());
std::vector<FermionField> MfMpvPhi_k (n_f ,NumOp.FermionRedBlackGrid());
GaugeField tmp(NumOp.GaugeGrid());
FermionField MpvPhi(NumOp.FermionRedBlackGrid());
FermionField MfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField MpvMfMpvPhi(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
GaugeField tmp(NumOp.GaugeGrid());
SchurDifferentiableOperator<Impl> VdagV(NumOp);
SchurDifferentiableOperator<Impl> MdagM(DenOp);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);
SchurDifferentiableOperator<Impl> VdagV(NumOp);
SchurDifferentiableOperator<Impl> MdagM(DenOp);
msCG_V(VdagV,PhiOdd,MpvPhi_k,MpvPhi);
msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
msCG_V(VdagV,MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);
RealD ak;
msCG_V(VdagV,PhiOdd,MpvPhi_k,MpvPhi);
msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
msCG_V(VdagV,MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);
dSdU = Zero();
RealD ak;
// With these building blocks
//
// dS/dU =
// \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k (1)
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k (2)
// -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k (3)
dSdU = zero;
//(1)
for(int k=0;k<n_f;k++){
ak = PowerNegHalf.residues[k];
MdagM.Mpc(MfMpvPhi_k[k],Y);
MdagM.MpcDagDeriv(tmp , MfMpvPhi_k[k], Y ); dSdU=dSdU+ak*tmp;
MdagM.MpcDeriv(tmp , Y, MfMpvPhi_k[k] ); dSdU=dSdU+ak*tmp;
}
// With these building blocks
//
// dS/dU =
// \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k (1)
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k (2)
// -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k (3)
//(1)
for(int k=0;k<n_f;k++){
ak = PowerNegHalf.residues[k];
MdagM.Mpc(MfMpvPhi_k[k],Y);
MdagM.MpcDagDeriv(tmp , MfMpvPhi_k[k], Y ); dSdU=dSdU+ak*tmp;
MdagM.MpcDeriv(tmp , Y, MfMpvPhi_k[k] ); dSdU=dSdU+ak*tmp;
}
//(2)
//(3)
for(int k=0;k<n_pv;k++){
//(2)
//(3)
for(int k=0;k<n_pv;k++){
ak = PowerQuarter.residues[k];
ak = PowerQuarter.residues[k];
VdagV.Mpc(MpvPhi_k[k],Y);
VdagV.MpcDagDeriv(tmp,MpvMfMpvPhi_k[k],Y); dSdU=dSdU+ak*tmp;
VdagV.MpcDeriv (tmp,Y,MpvMfMpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.Mpc(MpvPhi_k[k],Y);
VdagV.MpcDagDeriv(tmp,MpvMfMpvPhi_k[k],Y); dSdU=dSdU+ak*tmp;
VdagV.MpcDeriv (tmp,Y,MpvMfMpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.Mpc(MpvMfMpvPhi_k[k],Y); // V as we take Ydag
VdagV.MpcDeriv (tmp,Y, MpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.MpcDagDeriv(tmp,MpvPhi_k[k], Y); dSdU=dSdU+ak*tmp;
VdagV.Mpc(MpvMfMpvPhi_k[k],Y); // V as we take Ydag
VdagV.MpcDeriv (tmp,Y, MpvPhi_k[k]); dSdU=dSdU+ak*tmp;
VdagV.MpcDagDeriv(tmp,MpvPhi_k[k], Y); dSdU=dSdU+ak*tmp;
}
}
//dSdU = Ta(dSdU);
//dSdU = Ta(dSdU);
};
};
};
};
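// --- Illustrative sketch (not part of Grid) -----------------------------------
// The identity behind d[P/Q] in the force comment above,
//   d [ a_k (K + b_k)^{-1} ] = -a_k (K + b_k)^{-1} dK (K + b_k)^{-1},
// checked with plain doubles and a central finite difference. In the deriv()
// routine K is MdagM or VdagV and dK the gauge variation [ dM^dag M + M^dag dM ].
#include <cmath>
#include <cstdio>
int main() {
  double a = 0.7, b = 1.9, K = 2.4, eps = 1.0e-6;
  double analytic = -a / ((K + b) * (K + b));                        // -a (K+b)^{-1} (K+b)^{-1}
  double finite   = (a/(K + eps + b) - a/(K - eps + b)) / (2.0*eps); // numerical derivative in K
  std::printf("analytic = %.12e   finite difference = %.12e\n", analytic, finite);
  return 0;
}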
}
}
NAMESPACE_END(Grid);
#endif

View File

@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -23,190 +23,199 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_ONE_FLAVOUR_RATIONAL_H
#define QCD_PSEUDOFERMION_ONE_FLAVOUR_RATIONAL_H
NAMESPACE_BEGIN(Grid);
namespace Grid{
namespace QCD{
///////////////////////////////////////
// One flavour rational
///////////////////////////////////////
///////////////////////////////////////
// One flavour rational
///////////////////////////////////////
// S_f = chi^dag * N(M^dag*M)/D(M^dag*M) * chi
//
// Here, M is some operator
// N and D make up the rat. poly
//
// S_f = chi^dag * N(M^dag*M)/D(M^dag*M) * chi
//
// Here, M is some operator
// N and D make up the rat. poly
//
template<class Impl>
class OneFlavourRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
template<class Impl>
class OneFlavourRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
private:
private:
FermionOperator<Impl> & FermOp;// the basic operator
FermionOperator<Impl> & FermOp;// the basic operator
// NOT using "Nroots"; IroIro is -- perhaps later, but this wasn't good for us historically
// and hasenbusch works better
// NOT using "Nroots"; IroIro is -- perhaps later, but this wasn't good for us historically
// and hasenbusch works better
FermionField Phi; // the pseudo fermion field for this trajectory
FermionField Phi; // the pseudo fermion field for this trajectory
public:
public:
OneFlavourRationalPseudoFermionAction(FermionOperator<Impl> &Op,
Params & p
) : FermOp(Op), Phi(Op.FermionGrid()), param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
OneFlavourRationalPseudoFermionAction(FermionOperator<Impl> &Op,
Params & p
) : FermOp(Op), Phi(Op.FermionGrid()), param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
virtual std::string action_name(){return "OneFlavourRationalPseudoFermionAction";}
virtual std::string action_name(){return "OneFlavourRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
// P(phi) = e^{- phi^dag (MdagM)^-1/2 phi}
// = e^{- phi^dag (MdagM)^-1/4 (MdagM)^-1/4 phi}
// Phi = (MdagM)^{1/4} eta
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
// P(phi) = e^{- phi^dag (MdagM)^-1/2 phi}
// = e^{- phi^dag (MdagM)^-1/4 (MdagM)^-1/4 phi}
// Phi = (MdagM)^{1/4} eta
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
RealD scale = std::sqrt(0.5);
RealD scale = std::sqrt(0.5);
FermionField eta(FermOp.FermionGrid());
FermionField eta(FermOp.FermionGrid());
gaussian(pRNG,eta);
gaussian(pRNG,eta);
FermOp.ImportGauge(U);
FermOp.ImportGauge(U);
// multishift CG
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(FermOp);
ConjugateGradientMultiShift<FermionField> msCG(param.MaxIter,PowerQuarter);
msCG(MdagMOp,eta,Phi);
// multishift CG
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(FermOp);
ConjugateGradientMultiShift<FermionField> msCG(param.MaxIter,PowerQuarter);
msCG(MdagMOp,eta,Phi);
Phi=Phi*scale;
Phi=Phi*scale;
};
};
//////////////////////////////////////////////////////
// S = phi^dag (Mdag M)^-1/2 phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
//////////////////////////////////////////////////////
// S = phi^dag (Mdag M)^-1/2 phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
FermOp.ImportGauge(U);
FermOp.ImportGauge(U);
FermionField Y(FermOp.FermionGrid());
FermionField Y(FermOp.FermionGrid());
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(FermOp);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(FermOp);
ConjugateGradientMultiShift<FermionField> msCG(param.MaxIter,PowerNegQuarter);
ConjugateGradientMultiShift<FermionField> msCG(param.MaxIter,PowerNegQuarter);
msCG(MdagMOp,Phi,Y);
msCG(MdagMOp,Phi,Y);
RealD action = norm2(Y);
std::cout << GridLogMessage << "Pseudofermion action FIXME -- is -1/4 solve or -1/2 solve faster??? "<<action<<std::endl;
return action;
};
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(FermOp.FermionGrid());
gauss = Phi;
HighBoundCheck(MdagMOp,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagMOp,gauss,PowerNegHalf);
}
//////////////////////////////////////////////////////
// Need
// dS_f/dU = chi^dag d[N/D] chi
//
// N/D is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(M^dagM + bk)
//
// d[N/D] is then
//
// \sum_k -ak [M^dagM+bk]^{-1} [ dM^dag M + M^dag dM ] [M^dag M + bk]^{-1}
//
// Need
// Mf Phi_k = [MdagM+bk]^{-1} Phi
// Mf Phi = \sum_k ak [MdagM+bk]^{-1} Phi
//
// With these building blocks
//
// dS/dU = \sum_k -ak Mf Phi_k^dag [ dM^dag M + M^dag dM ] Mf Phi_k
// S = innerprodReal(Phi,Mf Phi);
//////////////////////////////////////////////////////
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
const int Npole = PowerNegHalf.poles.size();
RealD action = norm2(Y);
std::cout << GridLogMessage << "Pseudofermion action FIXME -- is -1/4 solve or -1/2 solve faster??? "<<action<<std::endl;
return action;
};
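// --- Illustrative sketch (not part of Grid) -----------------------------------
// Why norm2 of a single (MdagM)^(-1/4) application gives the (MdagM)^(-1/2)
// action evaluated above (the content of the FIXME), reduced to one eigenvalue m
// of MdagM: |Y|^2 with Y = m^{-1/4} Phi equals Phi m^{-1/2} Phi.
#include <cmath>
#include <cstdio>
int main() {
  double m = 5.3, phi = 0.8;                           // stand-ins for an eigenvalue and Phi
  double y           = std::pow(m, -0.25) * phi;       // Y = (MdagM)^(-1/4) Phi
  double S_from_norm = y * y;                          // norm2(Y)
  double S_direct    = phi * std::pow(m, -0.5) * phi;  // Phi^dag (MdagM)^(-1/2) Phi
  std::printf("norm2(Y) = %.15e   Phi (MdagM)^(-1/2) Phi = %.15e\n", S_from_norm, S_direct);
  return 0;
}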
std::vector<FermionField> MPhi_k (Npole,FermOp.FermionGrid());
//////////////////////////////////////////////////////
// Need
// dS_f/dU = chi^dag d[N/D] chi
//
// N/D is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(M^dagM + bk)
//
// d[N/D] is then
//
// \sum_k -ak [M^dagM+bk]^{-1} [ dM^dag M + M^dag dM ] [M^dag M + bk]^{-1}
//
// Need
// Mf Phi_k = [MdagM+bk]^{-1} Phi
// Mf Phi = \sum_k ak [MdagM+bk]^{-1} Phi
//
// With these building blocks
//
// dS/dU = \sum_k -ak Mf Phi_k^dag [ dM^dag M + M^dag dM ] Mf Phi_k
// S = innerprodReal(Phi,Mf Phi);
//////////////////////////////////////////////////////
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
FermionField X(FermOp.FermionGrid());
FermionField Y(FermOp.FermionGrid());
const int Npole = PowerNegHalf.poles.size();
GaugeField tmp(FermOp.GaugeGrid());
std::vector<FermionField> MPhi_k (Npole,FermOp.FermionGrid());
FermOp.ImportGauge(U);
FermionField X(FermOp.FermionGrid());
FermionField Y(FermOp.FermionGrid());
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(FermOp);
GaugeField tmp(FermOp.GaugeGrid());
ConjugateGradientMultiShift<FermionField> msCG(param.MaxIter,PowerNegHalf);
FermOp.ImportGauge(U);
msCG(MdagMOp,Phi,MPhi_k);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagMOp(FermOp);
dSdU = Zero();
for(int k=0;k<Npole;k++){
ConjugateGradientMultiShift<FermionField> msCG(param.MaxIter,PowerNegHalf);
RealD ak = PowerNegHalf.residues[k];
msCG(MdagMOp,Phi,MPhi_k);
X = MPhi_k[k];
dSdU = zero;
for(int k=0;k<Npole;k++){
FermOp.M(X,Y);
RealD ak = PowerNegHalf.residues[k];
FermOp.MDeriv(tmp , Y, X,DaggerNo ); dSdU=dSdU+ak*tmp;
FermOp.MDeriv(tmp , X, Y,DaggerYes); dSdU=dSdU+ak*tmp;
X = MPhi_k[k];
}
FermOp.M(X,Y);
//dSdU = Ta(dSdU);
FermOp.MDeriv(tmp , Y, X,DaggerNo ); dSdU=dSdU+ak*tmp;
FermOp.MDeriv(tmp , X, Y,DaggerYes); dSdU=dSdU+ak*tmp;
};
};
}
NAMESPACE_END(Grid);
//dSdU = Ta(dSdU);
};
};
}
}
#endif

View File

@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -23,243 +23,253 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_ONE_FLAVOUR_RATIONAL_RATIO_H
#define QCD_PSEUDOFERMION_ONE_FLAVOUR_RATIONAL_RATIO_H
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// One flavour rational
///////////////////////////////////////
namespace Grid{
namespace QCD{
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here P/Q \sim R_{1/4} ~ (V^dagV)^{1/4}
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
template<class Impl>
class OneFlavourRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
FermionField Phi; // the pseudo fermion field for this trajectory
public:
OneFlavourRatioRationalPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
Params & p
) : NumOp(_NumOp), DenOp(_DenOp), Phi(_NumOp.FermionGrid()), param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
virtual std::string action_name(){return "OneFlavourRatioRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
///////////////////////////////////////
// One flavour rational
///////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// P(phi) = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/2 (VdagV)^1/4 phi}
// = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/4 (MdagM)^-1/4 (VdagV)^1/4 phi}
//
// Phi = (VdagV)^{-1/4} (MdagM)^{1/4} eta
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
// Here P/Q \sim R_{1/4} ~ (V^dagV)^{1/4}
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
template<class Impl>
class OneFlavourRatioRationalPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
RealD scale = std::sqrt(0.5);
INHERIT_IMPL_TYPES(Impl);
FermionField tmp(NumOp.FermionGrid());
FermionField eta(NumOp.FermionGrid());
typedef OneFlavourRationalParams Params;
Params param;
gaussian(pRNG,eta);
MultiShiftFunction PowerHalf ;
MultiShiftFunction PowerNegHalf;
MultiShiftFunction PowerQuarter;
MultiShiftFunction PowerNegQuarter;
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
FermionField Phi; // the pseudo fermion field for this trajectory
// MdagM^1/4 eta
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerQuarter);
msCG_M(MdagM,eta,tmp);
public:
// VdagV^-1/4 MdagM^1/4 eta
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerNegQuarter);
msCG_V(VdagV,tmp,Phi);
OneFlavourRatioRationalPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
Params & p
) : NumOp(_NumOp), DenOp(_DenOp), Phi(_NumOp.FermionGrid()), param(p)
{
AlgRemez remez(param.lo,param.hi,param.precision);
Phi=Phi*scale;
// MdagM^(+- 1/2)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/2)"<<std::endl;
remez.generateApprox(param.degree,1,2);
PowerHalf.Init(remez,param.tolerance,false);
PowerNegHalf.Init(remez,param.tolerance,true);
// MdagM^(+- 1/4)
std::cout<<GridLogMessage << "Generating degree "<<param.degree<<" for x^(1/4)"<<std::endl;
remez.generateApprox(param.degree,1,4);
PowerQuarter.Init(remez,param.tolerance,false);
PowerNegQuarter.Init(remez,param.tolerance,true);
};
virtual std::string action_name(){return "OneFlavourRatioRationalPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] Low :" << param.lo << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] High :" << param.hi << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Tolerance :" << param.tolerance << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Degree :" << param.degree << std::endl;
sstream << GridLogMessage << "["<<action_name()<<"] Precision :" << param.precision << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// P(phi) = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/2 (VdagV)^1/4 phi}
// = e^{- phi^dag (VdagV)^1/4 (MdagM)^-1/4 (MdagM)^-1/4 (VdagV)^1/4 phi}
//
// Phi = (VdagV)^{-1/4} (MdagM)^{1/4} eta
//
// P(eta) = e^{- eta^dag eta}
//
// e^{-x^2/(2 sig^2)} => sig^2 = 0.5.
//
// So eta should be of width sig = 1/sqrt(2).
RealD scale = std::sqrt(0.5);
FermionField tmp(NumOp.FermionGrid());
FermionField eta(NumOp.FermionGrid());
gaussian(pRNG,eta);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
// MdagM^1/4 eta
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerQuarter);
msCG_M(MdagM,eta,tmp);
// VdagV^-1/4 MdagM^1/4 eta
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerNegQuarter);
msCG_V(VdagV,tmp,Phi);
Phi=Phi*scale;
};
};
//////////////////////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
//////////////////////////////////////////////////////
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
FermionField X(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
FermionField X(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
// VdagV^1/4 Phi
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
msCG_V(VdagV,Phi,X);
// VdagV^1/4 Phi
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
msCG_V(VdagV,Phi,X);
// MdagM^-1/4 VdagV^1/4 Phi
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// MdagM^-1/4 VdagV^1/4 Phi
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);
// Randomly apply rational bounds checks.
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(NumOp.FermionGrid());
gauss = Phi;
HighBoundCheck(MdagM,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagM,gauss,PowerNegHalf);
}
return action;
};
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here, M is some 5D operator and V is the Pauli-Villars field
// N and D make up the rat. poly of the M term and P and Q make up the rat. poly of the denominator term
//
// Need
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
// + chi^dag P/Q d[N/D] P/Q chi
// + chi^dag P/Q N/D d[P/Q] chi
//
// P/Q is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(V^dagV + bk)
//
// d[P/Q] is then
//
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
//
// and similar for N/D.
//
// Need
// MpvPhi_k = [Vdag V + bk]^{-1} chi
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
//
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
//
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
return action;
};
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
//
// Here, M is some 5D operator and V is the Pauli-Villars field
// N and D make up the rat. poly of the M term and P and Q make up the rat. poly of the denominator term
//
// Need
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
// + chi^dag P/Q d[N/D] P/Q chi
// + chi^dag P/Q N/D d[P/Q] chi
//
// P/Q is expressed as partial fraction expansion:
//
// a0 + \sum_k ak/(V^dagV + bk)
//
// d[P/Q] is then
//
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
//
// and similar for N/D.
//
// Need
// MpvPhi_k = [Vdag V + bk]^{-1} chi
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
//
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
//
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
//
const int n_f = PowerNegHalf.poles.size();
const int n_pv = PowerQuarter.poles.size();
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
std::vector<FermionField> MpvPhi_k (n_pv,NumOp.FermionGrid());
std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionGrid());
std::vector<FermionField> MfMpvPhi_k (n_f,NumOp.FermionGrid());
const int n_f = PowerNegHalf.poles.size();
const int n_pv = PowerQuarter.poles.size();
FermionField MpvPhi(NumOp.FermionGrid());
FermionField MfMpvPhi(NumOp.FermionGrid());
FermionField MpvMfMpvPhi(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
std::vector<FermionField> MpvPhi_k (n_pv,NumOp.FermionGrid());
std::vector<FermionField> MpvMfMpvPhi_k(n_pv,NumOp.FermionGrid());
std::vector<FermionField> MfMpvPhi_k (n_f,NumOp.FermionGrid());
GaugeField tmp(NumOp.GaugeGrid());
FermionField MpvPhi(NumOp.FermionGrid());
FermionField MfMpvPhi(NumOp.FermionGrid());
FermionField MpvMfMpvPhi(NumOp.FermionGrid());
FermionField Y(NumOp.FermionGrid());
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
GaugeField tmp(NumOp.GaugeGrid());
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> MdagM(DenOp);
MdagMLinearOperator<FermionOperator<Impl> ,FermionField> VdagV(NumOp);
msCG_V(VdagV,Phi,MpvPhi_k,MpvPhi);
msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
msCG_V(VdagV,MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);
ConjugateGradientMultiShift<FermionField> msCG_V(param.MaxIter,PowerQuarter);
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegHalf);
RealD ak;
msCG_V(VdagV,Phi,MpvPhi_k,MpvPhi);
msCG_M(MdagM,MpvPhi,MfMpvPhi_k,MfMpvPhi);
msCG_V(VdagV,MfMpvPhi,MpvMfMpvPhi_k,MpvMfMpvPhi);
dSdU = Zero();
RealD ak;
// With these building blocks
//
// dS/dU =
// \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k (1)
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k (2)
// -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k (3)
dSdU = zero;
//(1)
for(int k=0;k<n_f;k++){
ak = PowerNegHalf.residues[k];
DenOp.M(MfMpvPhi_k[k],Y);
DenOp.MDeriv(tmp , MfMpvPhi_k[k], Y,DaggerYes ); dSdU=dSdU+ak*tmp;
DenOp.MDeriv(tmp , Y, MfMpvPhi_k[k], DaggerNo ); dSdU=dSdU+ak*tmp;
}
// With these building blocks
//
// dS/dU =
// \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k (1)
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k (2)
// -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k (3)
//(1)
for(int k=0;k<n_f;k++){
ak = PowerNegHalf.residues[k];
DenOp.M(MfMpvPhi_k[k],Y);
DenOp.MDeriv(tmp , MfMpvPhi_k[k], Y,DaggerYes ); dSdU=dSdU+ak*tmp;
DenOp.MDeriv(tmp , Y, MfMpvPhi_k[k], DaggerNo ); dSdU=dSdU+ak*tmp;
}
//(2)
//(3)
for(int k=0;k<n_pv;k++){
//(2)
//(3)
for(int k=0;k<n_pv;k++){
ak = PowerQuarter.residues[k];
ak = PowerQuarter.residues[k];
NumOp.M(MpvPhi_k[k],Y);
NumOp.MDeriv(tmp,MpvMfMpvPhi_k[k],Y,DaggerYes); dSdU=dSdU+ak*tmp;
NumOp.MDeriv(tmp,Y,MpvMfMpvPhi_k[k],DaggerNo); dSdU=dSdU+ak*tmp;
NumOp.M(MpvPhi_k[k],Y);
NumOp.MDeriv(tmp,MpvMfMpvPhi_k[k],Y,DaggerYes); dSdU=dSdU+ak*tmp;
NumOp.MDeriv(tmp,Y,MpvMfMpvPhi_k[k],DaggerNo); dSdU=dSdU+ak*tmp;
NumOp.M(MpvMfMpvPhi_k[k],Y); // V as we take Ydag
NumOp.MDeriv(tmp,Y, MpvPhi_k[k], DaggerNo); dSdU=dSdU+ak*tmp;
NumOp.MDeriv(tmp,MpvPhi_k[k], Y,DaggerYes); dSdU=dSdU+ak*tmp;
NumOp.M(MpvMfMpvPhi_k[k],Y); // V as we take Ydag
NumOp.MDeriv(tmp,Y, MpvPhi_k[k], DaggerNo); dSdU=dSdU+ak*tmp;
NumOp.MDeriv(tmp,MpvPhi_k[k], Y,DaggerYes); dSdU=dSdU+ak*tmp;
}
}
//dSdU = Ta(dSdU);
//dSdU = Ta(dSdU);
};
};
};
};
}
}
NAMESPACE_END(Grid);
#endif

View File

@ -29,6 +29,9 @@ directory
#ifndef QCD_PSEUDOFERMION_AGGREGATE_H
#define QCD_PSEUDOFERMION_AGGREGATE_H
// Rational functions
#include <Grid/qcd/action/pseudofermion/Bounds.h>
#include <Grid/qcd/action/pseudofermion/EvenOddSchurDifferentiable.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavour.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavourRatio.h>

View File

@ -84,21 +84,20 @@ public:
// and must multiply by 0.707....
//
// Chroma has this scale factor: two_flavor_monomial_w.h
// CPS uses this factor
// IroIro: does not use this scale. It is absorbed by a change of vars
// in the Phi integral, and thus is only an irrelevant prefactor for
// the partition function.
//
const RealD scale = std::sqrt(0.5);
FermionField eta(FermOp.FermionGrid());
gaussian(pRNG, eta); eta = scale * eta;
FermOp.ImportGauge(U);
FermOp.Mdag(eta, Phi);
};
//////////////////////////////////////////////////////
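
A one-line check of the $\sqrt{0.5}$: gaussian(pRNG, eta) draws each real component with weight $e^{-x^{2}/2}$ (unit variance), whereas the pseudofermion heatbath wants $\eta$ distributed as $e^{-\eta^{\dagger}\eta}$, i.e. variance $1/2$; hence the rescaling by $1/\sqrt{2}$, applied here to $\eta$ before $M^{\dagger}$ acts (a sketch, assuming that RNG convention):

$$ P(\eta)\propto e^{-\eta^{\dagger}\eta},\qquad \phi=M^{\dagger}\eta \;\Longrightarrow\; P(\phi)\propto e^{-\phi^{\dagger}(M^{\dagger}M)^{-1}\phi}. $$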


@ -1,4 +1,4 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -24,186 +24,194 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PSEUDOFERMION_TWO_FLAVOUR_EVEN_ODD_RATIO_H
#define QCD_PSEUDOFERMION_TWO_FLAVOUR_EVEN_ODD_RATIO_H
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////
// Two flavour ratio
///////////////////////////////////////
template<class Impl>
class TwoFlavourEvenOddRatioPseudoFermionAction : public Action<typename Impl::GaugeField> {
public:
INHERIT_IMPL_TYPES(Impl);
private:
FermionOperator<Impl> & NumOp;// the basic operator
FermionOperator<Impl> & DenOp;// the basic operator
OperatorFunction<FermionField> &DerivativeSolver;
OperatorFunction<FermionField> &ActionSolver;
OperatorFunction<FermionField> &HeatbathSolver;
FermionField PhiOdd; // the pseudo fermion field for this trajectory
FermionField PhiEven; // the pseudo fermion field for this trajectory
public:
TwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS ) :
TwoFlavourEvenOddRatioPseudoFermionAction(_NumOp,_DenOp, DS,AS,AS) {};
TwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS, OperatorFunction<FermionField> & HS) :
NumOp(_NumOp),
DenOp(_DenOp),
DerivativeSolver(DS),
ActionSolver(AS),
HeatbathSolver(HS),
PhiEven(_NumOp.FermionRedBlackGrid()),
PhiOdd(_NumOp.FermionRedBlackGrid())
{
conformable(_NumOp.FermionGrid(), _DenOp.FermionGrid());
conformable(_NumOp.FermionRedBlackGrid(), _DenOp.FermionRedBlackGrid());
conformable(_NumOp.GaugeGrid(), _DenOp.GaugeGrid());
conformable(_NumOp.GaugeRedBlackGrid(), _DenOp.GaugeRedBlackGrid());
};
virtual std::string action_name(){return "TwoFlavourEvenOddRatioPseudoFermionAction";}
virtual std::string LogParameters(){
std::stringstream sstream;
sstream << GridLogMessage << "["<<action_name()<<"] has no parameters" << std::endl;
return sstream.str();
}
virtual void refresh(const GaugeField &U, GridParallelRNG& pRNG) {
// P(phi) = e^{- phi^dag Vpc (MpcdagMpc)^-1 Vpcdag phi}
//
// NumOp == V
// DenOp == M
//
// Take phi_o = Vpcdag^{-1} Mpcdag eta_o ; eta_o = Mpcdag^{-1} Vpcdag Phi
//
// P(eta_o) = e^{- eta_o^dag eta_o}
//
// e^{x^2/2 sig^2} => sig^2 = 0.5.
//
RealD scale = std::sqrt(0.5);
FermionField eta (NumOp.FermionGrid());
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp (NumOp.FermionRedBlackGrid());
gaussian(pRNG,eta);
pickCheckerboard(Even,etaEven,eta);
pickCheckerboard(Odd,etaOdd,eta);
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
SchurDifferentiableOperator<Impl> Mpc(DenOp);
SchurDifferentiableOperator<Impl> Vpc(NumOp);
// Odd det factors
Mpc.MpcDag(etaOdd,PhiOdd);
tmp=Zero();
HeatbathSolver(Vpc,PhiOdd,tmp);
Vpc.Mpc(tmp,PhiOdd);
// Even det factors
DenOp.MooeeDag(etaEven,tmp);
NumOp.MooeeInvDag(tmp,PhiEven);
PhiOdd =PhiOdd*scale;
PhiEven=PhiEven*scale;
};
//////////////////////////////////////////////////////
// S = phi^dag V (Mdag M)^-1 Vdag phi
//////////////////////////////////////////////////////
virtual RealD S(const GaugeField &U) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
SchurDifferentiableOperator<Impl> Mpc(DenOp);
SchurDifferentiableOperator<Impl> Vpc(NumOp);
FermionField X(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
Vpc.MpcDag(PhiOdd,Y); // Y= Vdag phi
X=Zero();
ActionSolver(Mpc,Y,X); // X= (MdagM)^-1 Vdag phi
//Mpc.Mpc(X,Y); // Y= Mdag^-1 Vdag phi
// Multiply by Ydag
RealD action = real(innerProduct(Y,X));
//RealD action = norm2(Y);
// The EE factorised block; normally can replace with Zero() if det is constant (gauge field indept)
// Only really clover term that creates this. Leave the EE portion as a future to do to make most
// rapid progress on DWF for now.
//
NumOp.MooeeDag(PhiEven,X);
DenOp.MooeeInvDag(X,Y);
action = action + norm2(Y);
return action;
};
//////////////////////////////////////////////////////
// dS/du = phi^dag dV (Mdag M)^-1 V^dag phi
// - phi^dag V (Mdag M)^-1 [ Mdag dM + dMdag M ] (Mdag M)^-1 V^dag phi
// + phi^dag V (Mdag M)^-1 dV^dag phi
//////////////////////////////////////////////////////
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
NumOp.ImportGauge(U);
DenOp.ImportGauge(U);
SchurDifferentiableOperator<Impl> Mpc(DenOp);
SchurDifferentiableOperator<Impl> Vpc(NumOp);
FermionField X(NumOp.FermionRedBlackGrid());
FermionField Y(NumOp.FermionRedBlackGrid());
// This assignment is necessary to be compliant with the HMC grids
GaugeField force(dSdU.Grid());
//Y=Vdag phi
//X = (Mdag M)^-1 V^dag phi
//Y = (Mdag)^-1 V^dag phi
Vpc.MpcDag(PhiOdd,Y); // Y= Vdag phi
X=Zero();
DerivativeSolver(Mpc,Y,X); // X= (MdagM)^-1 Vdag phi
Mpc.Mpc(X,Y); // Y= Mdag^-1 Vdag phi
// phi^dag V (Mdag M)^-1 dV^dag phi
Vpc.MpcDagDeriv(force , X, PhiOdd ); dSdU = force;
// phi^dag dV (Mdag M)^-1 V^dag phi
Vpc.MpcDeriv(force , PhiOdd, X ); dSdU = dSdU+force;
// - phi^dag V (Mdag M)^-1 Mdag dM (Mdag M)^-1 V^dag phi
// - phi^dag V (Mdag M)^-1 dMdag M (Mdag M)^-1 V^dag phi
Mpc.MpcDeriv(force,Y,X); dSdU = dSdU-force;
Mpc.MpcDagDeriv(force,X,Y); dSdU = dSdU-force;
// FIXME No force contribution from EvenEven assumed here
// Needs a fix for clover.
assert(NumOp.ConstEE() == 1);
assert(DenOp.ConstEE() == 1);
dSdU = -dSdU;
};
};
NAMESPACE_END(Grid);
#endif
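
Below is a minimal usage sketch of the new three-solver constructor; every name and tolerance here is an illustrative assumption rather than something taken from the diff. The extra HeatbathSolver argument exists because the phi_o = Vpcdag^{-1} Mpcdag eta_o inversion in refresh() runs only once per trajectory, so it can afford a tighter stopping condition than the action and derivative solves.

typedef WilsonImplR FimplD;
typedef FimplD::FermionField FermionField;
// NumOp and DenOp: two FermionOperator<FimplD> instances (e.g. Wilson operators at different masses),
// assumed to be constructed already.
ConjugateGradient<FermionField> DerivCG (1.0e-8 , 10000);
ConjugateGradient<FermionField> ActionCG (1.0e-10, 10000);
ConjugateGradient<FermionField> HeatbathCG(1.0e-12, 10000); // tighter: runs once per trajectory
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> Ratio(NumOp, DenOp, DerivCG, ActionCG, HeatbathCG);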


@ -41,19 +41,16 @@ public:
GRID_SERIALIZABLE_CLASS_MEMBERS(IntegratorParameters,
std::string, name, // name of the integrator
unsigned int, MDsteps, // number of outer steps
RealD, trajL, // trajectory length
)
RealD, trajL) // trajectory length
IntegratorParameters(int MDsteps_ = 10, RealD trajL_ = 1.0)
: MDsteps(MDsteps_),
trajL(trajL_){
// empty body constructor
};
trajL(trajL_) {};
template <class ReaderClass, typename std::enable_if<isReader<ReaderClass>::value, int >::type = 0 >
IntegratorParameters(ReaderClass & Reader){
std::cout << "Reading integrator\n";
IntegratorParameters(ReaderClass & Reader)
{
std::cout << GridLogMessage << "Reading integrator\n";
read(Reader, "Integrator", *this);
}
@ -83,17 +80,18 @@ protected:
const ActionSet<Field, RepresentationPolicy> as;
void update_P(Field& U, int level, double ep) {
void update_P(Field& U, int level, double ep)
{
t_P[level] += ep;
update_P(P, U, level, ep);
std::cout << GridLogIntegrator << "[" << level << "] P "
<< " dt " << ep << " : t_P " << t_P[level] << std::endl;
std::cout << GridLogIntegrator << "[" << level << "] P " << " dt " << ep << " : t_P " << t_P[level] << std::endl;
}
// to be used by the actionlevel class to iterate
// over the representations
struct _updateP {
struct _updateP
{
template <class FieldType, class GF, class Repr>
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep,
GF& Mom, GF& U, double ep) {
@ -104,7 +102,7 @@ protected:
GF force = Rep.RtoFundamentalProject(forceR); // Ta for the fundamental rep
Real force_abs = std::sqrt(norm2(force)/(U.Grid()->gSites()));
std::cout << GridLogIntegrator << "Hirep Force average: " << force_abs << std::endl;
Mom -= force * ep ;
Mom -= force * ep * HMC_MOMENTUM_DENOMINATOR;
}
}
} update_P_hireps{};
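
Both momentum updates above multiply the force by HMC_MOMENTUM_DENOMINATOR, and the matching factor divides the kinetic term in S() further down; one without the other would not stay Hamiltonian. A sketch (conventions assumed, constants absorbed into the norm):

$$ H=\frac{1}{k}\,\lVert P\rVert^{2}+S[U],\qquad \dot U = P\,U \;\Longrightarrow\; \dot P \propto -\,k\,\frac{\partial S}{\partial U}, $$

so scaling the kinetic term by $1/k$ requires scaling the force update by $k$ for $H$ to be conserved along the molecular-dynamics trajectory.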
@ -128,18 +126,19 @@ protected:
double end_force = usecond();
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites());
std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;
Mom -= force * ep;
Mom -= force * ep * HMC_MOMENTUM_DENOMINATOR;
double end_full = usecond();
double time_full = (end_full - start_full) / 1e3;
double time_force = (end_force - start_force) / 1e3;
std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
}
// Force from the other representations
as[level].apply(update_P_hireps, Representations, Mom, U, ep);
}
void update_U(Field& U, double ep) {
void update_U(Field& U, double ep)
{
update_U(P, U, ep);
t_U += ep;
@ -147,7 +146,8 @@ protected:
std::cout << GridLogIntegrator << " " << "[" << fl << "] U " << " dt " << ep << " : t_U " << t_U << std::endl;
}
void update_U(MomentaField& Mom, Field& U, double ep) {
void update_U(MomentaField& Mom, Field& U, double ep)
{
// exponential of Mom*U in the gauge fields case
FieldImplementation::update_field(Mom, U, ep);
@ -169,7 +169,8 @@ public:
P(grid),
levels(Aset.size()),
Smearer(Sm),
Representations(grid) {
Representations(grid)
{
t_P.resize(levels, 0.0);
t_U = 0.0;
// initialization of smearer delegated outside of Integrator
@ -179,12 +180,14 @@ public:
virtual std::string integrator_name() = 0;
void print_parameters(){
void print_parameters()
{
std::cout << GridLogMessage << "[Integrator] Name : "<< integrator_name() << std::endl;
Params.print_parameters();
}
void print_actions(){
void print_actions()
{
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::" << std::endl;
std::cout << GridLogMessage << "[Integrator] Action summary: "<<std::endl;
for (int level = 0; level < as.size(); ++level) {
@ -198,7 +201,8 @@ public:
}
void reverse_momenta(){
void reverse_momenta()
{
P *= -1.0;
}
@ -217,7 +221,8 @@ public:
} refresh_hireps{};
// Initialization of momenta and actions
void refresh(Field& U, GridParallelRNG& pRNG) {
void refresh(Field& U, GridParallelRNG& pRNG)
{
assert(P.Grid() == U.Grid());
std::cout << GridLogIntegrator << "Integrator refresh\n";
@ -237,8 +242,7 @@ public:
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us =
Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
as[level].actions.at(actionID)->refresh(Us, pRNG);
}
@ -251,13 +255,11 @@ public:
// over the representations
struct _S {
template <class FieldType, class Repr>
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep,
int level, RealD& H) {
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep, int level, RealD& H) {
for (int a = 0; a < repr_set.size(); ++a) {
RealD Hterm = repr_set.at(a)->S(Rep.U);
std::cout << GridLogMessage << "S Level " << level << " term " << a
<< " H Hirep = " << Hterm << std::endl;
std::cout << GridLogMessage << "S Level " << level << " term " << a << " H Hirep = " << Hterm << std::endl;
H += Hterm;
}
@ -265,22 +267,24 @@ public:
} S_hireps{};
// Calculate action
RealD S(Field& U) { // here also U not used
RealD S(Field& U)
{ // here also U not used
std::cout << GridLogIntegrator << "Integrator action\n";
RealD H = - FieldImplementation::FieldSquareNorm(P)/HMC_MOMENTUM_DENOMINATOR; // - trace (P*P)/denom
RealD H = - FieldImplementation::FieldSquareNorm(P); // - trace (P*P)
RealD Hterm;
std::cout << GridLogMessage << "Momentum action H_p = " << H << "\n";
// Actions
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us =
Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
Hterm = as[level].actions.at(actionID)->S(Us);
std::cout << GridLogMessage << "S Level " << level << " term "
<< actionID << " H = " << Hterm << std::endl;
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
}
as[level].apply(S_hireps, Representations, level, H);
@ -289,7 +293,8 @@ public:
return H;
}
void integrate(Field& U) {
void integrate(Field& U)
{
// reset the clocks
t_U = 0;
for (int level = 0; level < as.size(); ++level) {
@ -305,8 +310,7 @@ public:
// Check the clocks all match on all levels
for (int level = 0; level < as.size(); ++level) {
assert(fabs(t_U - t_P[level]) < 1.0e-6); // must be the same
std::cout << GridLogIntegrator << " times[" << level
<< "]= " << t_P[level] << " " << t_U << std::endl;
std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
}
// and that we indeed got to the end of the trajectory


@ -26,15 +26,15 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
//--------------------------------------------------------------------
/*! @file Integrator_algorithm.h
* @brief Declaration of classes for the Molecular Dynamics algorithms
*
*/
//--------------------------------------------------------------------
#ifndef INTEGRATOR_ALG_INCLUDED
#define INTEGRATOR_ALG_INCLUDED
@ -92,22 +92,17 @@ NAMESPACE_BEGIN(Grid);
* P 1/2 P 1/2
*/
template <class FieldImplementation, class SmearingPolicy,
class RepresentationPolicy =
Representations<FundamentalRepresentation> >
class LeapFrog : public Integrator<FieldImplementation, SmearingPolicy,
RepresentationPolicy> {
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy = Representations<FundamentalRepresentation> >
class LeapFrog : public Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>
{
public:
typedef LeapFrog<FieldImplementation, SmearingPolicy, RepresentationPolicy>
Algorithm;
typedef LeapFrog<FieldImplementation, SmearingPolicy, RepresentationPolicy> Algorithm;
INHERIT_FIELD_TYPES(FieldImplementation);
std::string integrator_name(){return "LeapFrog";}
LeapFrog(GridBase* grid, IntegratorParameters Par,
ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(
grid, Par, Aset, Sm){};
LeapFrog(GridBase* grid, IntegratorParameters Par, ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(grid, Par, Aset, Sm){};
void step(Field& U, int level, int _first, int _last) {
int fl = this->as.size() - 1;
@ -140,21 +135,17 @@ public:
}
};
template <class FieldImplementation, class SmearingPolicy,
class RepresentationPolicy =
Representations<FundamentalRepresentation> >
class MinimumNorm2 : public Integrator<FieldImplementation, SmearingPolicy,
RepresentationPolicy> {
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy = Representations<FundamentalRepresentation> >
class MinimumNorm2 : public Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>
{
private:
const RealD lambda = 0.1931833275037836;
public:
INHERIT_FIELD_TYPES(FieldImplementation);
MinimumNorm2(GridBase* grid, IntegratorParameters Par,
ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(
grid, Par, Aset, Sm){};
MinimumNorm2(GridBase* grid, IntegratorParameters Par, ActionSet<Field, RepresentationPolicy>& Aset, SmearingPolicy& Sm)
: Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>(grid, Par, Aset, Sm){};
std::string integrator_name(){return "MininumNorm2";}
@ -201,14 +192,11 @@ public:
}
};
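
For orientation, the update pattern that the MinimumNorm2 step implements is the standard second-order minimum-norm (Omelyan) scheme; schematically, per outer step of size $\varepsilon$ (a sketch of the textbook form, not a transcription of the code),

$$ e^{\varepsilon \hat H}\;\approx\; e^{\lambda\varepsilon \hat F}\, e^{\frac{\varepsilon}{2}\hat U}\, e^{(1-2\lambda)\varepsilon \hat F}\, e^{\frac{\varepsilon}{2}\hat U}\, e^{\lambda\varepsilon \hat F}, $$

with $\hat F$ the momentum (force) update, $\hat U$ the gauge-field update, and $\lambda$ the tuned constant quoted above, chosen to minimise the leading $O(\varepsilon^{2})$ error coefficient.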
template <class FieldImplementation, class SmearingPolicy,
class RepresentationPolicy =
Representations<FundamentalRepresentation> >
class ForceGradient : public Integrator<FieldImplementation, SmearingPolicy,
RepresentationPolicy> {
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy = Representations<FundamentalRepresentation> >
class ForceGradient : public Integrator<FieldImplementation, SmearingPolicy, RepresentationPolicy>
{
private:
const RealD lambda = 1.0 / 6.0;
;
const RealD chi = 1.0 / 72.0;
const RealD xi = 0.0;
const RealD theta = 0.0;
@ -230,8 +218,7 @@ public:
Field Pfg(U.Grid());
Ufg = U;
Pfg = Zero();
std::cout << GridLogIntegrator << "FG update " << fg_dt << " " << ep
<< std::endl;
std::cout << GridLogIntegrator << "FG update " << fg_dt << " " << ep << std::endl;
// prepare_fg; no prediction/result cache for now
// could relax CG stopping conditions for the
// derivatives in the small step since the force gets multiplied by
@ -270,8 +257,7 @@ public:
this->step(U, level + 1, first_step, 0);
}
this->FG_update_P(U, level, 2 * Chi / ((1.0 - 2.0 * lambda) * eps),
(1.0 - 2.0 * lambda) * eps);
this->FG_update_P(U, level, 2 * Chi / ((1.0 - 2.0 * lambda) * eps), (1.0 - 2.0 * lambda) * eps);
if (level == fl) { // lowest level
this->update_U(U, 0.5 * eps);


@ -10,6 +10,24 @@ const std::array<const Gamma, 4> Gamma::gmu = {{
Gamma(Gamma::Algebra::GammaZ),
Gamma(Gamma::Algebra::GammaT)}};
const std::array<const Gamma, 16> Gamma::gall = {{
Gamma(Gamma::Algebra::Identity),
Gamma(Gamma::Algebra::Gamma5),
Gamma(Gamma::Algebra::GammaX),
Gamma(Gamma::Algebra::GammaY),
Gamma(Gamma::Algebra::GammaZ),
Gamma(Gamma::Algebra::GammaT),
Gamma(Gamma::Algebra::GammaXGamma5),
Gamma(Gamma::Algebra::GammaYGamma5),
Gamma(Gamma::Algebra::GammaZGamma5),
Gamma(Gamma::Algebra::GammaTGamma5),
Gamma(Gamma::Algebra::SigmaXT),
Gamma(Gamma::Algebra::SigmaXY),
Gamma(Gamma::Algebra::SigmaXZ),
Gamma(Gamma::Algebra::SigmaYT),
Gamma(Gamma::Algebra::SigmaYZ),
Gamma(Gamma::Algebra::SigmaZT)}};
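
A sketch of the kind of loop the new gall array is meant to shorten (prop and the container below are placeholders, not part of this commit):

std::vector<LatticeComplex> bilinears;
for (auto &G : Gamma::gall) {
LatticeComplex tr = trace(G * prop);   // one insertion per element of the 16-gamma basis
bilinears.push_back(tr);
}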
const std::array<const char *, Gamma::nGamma> Gamma::name = {{
"-Gamma5 ",
"Gamma5 ",


@ -47,6 +47,7 @@ class Gamma {
static const std::array<std::array<Algebra, nGamma>, nGamma> mul;
static const std::array<Algebra, nGamma> adj;
static const std::array<const Gamma, 4> gmu;
static const std::array<const Gamma, 16> gall;
Algebra g;
public:
accelerator Gamma(Algebra initg): g(initg) {}


@ -10,10 +10,10 @@
NotebookFileLineBreakTest
NotebookFileLineBreakTest
NotebookDataPosition[ 158, 7]
NotebookDataLength[ 75090, 1956]
NotebookOptionsPosition[ 69536, 1867]
NotebookOutlinePosition[ 69898, 1883]
CellTagsIndexPosition[ 69855, 1880]
NotebookDataLength[ 67118, 1714]
NotebookOptionsPosition[ 63485, 1652]
NotebookOutlinePosition[ 63842, 1668]
CellTagsIndexPosition[ 63799, 1665]
WindowFrame->Normal*)
(* Beginning of Notebook Content *)
@ -76,234 +76,6 @@ Cell[BoxData["\<\"/Users/antonin/Development/Grid/lib/qcd/spin/gamma-gen\"\>"]\
Cell[CellGroupData[{
Cell[BoxData[
RowBox[{"FactorInteger", "[", "3152", "]"}]], "Input",
CellChangeTimes->{{3.7432347536316767`*^9, 3.7432347764739027`*^9}, {
3.743234833567358*^9,
3.743234862146022*^9}},ExpressionUUID->"d1a0fd03-85e1-43af-ba80-\
3ca4235675d8"],
Cell[BoxData[
RowBox[{"{",
RowBox[{
RowBox[{"{",
RowBox[{"2", ",", "4"}], "}"}], ",",
RowBox[{"{",
RowBox[{"197", ",", "1"}], "}"}]}], "}"}]], "Output",
CellChangeTimes->{{3.743234836792224*^9,
3.743234862493619*^9}},ExpressionUUID->"16d3f953-4b24-4ed2-ae62-\
306dcab66ca7"]
}, Open ]],
Cell[CellGroupData[{
Cell[BoxData[
RowBox[{"sol", "=",
RowBox[{"Solve", "[",
RowBox[{
RowBox[{
RowBox[{
SuperscriptBox["x", "2"], "+",
SuperscriptBox["y", "2"], "+",
SuperscriptBox["z", "2"]}], "\[Equal]", "2"}], ",",
RowBox[{"{",
RowBox[{"x", ",", "y", ",", "z"}], "}"}], ",", "Integers"}],
"]"}]}]], "Input",
CellChangeTimes->{{3.743235304127721*^9,
3.7432353087929983`*^9}},ExpressionUUID->"f0fa2a5c-3d81-4d75-a447-\
50c7ca3459ff"],
Cell[BoxData[
RowBox[{"{",
RowBox[{
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}]}], "}"}]], "Output",
CellChangeTimes->{{3.743235305220907*^9,
3.743235309139554*^9}},ExpressionUUID->"d9825c95-24bb-442a-8734-\
4c0f47e99dfc"]
}, Open ]],
Cell[BoxData[
RowBox[{
RowBox[{"xmlElem", "[", "x_", "]"}], ":=",
RowBox[{"Print", "[",
RowBox[{"\"\<<elem>\>\"", "<>",
RowBox[{"ToString", "[",
RowBox[{"x", "[",
RowBox[{"[", "1", "]"}], "]"}], "]"}], "<>", "\"\< \>\"", "<>",
RowBox[{"ToString", "[",
RowBox[{"x", "[",
RowBox[{"[", "2", "]"}], "]"}], "]"}], "<>", "\"\< \>\"", "<>",
RowBox[{"ToString", "[",
RowBox[{"x", "[",
RowBox[{"[", "3", "]"}], "]"}], "]"}], "<>", "\"\<</elem>\>\""}],
"]"}]}]], "Input",
CellChangeTimes->{{3.74323534002862*^9, 3.743235351000985*^9}, {
3.743235403233039*^9, 3.743235413488028*^9}, {3.743235473169856*^9,
3.7432354747126904`*^9}},ExpressionUUID->"aea76313-c89e-45e8-b429-\
3f454091666d"],
Cell[CellGroupData[{
Cell[BoxData[
RowBox[{
RowBox[{
RowBox[{"xmlElem", "[",
RowBox[{
RowBox[{"{",
RowBox[{"x", ",", "y", ",", "z"}], "}"}], "/.", "#"}], "]"}], "&"}], "/@",
"sol"}]], "Input",
CellChangeTimes->{{3.743235415820318*^9,
3.743235467025091*^9}},ExpressionUUID->"07da3998-8eab-40ba-8c0b-\
ac6b130cb4fb"],
Cell[CellGroupData[{
Cell[BoxData["\<\"<elem>-1 -1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476581676*^9},ExpressionUUID->"c577ba06-b67a-405a-9ff5-\
2bf7dc898d03"],
Cell[BoxData["\<\"<elem>-1 0 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476588011*^9},ExpressionUUID->"d041aa36-0cea-457c-9d4b-\
1fe9be66e2ab"],
Cell[BoxData["\<\"<elem>-1 0 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476596887*^9},ExpressionUUID->"bf141b55-86b2-4430-a994-\
5c03d5a19441"],
Cell[BoxData["\<\"<elem>-1 1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476605785*^9},ExpressionUUID->"4968a660-4ecf-4b66-9071-\
8bd798c18d21"],
Cell[BoxData["\<\"<elem>0 -1 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476613523*^9},ExpressionUUID->"4e22d943-2680-416b-a1d7-\
a16ca20b781f"],
Cell[BoxData["\<\"<elem>0 -1 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.7432354766218576`*^9},ExpressionUUID->"6dd38385-08b3-4dd9-932f-\
98a00c6db1b2"],
Cell[BoxData["\<\"<elem>0 1 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476629427*^9},ExpressionUUID->"ef3baad3-91d1-4735-9a22-\
53495a624c15"],
Cell[BoxData["\<\"<elem>0 1 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476638257*^9},ExpressionUUID->"413fbb68-5017-4272-a62a-\
fa234e6daaea"],
Cell[BoxData["\<\"<elem>1 -1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476646203*^9},ExpressionUUID->"3a832a60-ae00-414b-a9ac-\
f5e86e67e917"],
Cell[BoxData["\<\"<elem>1 0 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476653907*^9},ExpressionUUID->"bfc79ef6-f6c7-4f1e-88e8-\
005ac314be9c"],
Cell[BoxData["\<\"<elem>1 0 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476662575*^9},ExpressionUUID->"0f892891-f885-489c-9925-\
ddef4d698410"],
Cell[BoxData["\<\"<elem>1 1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.7432354766702337`*^9},ExpressionUUID->"2906f190-e673-4f33-9c34-\
e8e56efe7a27"]
}, Open ]],
Cell[BoxData[
RowBox[{"{",
RowBox[{
"Null", ",", "Null", ",", "Null", ",", "Null", ",", "Null", ",", "Null",
",", "Null", ",", "Null", ",", "Null", ",", "Null", ",", "Null", ",",
"Null"}], "}"}]], "Output",
CellChangeTimes->{
3.7432354246225967`*^9, {3.7432354674878073`*^9,
3.743235476678007*^9}},ExpressionUUID->"500ca3c1-88d8-46e5-a1a1-\
86a7878e5638"]
}, Open ]],
Cell[CellGroupData[{
Cell["Clifford algebra generation", "Section",
CellChangeTimes->{{3.6942089434583883`*^9,
3.694208978559093*^9}},ExpressionUUID->"a5b064b3-3011-4922-8559-\
@ -1048,9 +820,10 @@ generated by the Mathematica notebook gamma-gen/gamma-gen.nb\n\n#include \
"\"\< static const std::array<const char *, nGamma> \
name;\n static const std::array<std::array<Algebra, nGamma>, nGamma> mul;\n\
static const std::array<Algebra, nGamma> adj;\n \
static const std::array<const Gamma, 4> gmu;\n \
Algebra g;\n public:\n \
Gamma(Algebra initg): g(initg) {} \n};\n\n\>\""}]}], ";",
static const std::array<const Gamma, 4> gmu;\n static \
const std::array<const Gamma, 16> gall;\n Algebra \
g;\n public:\n \
Gamma(Algebra initg): g(initg) {} \n};\n\n\>\""}]}], ";",
"\[IndentingNewLine]",
RowBox[{"out", " ", "=",
RowBox[{"out", "<>", "funcCode"}]}], ";", "\[IndentingNewLine]",
@ -1076,7 +849,8 @@ Algebra g;\n public:\n \
3.694963343265525*^9}, {3.694964367519239*^9, 3.69496439461199*^9}, {
3.694964462130747*^9, 3.6949644669959793`*^9}, 3.694964509762739*^9, {
3.694964705045744*^9, 3.694964723148797*^9}, {3.694964992988984*^9,
3.6949649968504257`*^9}},ExpressionUUID->"c7103bd6-b539-4495-b98c-\
3.6949649968504257`*^9}, {3.758291687176977*^9,
3.758291694181189*^9}},ExpressionUUID->"c7103bd6-b539-4495-b98c-\
d4d12ac6cad8"],
Cell["Gamma enum generation:", "Text",
@ -1745,8 +1519,17 @@ namespace QCD {\>\""}]}], ";", "\[IndentingNewLine]",
"\"\<\n\nconst std::array<const Gamma, 4> Gamma::gmu = {{\n \
Gamma(Gamma::Algebra::GammaX),\n Gamma(Gamma::Algebra::GammaY),\n \
Gamma(Gamma::Algebra::GammaZ),\n Gamma(Gamma::Algebra::GammaT)}};\n\nconst \
std::array<const char *, Gamma::nGamma> Gamma::name = {{\n\>\""}]}], ";",
"\[IndentingNewLine]",
std::array<const Gamma, 16> Gamma::gall = {{\n \
Gamma(Gamma::Algebra::Identity),\n Gamma(Gamma::Algebra::Gamma5),\n \
Gamma(Gamma::Algebra::GammaX),\n Gamma(Gamma::Algebra::GammaY),\n \
Gamma(Gamma::Algebra::GammaZ),\n Gamma(Gamma::Algebra::GammaT),\n \
Gamma(Gamma::Algebra::GammaXGamma5),\n Gamma(Gamma::Algebra::GammaYGamma5),\n\
Gamma(Gamma::Algebra::GammaZGamma5),\n \
Gamma(Gamma::Algebra::GammaTGamma5),\n Gamma(Gamma::Algebra::SigmaXT), \
\n Gamma(Gamma::Algebra::SigmaXY), \n Gamma(Gamma::Algebra::SigmaXZ), \
\n Gamma(Gamma::Algebra::SigmaYT),\n Gamma(Gamma::Algebra::SigmaYZ),\n \
Gamma(Gamma::Algebra::SigmaZT)}};\n\nconst std::array<const char *, \
Gamma::nGamma> Gamma::name = {{\n\>\""}]}], ";", "\[IndentingNewLine]",
RowBox[{"Do", "[", "\[IndentingNewLine]",
RowBox[{
RowBox[{"out", " ", "=", " ",
@ -1847,7 +1630,9 @@ Gamma::nGamma> Gamma::mul = {{\\n\>\""}]}], ";", "\[IndentingNewLine]",
3.694963031525289*^9}, {3.694963065828494*^9, 3.694963098327538*^9}, {
3.6949632020836153`*^9, 3.6949632715940027`*^9}, {3.694963440035037*^9,
3.6949634418966017`*^9}, {3.6949651447067547`*^9, 3.694965161228381*^9}, {
3.694967957845581*^9, 3.694967958364184*^9}}],
3.694967957845581*^9, 3.694967958364184*^9}, {3.758291673792514*^9,
3.758291676983432*^9}},ExpressionUUID->"b1b309f8-a3a7-4081-a781-\
c3845e3cd372"],
Cell[BoxData[
RowBox[{
@ -1867,8 +1652,8 @@ Cell[BoxData[""], "Input",
},
WindowSize->{1246, 1005},
WindowMargins->{{282, Automatic}, {Automatic, 14}},
FrontEndVersion->"11.2 for Mac OS X x86 (32-bit, 64-bit Kernel) (September \
10, 2017)",
FrontEndVersion->"11.3 for Mac OS X x86 (32-bit, 64-bit Kernel) (March 5, \
2018)",
StyleDefinitions->"Default.nb"
]
(* End of Notebook Content *)
@ -1888,75 +1673,48 @@ Cell[1948, 43, 570, 11, 73, "Input",ExpressionUUID->"5c937a3e-adfd-4d7e-8fde-afb
Cell[2521, 56, 1172, 17, 34, "Output",ExpressionUUID->"72817ba6-2f6a-4a4d-8212-6f0970f49e7c"]
}, Open ]],
Cell[CellGroupData[{
Cell[3730, 78, 248, 5, 30, "Input",ExpressionUUID->"d1a0fd03-85e1-43af-ba80-3ca4235675d8"],
Cell[3981, 85, 299, 9, 34, "Output",ExpressionUUID->"16d3f953-4b24-4ed2-ae62-306dcab66ca7"]
Cell[3730, 78, 174, 3, 67, "Section",ExpressionUUID->"a5b064b3-3011-4922-8559-ead857cad102"],
Cell[3907, 83, 535, 16, 52, "Input",ExpressionUUID->"aa28f02b-31e1-4df2-9b5d-482177464b59"],
Cell[4445, 101, 250, 4, 35, "Text",ExpressionUUID->"c8896b88-f1db-4ce4-b7a6-0c9838bdb8f1"],
Cell[4698, 107, 5511, 169, 425, "Input",ExpressionUUID->"52a96ff6-047e-4043-86d0-e303866e5f8e"],
Cell[CellGroupData[{
Cell[10234, 280, 2183, 58, 135, "Input",ExpressionUUID->"8b0f4955-2c3f-418c-9226-9be8f87621e8"],
Cell[12420, 340, 1027, 27, 56, "Output",ExpressionUUID->"edd0619f-6f12-4070-a1d2-6b547877fadc"]
}, Open ]],
Cell[CellGroupData[{
Cell[4317, 99, 469, 14, 33, "Input",ExpressionUUID->"f0fa2a5c-3d81-4d75-a447-50c7ca3459ff"],
Cell[4789, 115, 2423, 77, 56, "Output",ExpressionUUID->"d9825c95-24bb-442a-8734-4c0f47e99dfc"]
Cell[13484, 372, 1543, 46, 114, "Input",ExpressionUUID->"fb45123c-c610-4075-99b0-7cd71c728ae7"],
Cell[15030, 420, 1311, 32, 87, "Output",ExpressionUUID->"2ae14565-b412-4dc0-9dce-bd6c1ba5ef27"]
}, Open ]],
Cell[7227, 195, 751, 18, 30, "Input",ExpressionUUID->"aea76313-c89e-45e8-b429-3f454091666d"],
Cell[16356, 455, 179, 3, 35, "Text",ExpressionUUID->"af247231-a58d-417b-987a-26908dafffdb"],
Cell[16538, 460, 2175, 65, 94, "Input",ExpressionUUID->"7c44cadd-e488-4f51-87d8-c64eef11f40c"],
Cell[18716, 527, 193, 3, 35, "Text",ExpressionUUID->"856f1746-1107-4509-a5ce-ac9c7f56cdb1"],
Cell[CellGroupData[{
Cell[8003, 217, 323, 10, 30, "Input",ExpressionUUID->"07da3998-8eab-40ba-8c0b-ac6b130cb4fb"],
Cell[CellGroupData[{
Cell[8351, 231, 156, 3, 24, "Print",ExpressionUUID->"c577ba06-b67a-405a-9ff5-2bf7dc898d03"],
Cell[8510, 236, 156, 3, 24, "Print",ExpressionUUID->"d041aa36-0cea-457c-9d4b-1fe9be66e2ab"],
Cell[8669, 241, 155, 3, 24, "Print",ExpressionUUID->"bf141b55-86b2-4430-a994-5c03d5a19441"],
Cell[8827, 246, 155, 3, 24, "Print",ExpressionUUID->"4968a660-4ecf-4b66-9071-8bd798c18d21"],
Cell[8985, 251, 156, 3, 24, "Print",ExpressionUUID->"4e22d943-2680-416b-a1d7-a16ca20b781f"],
Cell[9144, 256, 157, 3, 24, "Print",ExpressionUUID->"6dd38385-08b3-4dd9-932f-98a00c6db1b2"],
Cell[9304, 261, 155, 3, 24, "Print",ExpressionUUID->"ef3baad3-91d1-4735-9a22-53495a624c15"],
Cell[9462, 266, 154, 3, 24, "Print",ExpressionUUID->"413fbb68-5017-4272-a62a-fa234e6daaea"],
Cell[9619, 271, 155, 3, 24, "Print",ExpressionUUID->"3a832a60-ae00-414b-a9ac-f5e86e67e917"],
Cell[9777, 276, 155, 3, 24, "Print",ExpressionUUID->"bfc79ef6-f6c7-4f1e-88e8-005ac314be9c"],
Cell[9935, 281, 154, 3, 24, "Print",ExpressionUUID->"0f892891-f885-489c-9925-ddef4d698410"],
Cell[10092, 286, 156, 3, 24, "Print",ExpressionUUID->"2906f190-e673-4f33-9c34-e8e56efe7a27"]
}, Open ]],
Cell[10263, 292, 376, 9, 34, "Output",ExpressionUUID->"500ca3c1-88d8-46e5-a1a1-86a7878e5638"]
Cell[18934, 534, 536, 16, 30, "Input",ExpressionUUID->"8674484a-8543-434f-b177-3b27f9353212"],
Cell[19473, 552, 1705, 35, 87, "Output",ExpressionUUID->"c3b3f84d-91f6-41af-af6b-a394ca020511"]
}, Open ]],
Cell[21193, 590, 170, 3, 35, "Text",ExpressionUUID->"518a3040-54b1-4d43-8947-5c7d12efa94d"],
Cell[CellGroupData[{
Cell[10676, 306, 174, 3, 67, "Section",ExpressionUUID->"a5b064b3-3011-4922-8559-ead857cad102"],
Cell[10853, 311, 535, 16, 52, "Input",ExpressionUUID->"aa28f02b-31e1-4df2-9b5d-482177464b59"],
Cell[11391, 329, 250, 4, 35, "Text",ExpressionUUID->"c8896b88-f1db-4ce4-b7a6-0c9838bdb8f1"],
Cell[11644, 335, 5511, 169, 425, "Input",ExpressionUUID->"52a96ff6-047e-4043-86d0-e303866e5f8e"],
Cell[CellGroupData[{
Cell[17180, 508, 2183, 58, 135, "Input",ExpressionUUID->"8b0f4955-2c3f-418c-9226-9be8f87621e8"],
Cell[19366, 568, 1027, 27, 67, "Output",ExpressionUUID->"edd0619f-6f12-4070-a1d2-6b547877fadc"]
}, Open ]],
Cell[CellGroupData[{
Cell[20430, 600, 1543, 46, 114, "Input",ExpressionUUID->"fb45123c-c610-4075-99b0-7cd71c728ae7"],
Cell[21976, 648, 1311, 32, 98, "Output",ExpressionUUID->"2ae14565-b412-4dc0-9dce-bd6c1ba5ef27"]
}, Open ]],
Cell[23302, 683, 179, 3, 35, "Text",ExpressionUUID->"af247231-a58d-417b-987a-26908dafffdb"],
Cell[23484, 688, 2175, 65, 94, "Input",ExpressionUUID->"7c44cadd-e488-4f51-87d8-c64eef11f40c"],
Cell[25662, 755, 193, 3, 35, "Text",ExpressionUUID->"856f1746-1107-4509-a5ce-ac9c7f56cdb1"],
Cell[CellGroupData[{
Cell[25880, 762, 536, 16, 30, "Input",ExpressionUUID->"8674484a-8543-434f-b177-3b27f9353212"],
Cell[26419, 780, 1705, 35, 87, "Output",ExpressionUUID->"c3b3f84d-91f6-41af-af6b-a394ca020511"]
}, Open ]],
Cell[28139, 818, 170, 3, 35, "Text",ExpressionUUID->"518a3040-54b1-4d43-8947-5c7d12efa94d"],
Cell[CellGroupData[{
Cell[28334, 825, 536, 14, 30, "Input",ExpressionUUID->"61a2e974-2b39-4a07-8043-2dfd39a70569"],
Cell[28873, 841, 6754, 167, 303, "Output",ExpressionUUID->"73480ac0-3043-4077-80cc-b952a94c822a"]
Cell[21388, 597, 536, 14, 30, "Input",ExpressionUUID->"61a2e974-2b39-4a07-8043-2dfd39a70569"],
Cell[21927, 613, 6754, 167, 303, "Output",ExpressionUUID->"73480ac0-3043-4077-80cc-b952a94c822a"]
}, Open ]]
}, Open ]],
Cell[CellGroupData[{
Cell[35676, 1014, 226, 4, 67, "Section",ExpressionUUID->"4e833cd6-9f0e-4aa3-a873-3d579e874720"],
Cell[35905, 1020, 188, 4, 44, "Text",ExpressionUUID->"6d27fc04-3a60-4e03-8df7-3dd3aeee35b4"],
Cell[36096, 1026, 2980, 53, 703, "Input",ExpressionUUID->"c7103bd6-b539-4495-b98c-d4d12ac6cad8"],
Cell[39079, 1081, 221, 4, 44, "Text",ExpressionUUID->"0625593d-290f-4a39-9d80-8e2c6fdbc94e"],
Cell[39303, 1087, 4936, 150, 682, "Input",ExpressionUUID->"1ad4904c-352f-4b1d-a7c7-91e1b0549409"],
Cell[44242, 1239, 2645, 56, 199, "Input",ExpressionUUID->"0221674f-9b63-4662-91bc-ccc8c6ae9589"],
Cell[46890, 1297, 209, 4, 44, "Text",ExpressionUUID->"d2d2257a-487b-416f-bc40-abd4482225f7"],
Cell[47102, 1303, 15306, 397, 2131, "Input",ExpressionUUID->"daea68a9-c9e8-46ab-9bc8-5186e2cf477c"],
Cell[62411, 1702, 137, 2, 44, "Text",ExpressionUUID->"76ba9d5a-7ee3-4888-be7e-6377003275e8"],
Cell[62551, 1706, 521, 12, 30, "Input",ExpressionUUID->"4ec61f4c-3fd3-49ea-b5ef-6f7f04a16b34"]
Cell[28730, 786, 226, 4, 67, "Section",ExpressionUUID->"4e833cd6-9f0e-4aa3-a873-3d579e874720"],
Cell[28959, 792, 188, 4, 44, "Text",ExpressionUUID->"6d27fc04-3a60-4e03-8df7-3dd3aeee35b4"],
Cell[29150, 798, 3104, 55, 724, "Input",ExpressionUUID->"c7103bd6-b539-4495-b98c-d4d12ac6cad8"],
Cell[32257, 855, 221, 4, 44, "Text",ExpressionUUID->"0625593d-290f-4a39-9d80-8e2c6fdbc94e"],
Cell[32481, 861, 4936, 150, 682, "Input",ExpressionUUID->"1ad4904c-352f-4b1d-a7c7-91e1b0549409"],
Cell[37420, 1013, 2645, 56, 199, "Input",ExpressionUUID->"0221674f-9b63-4662-91bc-ccc8c6ae9589"],
Cell[40068, 1071, 209, 4, 44, "Text",ExpressionUUID->"d2d2257a-487b-416f-bc40-abd4482225f7"],
Cell[40280, 1077, 15306, 397, 2131, "Input",ExpressionUUID->"daea68a9-c9e8-46ab-9bc8-5186e2cf477c"],
Cell[55589, 1476, 137, 2, 44, "Text",ExpressionUUID->"76ba9d5a-7ee3-4888-be7e-6377003275e8"],
Cell[55729, 1480, 521, 12, 30, "Input",ExpressionUUID->"4ec61f4c-3fd3-49ea-b5ef-6f7f04a16b34"]
}, Open ]],
Cell[CellGroupData[{
Cell[63109, 1723, 167, 2, 67, "Section",ExpressionUUID->"a4458b3a-09b5-4e36-a1fc-781d6702b2dc"],
Cell[63279, 1727, 5693, 122, 829, "Input",ExpressionUUID->"b1b309f8-a3a7-4081-a781-c3845e3cd372"],
Cell[68975, 1851, 448, 10, 30, "Input",ExpressionUUID->"cba42949-b0f2-42ce-aebd-ffadfd83ef88"],
Cell[69426, 1863, 94, 1, 30, "Input",ExpressionUUID->"6175b72c-af9f-43c2-b4ca-bd84c48a456d"]
Cell[56287, 1497, 167, 2, 67, "Section",ExpressionUUID->"a4458b3a-09b5-4e36-a1fc-781d6702b2dc"],
Cell[56457, 1501, 6464, 133, 1207, "Input",ExpressionUUID->"b1b309f8-a3a7-4081-a781-c3845e3cd372"],
Cell[62924, 1636, 448, 10, 30, "Input",ExpressionUUID->"cba42949-b0f2-42ce-aebd-ffadfd83ef88"],
Cell[63375, 1648, 94, 1, 30, "Input",ExpressionUUID->"6175b72c-af9f-43c2-b4ca-bd84c48a456d"]
}, Open ]]
}
]


@ -995,21 +995,20 @@ void A2Autils<FImpl>::ContractWWVV(std::vector<PropagatorField> &WWVV,
auto vs_v = vs[s].View();
auto tmp1 = vs_v[ss];
vobj tmp2 = Zero();
vobj tmp3 = Zero();
for(int d=d_o;d<MIN(d_o+d_unroll,N_d);d++){
Scalar_v coeff = WW_sd(t,s,d);
auto vd_v = vd[d].View();
mac(&tmp2 ,& coeff, & vd_v[ss]);
Scalar_v coeff = WW_sd(t,s,d);
tmp3 = conjugate(vd_v[ss]);
mac(&tmp2, &coeff, &tmp3);
}
//////////////////////////
// Fast outer product of tmp1 with a sum of terms suppressed by d_unroll
//////////////////////////
tmp2 = conjugate(tmp2);
auto WWVV_v = WWVV[t].View();
for(int s1=0;s1<Ns;s1++){
for(int s2=0;s2<Ns;s2++){
WWVV_v[ss]()(s1,s2)(0,0) += tmp1()(s1)(0)*tmp2()(s2)(0);
WWVV_v[ss]()(s1,s2)(0,1) += tmp1()(s1)(0)*tmp2()(s2)(1);
WWVV_v[ss]()(s1,s2)(0,2) += tmp1()(s1)(0)*tmp2()(s2)(2);
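
The functional change in this hunk is where the complex conjugation lands: previously the accumulated sum was conjugated at the end, which also conjugated the WW coefficients; now only the v-vector is conjugated inside the loop. Schematically,

$$ \text{before: } t_{2}=\overline{\sum_{d} c_{tsd}\, v_{d}(x)}=\sum_{d}\bar c_{tsd}\,\bar v_{d}(x), \qquad \text{after: } t_{2}=\sum_{d} c_{tsd}\,\overline{v_{d}(x)}, $$

which differ whenever the coefficients $c_{tsd}=\mathrm{WW\_sd}(t,s,d)$ are complex.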


@ -0,0 +1,87 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./Grid/qcd/utils/CovariantSmearing.h
Copyright (C) 2016
Author: Azusa Yamaguchi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
#pragma once
namespace Grid {
namespace QCD {
template <class Gimpl> class CovariantSmearing : public Gimpl
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
typedef typename Gimpl::GaugeLinkField GaugeMat;
typedef typename Gimpl::GaugeField GaugeLorentz;
template<typename T>
static void GaussianSmear(const std::vector<LatticeColourMatrix>& U,
T& chi,
const Real& width, int Iterations, int orthog)
{
GridBase *grid = chi._grid;
T psi(grid);
////////////////////////////////////////////////////////////////////////////////////
// Follow Chroma conventions for width to keep compatibility with previous data
// Free field iterates
// chi = (1 - w^2/4N p^2)^N chi
//
// ~ (e^(-w^2/4N p^2)^N chi
// ~ (e^(-w^2/4 p^2) chi
// ~ (e^(-w'^2/2 p^2) chi [ w' = w/sqrt(2) ]
//
// Which in coordinate space is proportional to
//
// e^(-x^2/w^2) = e^(-x^2/2w'^2)
//
// The 4 is a bit unconventional from Gaussian width perspective, but... it's Chroma convention.
// 2nd derivative approx d^2/dx^2 = x+mu + x-mu - 2x
//
// d^2/dx^2 = - p^2
//
// chi = ( 1 + w^2/4N d^2/dx^2 )^N chi
//
////////////////////////////////////////////////////////////////////////////////////
Real coeff = (width*width) / Real(4*Iterations);
int dims = Nd;
if( orthog < Nd ) dims=Nd-1;
for(int n = 0; n < Iterations; ++n) {
psi = (-2.0*dims)*chi;
for(int mu=0;mu<Nd;mu++) {
if ( mu != orthog ) {
psi = psi + Gimpl::CovShiftForward(U[mu],mu,chi);
psi = psi + Gimpl::CovShiftBackward(U[mu],mu,chi);
}
}
chi = chi + coeff*psi;
}
}
};
}}
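
A usage sketch for the new smearing class (the surrounding setup is assumed, not part of the file): smear a propagator source in the three spatial directions only, with the Chroma-convention width explained in the comment above.

GridBase *grid = Umu.Grid();                      // Umu: a LatticeGaugeField assumed to exist
std::vector<LatticeColourMatrix> U(Nd, grid);
for (int mu = 0; mu < Nd; mu++) U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
LatticePropagator src(grid);                      // ... filled with a point or wall source ...
CovariantSmearing<PeriodicGimplR>::GaussianSmear(U, src, 4.0, 50, Tdir); // width 4.0, 50 iterations, time direction untouched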


@ -31,6 +31,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
NAMESPACE_BEGIN(Grid);
template <class Gimpl>
class FourierAcceleratedGaugeFixer : public Gimpl {
public:
@ -45,30 +46,57 @@ public:
A[mu] = Ta(U[mu]) * cmi;
}
}
static void DmuAmu(const std::vector<GaugeMat> &A,GaugeMat &dmuAmu) {
static void DmuAmu(const std::vector<GaugeMat> &A,GaugeMat &dmuAmu,int orthog) {
dmuAmu=Zero();
for(int mu=0;mu<Nd;mu++){
dmuAmu = dmuAmu + A[mu] - Cshift(A[mu],mu,-1);
if ( mu != orthog ) {
dmuAmu = dmuAmu + A[mu] - Cshift(A[mu],mu,-1);
}
}
}
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false) {
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
GridBase *grid = Umu.Grid();
GaugeMat xform(grid);
SteepestDescentGaugeFix(Umu,xform,alpha,maxiter,Omega_tol,Phi_tol,Fourier,orthog);
}
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,GaugeMat &xform,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
GridBase *grid = Umu.Grid();
Real org_plaq =WilsonLoops<Gimpl>::avgPlaquette(Umu);
Real org_link_trace=WilsonLoops<Gimpl>::linkTrace(Umu);
Real old_trace = org_link_trace;
Real trG;
xform=1.0;
std::vector<GaugeMat> U(Nd,grid);
GaugeMat dmuAmu(grid);
for(int i=0;i<maxiter;i++){
for(int mu=0;mu<Nd;mu++) U[mu]= PeekIndex<LorentzIndex>(Umu,mu);
if ( Fourier==false ) {
trG = SteepestDescentStep(U,alpha,dmuAmu);
{
Real plaq =WilsonLoops<Gimpl>::avgPlaquette(Umu);
Real link_trace=WilsonLoops<Gimpl>::linkTrace(Umu);
if( (orthog>=0) && (orthog<Nd) ){
std::cout << GridLogMessage << " Gauge fixing to Coulomb gauge time="<<orthog<< " plaq= "<<plaq<<" link trace = "<<link_trace<< std::endl;
} else {
trG = FourierAccelSteepestDescentStep(U,alpha,dmuAmu);
std::cout << GridLogMessage << " Gauge fixing to Landau gauge plaq= "<<plaq<<" link trace = "<<link_trace<< std::endl;
}
}
for(int i=0;i<maxiter;i++){
for(int mu=0;mu<Nd;mu++) U[mu]= PeekIndex<LorentzIndex>(Umu,mu);
if ( Fourier==false ) {
trG = SteepestDescentStep(U,xform,alpha,dmuAmu,orthog);
} else {
trG = FourierAccelSteepestDescentStep(U,xform,alpha,dmuAmu,orthog);
}
// std::cout << GridLogMessage << "trG "<< trG<< std::endl;
// std::cout << GridLogMessage << "xform "<< norm2(xform)<< std::endl;
// std::cout << GridLogMessage << "dmuAmu "<< norm2(dmuAmu)<< std::endl;
for(int mu=0;mu<Nd;mu++) PokeIndex<LorentzIndex>(Umu,U[mu],mu);
// Monitor progress and convergence test
// infrequently to minimise cost overhead
@ -84,7 +112,6 @@ public:
Real Phi = 1.0 - old_trace / link_trace ;
Real Omega= 1.0 - trG;
std::cout << GridLogMessage << " Iteration "<<i<< " Phi= "<<Phi<< " Omega= " << Omega<< " trG " << trG <<std::endl;
if ( (Omega < Omega_tol) && ( ::fabs(Phi) < Phi_tol) ) {
std::cout << GridLogMessage << "Converged ! "<<std::endl;
@ -96,25 +123,26 @@ public:
}
}
};
static Real SteepestDescentStep(std::vector<GaugeMat> &U,Real & alpha, GaugeMat & dmuAmu) {
static Real SteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {
GridBase *grid = U[0].Grid();
std::vector<GaugeMat> A(Nd,grid);
GaugeMat g(grid);
GaugeLinkToLieAlgebraField(U,A);
ExpiAlphaDmuAmu(A,g,alpha,dmuAmu);
ExpiAlphaDmuAmu(A,g,alpha,dmuAmu,orthog);
Real vol = grid->gSites();
Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc;
xform = g*xform ;
SU<Nc>::GaugeTransform(U,g);
return trG;
}
static Real FourierAccelSteepestDescentStep(std::vector<GaugeMat> &U,Real & alpha, GaugeMat & dmuAmu) {
static Real FourierAccelSteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {
GridBase *grid = U[0].Grid();
@ -133,38 +161,41 @@ public:
GaugeLinkToLieAlgebraField(U,A);
DmuAmu(A,dmuAmu);
DmuAmu(A,dmuAmu,orthog);
theFFT.FFT_all_dim(dmuAmu_p,dmuAmu,FFT::forward);
std::vector<int> mask(Nd,1);
for(int mu=0;mu<Nd;mu++) if (mu==orthog) mask[mu]=0;
theFFT.FFT_dim_mask(dmuAmu_p,dmuAmu,mask,FFT::forward);
//////////////////////////////////
// Work out Fp = psq_max/ psq...
// Avoid singularities in Fp
//////////////////////////////////
Coordinate latt_size = grid->GlobalDimensions();
Coordinate coor(grid->_ndimension,0);
for(int mu=0;mu<Nd;mu++) {
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(pmu,mu);
pmu = TwoPiL * pmu ;
psq = psq + 4.0*sin(pmu*0.5)*sin(pmu*0.5);
if ( mu != orthog ) {
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(pmu,mu);
pmu = TwoPiL * pmu ;
psq = psq + 4.0*sin(pmu*0.5)*sin(pmu*0.5);
}
}
Complex psqMax(16.0);
Fp = psqMax*one/psq;
/*
static int once;
if ( once == 0 ) {
std::cout << " Fp " << Fp <<std::endl;
once ++;
}*/
pokeSite(TComplex(1.0),Fp,coor);
pokeSite(TComplex(16.0),Fp,coor);
if( (orthog>=0) && (orthog<Nd) ){
for(int t=0;t<grid->GlobalDimensions()[orthog];t++){
coor[orthog]=t;
pokeSite(TComplex(16.0),Fp,coor);
}
}
dmuAmu_p = dmuAmu_p * Fp;
theFFT.FFT_all_dim(dmuAmu,dmuAmu_p,FFT::backward);
theFFT.FFT_dim_mask(dmuAmu,dmuAmu_p,mask,FFT::backward);
GaugeMat ciadmam(grid);
Complex cialpha(0.0,-alpha);
@ -173,16 +204,17 @@ public:
Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc;
xform = g*xform ;
SU<Nc>::GaugeTransform(U,g);
return trG;
}
static void ExpiAlphaDmuAmu(const std::vector<GaugeMat> &A,GaugeMat &g,Real & alpha, GaugeMat &dmuAmu) {
static void ExpiAlphaDmuAmu(const std::vector<GaugeMat> &A,GaugeMat &g,Real & alpha, GaugeMat &dmuAmu,int orthog) {
GridBase *grid = g.Grid();
Complex cialpha(0.0,-alpha);
GaugeMat ciadmam(grid);
DmuAmu(A,dmuAmu);
DmuAmu(A,dmuAmu,orthog);
ciadmam = dmuAmu*cialpha;
SU<Nc>::taExp(ciadmam,g);
}
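
A usage sketch for the extended interface (parameter values are illustrative): fix to Coulomb gauge in the time direction with Fourier acceleration, keeping the accumulated gauge transformation for later reuse.

LatticeGaugeField Umu(grid);           // assumed already loaded or generated
LatticeColourMatrix xform(grid);       // accumulates g(x)
Real alpha = 0.1;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(
Umu, xform, alpha, 10000, 1.0e-12, 1.0e-12, true, Tdir);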


@ -678,9 +678,18 @@ public:
out += la;
}
}
/*
add GaugeTrans
*/
/*
* Fundamental rep gauge xform
*/
template<typename Fundamental,typename GaugeMat>
static void GaugeTransformFundamental( Fundamental &ferm, GaugeMat &g){
GridBase *grid = ferm._grid;
conformable(grid,g._grid);
ferm = g*ferm;
}
/*
* Adjoint rep gauge xform
*/
template<typename GaugeField,typename GaugeMat>
static void GaugeTransform( GaugeField &Umu, GaugeMat &g){
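
The new helper pairs naturally with the existing link transform; a sketch (Umu, psi and g are assumed to exist, e.g. g being the xform accumulated by the gauge fixer above):

SU<Nc>::GaugeTransform(Umu, g);              // links:    U_mu(x) -> g(x) U_mu(x) g^dag(x+mu)
SU<Nc>::GaugeTransformFundamental(psi, g);   // fermions: psi(x)  -> g(x) psi(x)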


@ -33,12 +33,76 @@ Author: Guido Cossu <guido.cossu@ed.ac.uk>
#include <type_traits>
#include <Grid/tensors/Tensors.h>
#include <Grid/serialisation/VectorUtils.h>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
namespace Grid {
namespace EigenIO {
// EigenIO works for scalars that are not just Grid supported scalars
template<typename T, typename V = void> struct is_complex : public std::false_type {};
// Support all complex types (not just Grid complex types) - even if the definitions overlap (!)
template<typename T> struct is_complex< T , typename
std::enable_if< ::Grid::is_complex< T >::value>::type> : public std::true_type {};
template<typename T> struct is_complex<std::complex<T>, typename
std::enable_if<!::Grid::is_complex<std::complex<T>>::value>::type> : public std::true_type {};
// Helpers to support I/O for Eigen tensors of arithmetic scalars, complex types, or Grid tensors
template<typename T, typename V = void> struct is_scalar : public std::false_type {};
template<typename T> struct is_scalar<T, typename std::enable_if<std::is_arithmetic<T>::value || is_complex<T>::value>::type> : public std::true_type {};
// Is this an Eigen tensor
template<typename T> struct is_tensor : std::integral_constant<bool,
std::is_base_of<Eigen::TensorBase<T, Eigen::ReadOnlyAccessors>, T>::value> {};
// Is this an Eigen tensor of a supported scalar
template<typename T, typename V = void> struct is_tensor_of_scalar : public std::false_type {};
template<typename T> struct is_tensor_of_scalar<T, typename std::enable_if<is_tensor<T>::value && is_scalar<typename T::Scalar>::value>::type> : public std::true_type {};
// Is this an Eigen tensor of a supported container
template<typename T, typename V = void> struct is_tensor_of_container : public std::false_type {};
template<typename T> struct is_tensor_of_container<T, typename std::enable_if<is_tensor<T>::value && isGridTensor<typename T::Scalar>::value>::type> : public std::true_type {};
// These traits describe the scalars inside Eigen tensors
// I wish I could define these in reference to the scalar type (so there would be fewer traits defined)
// but I'm unable to find a syntax to make this work
template<typename T, typename V = void> struct Traits {};
// Traits are the default for scalars, or come from GridTypeMapper for GridTensors
template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_scalar<T>::value>::type>
: public GridTypeMapper_Base {
using scalar_type = typename T::Scalar; // ultimate base scalar
static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
};
// Traits are the default for scalars, or come from GridTypeMapper for GridTensors
template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_container<T>::value>::type> {
using BaseTraits = GridTypeMapper<typename T::Scalar>;
using scalar_type = typename BaseTraits::scalar_type; // ultimate base scalar
static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
static constexpr int TensorLevel = BaseTraits::TensorLevel;
static constexpr int Rank = BaseTraits::Rank;
static constexpr std::size_t count = BaseTraits::count;
static constexpr int Dimension(int dim) { return BaseTraits::Dimension(dim); }
};
// Is this a fixed-size Eigen tensor
template<typename T> struct is_tensor_fixed : public std::false_type {};
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
struct is_tensor_fixed<Eigen::TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType>>
: public std::true_type {};
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType,
int MapOptions_, template <class> class MapPointer_>
struct is_tensor_fixed<Eigen::TensorMap<Eigen::TensorFixedSize<Scalar_, Dimensions_,
Options_, IndexType>, MapOptions_, MapPointer_>>
: public std::true_type {};
// Is this a variable-size Eigen tensor
template<typename T, typename V = void> struct is_tensor_variable : public std::false_type {};
template<typename T> struct is_tensor_variable<T, typename std::enable_if<is_tensor<T>::value
&& !is_tensor_fixed<T>::value>::type> : public std::true_type {};
}
// Abstract writer/reader classes ////////////////////////////////////////////
// static polymorphism implemented using CRTP idiom
class Serializable;
// Static abstract writer
template <typename T>
class Writer
@ -49,10 +113,10 @@ namespace Grid {
void push(const std::string &s);
void pop(void);
template <typename U>
typename std::enable_if<std::is_base_of<Serializable, U>::value, void>::type
typename std::enable_if<std::is_base_of<Serializable, U>::value>::type
write(const std::string& s, const U &output);
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value && !EigenIO::is_tensor<U>::value>::type
write(const std::string& s, const U &output);
template <typename U>
void write(const std::string &s, const iScalar<U> &output);
@ -60,6 +124,42 @@ namespace Grid {
void write(const std::string &s, const iVector<U, N> &output);
template <typename U, int N>
void write(const std::string &s, const iMatrix<U, N> &output);
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value>::type
write(const std::string &s, const ETensor &output);
// Helper functions for Scalar vs Container specialisations
template <typename ETensor>
inline typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value,
const typename ETensor::Scalar *>::type
getFirstScalar(const ETensor &output)
{
return output.data();
}
template <typename ETensor>
inline typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value,
const typename EigenIO::Traits<ETensor>::scalar_type *>::type
getFirstScalar(const ETensor &output)
{
return output.data()->begin();
}
template <typename S>
inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
copyScalars(S * &pCopy, const S &Source)
{
* pCopy ++ = Source;
}
template <typename S>
inline typename std::enable_if<isGridTensor<S>::value, void>::type
copyScalars(typename GridTypeMapper<S>::scalar_type * &pCopy, const S &Source)
{
for( const typename GridTypeMapper<S>::scalar_type &item : Source )
* pCopy ++ = item;
}
void scientificFormat(const bool set);
bool isScientific(void);
void setPrecision(const unsigned int prec);
@ -83,7 +183,8 @@ namespace Grid {
typename std::enable_if<std::is_base_of<Serializable, U>::value, void>::type
read(const std::string& s, U &output);
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value
&& !EigenIO::is_tensor<U>::value, void>::type
read(const std::string& s, U &output);
template <typename U>
void read(const std::string &s, iScalar<U> &output);
@ -91,6 +192,32 @@ namespace Grid {
void read(const std::string &s, iVector<U, N> &output);
template <typename U, int N>
void read(const std::string &s, iMatrix<U, N> &output);
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
read(const std::string &s, ETensor &output);
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, void>::type
Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );
// Helper functions for Scalar vs Container specialisations
template <typename S>
inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
copyScalars(S &Dest, const S * &pSource)
{
Dest = * pSource ++;
}
template <typename S>
inline typename std::enable_if<isGridTensor<S>::value, void>::type
copyScalars(S &Dest, const typename GridTypeMapper<S>::scalar_type * &pSource)
{
for( typename GridTypeMapper<S>::scalar_type &item : Dest )
item = * pSource ++;
}
protected:
template <typename U>
void fromString(U &output, const std::string &s);
@ -135,12 +262,14 @@ namespace Grid {
template <typename T>
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value
&& !EigenIO::is_tensor<U>::value, void>::type
Writer<T>::write(const std::string &s, const U &output)
{
upcast->writeDefault(s, output);
}
template <typename T>
template <typename U>
void Writer<T>::write(const std::string &s, const iScalar<U> &output)
@ -161,6 +290,57 @@ namespace Grid {
{
upcast->writeDefault(s, tensorToVec(output));
}
// Eigen::Tensors of Grid tensors (iScalar, iVector, iMatrix)
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
Writer<T>::write(const std::string &s, const ETensor &output)
{
using Index = typename ETensor::Index;
using Container = typename ETensor::Scalar; // NB: could be same as scalar
using Traits = EigenIO::Traits<ETensor>;
using Scalar = typename Traits::scalar_type; // type of the underlying scalar
constexpr unsigned int TensorRank{ETensor::NumIndices};
constexpr unsigned int ContainerRank{Traits::Rank}; // Only non-zero for containers
constexpr unsigned int TotalRank{TensorRank + ContainerRank};
const Index NumElements{output.size()};
assert( NumElements > 0 );
// Get the dimensionality of the tensor
std::vector<std::size_t> TotalDims(TotalRank);
for(auto i = 0; i < TensorRank; i++ ) {
auto dim = output.dimension(i);
TotalDims[i] = static_cast<size_t>(dim);
assert( TotalDims[i] == dim ); // check we didn't lose anything in the conversion
}
for(auto i = 0; i < ContainerRank; i++ )
TotalDims[TensorRank + i] = Traits::Dimension(i);
// If the Tensor isn't in Row-Major order, then we'll need to copy its data
const bool CopyData{NumElements > 1 && ETensor::Layout != Eigen::StorageOptions::RowMajor};
const Scalar * pWriteBuffer;
std::vector<Scalar> CopyBuffer;
const Index TotalNumElements = NumElements * Traits::count;
if( !CopyData ) {
pWriteBuffer = getFirstScalar( output );
} else {
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
CopyBuffer.resize( TotalNumElements );
Scalar * pCopy = &CopyBuffer[0];
pWriteBuffer = pCopy;
std::array<Index, TensorRank> MyIndex;
for( auto &idx : MyIndex ) idx = 0;
for( auto n = 0; n < NumElements; n++ ) {
const Container & c = output( MyIndex );
copyScalars( pCopy, c );
// Now increment the index
for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
MyIndex[i] = 0;
}
}
upcast->template writeMultiDim<Scalar>(s, TotalDims, pWriteBuffer, TotalNumElements);
}
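// Hypothetical usage sketch (not in the original source), assuming HDF5
// support is enabled; any Writer specialisation dispatches to the Eigen
// overload above via the free write() function:
//
//   Eigen::Tensor<ComplexD, 3> t(2, 3, 4);
//   t.setRandom();
//   Hdf5Writer writer("tensor.h5");
//   write(writer, "mytensor", t); // stored as a 2x3x4 multi-dimensional dataset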
template <typename T>
void Writer<T>::scientificFormat(const bool set)
@ -215,7 +395,8 @@ namespace Grid {
template <typename T>
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value
&& !EigenIO::is_tensor<U>::value, void>::type
Reader<T>::read(const std::string &s, U &output)
{
upcast->readDefault(s, output);
@ -251,6 +432,79 @@ namespace Grid {
vecToTensor(output, v);
}
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
Reader<T>::read(const std::string &s, ETensor &output)
{
using Index = typename ETensor::Index;
using Container = typename ETensor::Scalar; // NB: could be same as scalar
using Traits = EigenIO::Traits<ETensor>;
using Scalar = typename Traits::scalar_type; // type of the underlying scalar
constexpr unsigned int TensorRank{ETensor::NumIndices};
constexpr unsigned int ContainerRank{Traits::Rank}; // Only non-zero for containers
constexpr unsigned int TotalRank{TensorRank + ContainerRank};
using ETDims = std::array<Index, TensorRank>; // Dimensions of the tensor
// read the (flat) data and dimensionality
std::vector<std::size_t> dimData;
std::vector<Scalar> buf;
upcast->readMultiDim( s, buf, dimData );
assert(dimData.size() == TotalRank && "EigenIO: Tensor rank mismatch" );
// Make sure that the number of elements read matches dimensions read
std::size_t NumContainers = 1;
for( auto i = 0 ; i < TensorRank ; i++ )
NumContainers *= dimData[i];
// If our scalar object is a Container, make sure it's dimensions match what we read back
std::size_t ElementsPerContainer = 1;
for( auto i = 0 ; i < ContainerRank ; i++ ) {
assert( dimData[TensorRank+i] == Traits::Dimension(i) && "Tensor Container dimensions don't match data" );
ElementsPerContainer *= dimData[TensorRank+i];
}
assert( NumContainers * ElementsPerContainer == buf.size() && "EigenIO: Number of elements != product of dimensions" );
// Now see whether the tensor is the right shape, or can be made to be
const auto & dims = output.dimensions();
bool bShapeOK = (output.data() != nullptr);
for( auto i = 0; bShapeOK && i < TensorRank ; i++ )
if( dims[i] != dimData[i] )
bShapeOK = false;
// Make the tensor the same size as the data read
ETDims MyIndex;
if( !bShapeOK ) {
for( auto i = 0 ; i < TensorRank ; i++ )
MyIndex[i] = dimData[i];
Reshape(output, MyIndex);
}
// Copy the data into the tensor
for( auto &d : MyIndex ) d = 0;
const Scalar * pSource = &buf[0];
for( std::size_t n = 0 ; n < NumContainers ; n++ ) {
Container & c = output( MyIndex );
copyScalars( c, pSource );
// Now increment the index
for( int i = TensorRank - 1; i != -1 && ++MyIndex[i] == dims[i]; i-- )
MyIndex[i] = 0;
}
assert( pSource == &buf[NumContainers * ElementsPerContainer] );
}
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, void>::type
Reader<T>::Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims )
{
assert( 0 && "EigenIO: Fixed tensor dimensions can't be changed" );
}
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
Reader<T>::Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims )
{
//t.reshape( dims );
t.resize( dims );
}
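// Hypothetical round-trip sketch (illustrative only): a variable-size tensor
// is resized by Reshape() to the dimensions read from file, whereas a
// TensorFixedSize must already have the matching shape:
//
//   Eigen::Tensor<ComplexD, 3> t2; // starts empty
//   Hdf5Reader reader("tensor.h5");
//   read(reader, "mytensor", t2);  // t2 resized to 2x3x4 and filled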
template <typename T>
template <typename U>
void Reader<T>::fromString(U &output, const std::string &s)
@ -289,8 +543,70 @@ namespace Grid {
{
return os;
}
template <typename T1, typename T2>
static inline typename std::enable_if<!EigenIO::is_tensor<T1>::value || !EigenIO::is_tensor<T2>::value, bool>::type
CompareMember(const T1 &lhs, const T2 &rhs) {
return lhs == rhs;
}
template <typename T1, typename T2>
static inline typename std::enable_if<EigenIO::is_tensor<T1>::value && EigenIO::is_tensor<T2>::value, bool>::type
CompareMember(const T1 &lhs, const T2 &rhs) {
// First check whether dimensions match (Eigen tensor library will assert if they don't match)
bool bReturnValue = (T1::NumIndices == T2::NumIndices);
for( auto i = 0 ; bReturnValue && i < T1::NumIndices ; i++ )
bReturnValue = ( lhs.dimension(i) == rhs.dimension(i) );
if( bReturnValue ) {
Eigen::Tensor<bool, 0, T1::Options> bResult = (lhs == rhs).all();
bReturnValue = bResult(0);
}
return bReturnValue;
}
template <typename T>
static inline typename std::enable_if<EigenIO::is_tensor<T>::value, bool>::type
CompareMember(const std::vector<T> &lhs, const std::vector<T> &rhs) {
const auto NumElements = lhs.size();
bool bResult = ( NumElements == rhs.size() );
for( auto i = 0 ; i < NumElements && bResult ; i++ )
bResult = CompareMember(lhs[i], rhs[i]);
return bResult;
}
template <typename T>
static inline typename std::enable_if<!EigenIO::is_tensor<T>::value, void>::type
WriteMember(std::ostream &os, const T &object) {
os << object;
}
template <typename T>
static inline typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
WriteMember(std::ostream &os, const T &object) {
using Index = typename T::Index;
const Index NumElements{object.size()};
assert( NumElements > 0 );
Index count = 1;
os << "T<";
for( int i = 0; i < T::NumIndices; i++ ) {
Index dim = object.dimension(i);
count *= dim;
if( i )
os << ",";
os << dim;
}
assert( count == NumElements && "Number of elements doesn't match tensor dimensions" );
os << ">{";
const typename T::Scalar * p = object.data();
for( Index i = 0; i < count; i++ ) {
if( i )
os << ",";
os << *p++;
}
os << "}";
}
};
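// Illustrative sketch (not part of the original source): CompareMember and
// WriteMember let the serialisable-class macros compare and print Eigen
// tensor members; e.g. (assuming these helpers are accessible):
//
//   Eigen::Tensor<double, 2> a(2, 2), b(2, 2);
//   a.setConstant(1.0); b = a;
//   bool same = Serializable::CompareMember(a, b); // true: same shape and data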
// Generic writer interface //////////////////////////////////////////////////
template <typename T>
inline void push(Writer<T> &w, const std::string &s) {

View File

@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@ -24,8 +24,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_SERIALISATION_BINARY_READER_H
#define GRID_SERIALISATION_BINARY_READER_H
@ -37,83 +37,132 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <vector>
#include <cassert>
NAMESPACE_BEGIN(Grid);
namespace Grid {
class BinaryWriter: public Writer<BinaryWriter>
{
public:
BinaryWriter(const std::string &fileName);
virtual ~BinaryWriter(void) = default;
void push(const std::string &s) {};
void pop(void) {};
class BinaryWriter: public Writer<BinaryWriter>
{
public:
BinaryWriter(const std::string &fileName);
virtual ~BinaryWriter(void) = default;
void push(const std::string &s) {};
void pop(void) {};
template <typename U>
void writeDefault(const std::string &s, const U &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
void writeDefault(const std::string &s, const char *x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
private:
std::ofstream file_;
};
class BinaryReader: public Reader<BinaryReader>
{
public:
BinaryReader(const std::string &fileName);
virtual ~BinaryReader(void) = default;
bool push(const std::string &s) {return true;}
void pop(void) {};
template <typename U>
void readDefault(const std::string &s, U &output);
template <typename U>
void readDefault(const std::string &s, std::vector<U> &output);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
private:
std::ifstream file_;
};
// Writer template implementation ////////////////////////////////////////////
template <typename U>
void writeDefault(const std::string &s, const U &x);
void BinaryWriter::writeDefault(const std::string &s, const U &x)
{
file_.write((char *)&x, sizeof(U));
}
template <>
void BinaryWriter::writeDefault(const std::string &s, const std::string &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
void writeDefault(const std::string &s, const char *x);
private:
std::ofstream file_;
};
class BinaryReader: public Reader<BinaryReader>
{
public:
BinaryReader(const std::string &fileName);
virtual ~BinaryReader(void) = default;
bool push(const std::string &s) {return true;}
void pop(void) {};
template <typename U>
void readDefault(const std::string &s, U &output);
template <typename U>
void readDefault(const std::string &s, std::vector<U> &output);
private:
std::ifstream file_;
};
// Writer template implementation ////////////////////////////////////////////
template <typename U>
void BinaryWriter::writeDefault(const std::string &s, const U &x)
{
file_.write((char *)&x, sizeof(U));
}
template <>
void BinaryWriter::writeDefault(const std::string &s, const std::string &x);
template <typename U>
void BinaryWriter::writeDefault(const std::string &s, const std::vector<U> &x)
{
uint64_t sz = x.size();
void BinaryWriter::writeDefault(const std::string &s, const std::vector<U> &x)
{
uint64_t sz = x.size();
write("", sz);
for (uint64_t i = 0; i < sz; ++i)
write("", sz);
for (uint64_t i = 0; i < sz; ++i)
{
write("", x[i]);
}
}
}
// Reader template implementation ////////////////////////////////////////////
template <> void BinaryReader::readDefault(const std::string &s, std::string &output);
template <typename U>
void BinaryWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
uint64_t rank = static_cast<uint64_t>( Dimensions.size() );
uint64_t tmp = 1;
for( auto i = 0 ; i < rank ; i++ )
tmp *= Dimensions[i];
assert( tmp == NumElements && "Dimensions don't match size of data being written" );
// Total number of elements
write("", tmp);
// Number of dimensions
write("", rank);
// Followed by each dimension
for( auto i = 0 ; i < rank ; i++ ) {
tmp = Dimensions[i];
write("", tmp);
}
for( auto i = 0; i < NumElements; ++i)
write("", pDataRowMajor[i]);
}
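// Illustrative sketch (not part of the original source) of the resulting
// binary layout for a 2x3 row-major block of doubles:
//
//   uint64_t 6        // total number of elements
//   uint64_t 2        // rank
//   uint64_t 2, 3     // the two dimensions
//   double   data[6]  // the elements, in row-major order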
// Reader template implementation ////////////////////////////////////////////
template <typename U>
void BinaryReader::readDefault(const std::string &s, U &output)
{
file_.read((char *)&output, sizeof(U));
}
template <typename U>
void BinaryReader::readDefault(const std::string &s, U &output)
{
file_.read((char *)&output, sizeof(U));
}
template <>
void BinaryReader::readDefault(const std::string &s, std::string &output);
template <typename U>
void BinaryReader::readDefault(const std::string &s, std::vector<U> &output)
{
uint64_t sz;
template <typename U>
void BinaryReader::readDefault(const std::string &s, std::vector<U> &output)
{
uint64_t sz;
read("", sz);
output.resize(sz);
for (uint64_t i = 0; i < sz; ++i)
read("", sz);
output.resize(sz);
for (uint64_t i = 0; i < sz; ++i)
{
read("", output[i]);
}
}
template <typename U>
void BinaryReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
// Number of elements
uint64_t NumElements;
read("", NumElements);
// Number of dimensions
uint64_t rank;
read("", rank);
// Followed by each dimension
uint64_t count = 1;
dim.resize(rank);
uint64_t tmp;
for( auto i = 0 ; i < rank ; i++ ) {
read("", tmp);
dim[i] = tmp;
count *= tmp;
}
assert( count == NumElements && "Dimensions don't match size of data being read" );
buf.resize(count);
for( auto i = 0; i < count; ++i)
read("", buf[i]);
}
}
NAMESPACE_END(Grid);
#endif

View File

@ -3,6 +3,7 @@
#include <stack>
#include <string>
#include <list>
#include <vector>
#include <H5Cpp.h>
#include <Grid/tensors/Tensors.h>
@ -38,6 +39,8 @@ namespace Grid
template <typename U>
typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
H5NS::Group & getGroup(void);
private:
template <typename U>
@ -48,7 +51,7 @@ namespace Grid
std::vector<std::string> path_;
H5NS::H5File file_;
H5NS::Group group_;
unsigned int dataSetThres_{HDF5_DEF_DATASET_THRES};
const unsigned int dataSetThres_{HDF5_DEF_DATASET_THRES};
};
class Hdf5Reader: public Reader<Hdf5Reader>
@ -66,6 +69,8 @@ namespace Grid
template <typename U>
typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
readDefault(const std::string &s, std::vector<U> &x);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
H5NS::Group & getGroup(void);
private:
template <typename U>
@ -101,6 +106,75 @@ namespace Grid
template <>
void Hdf5Writer::writeDefault(const std::string &s, const std::string &x);
template <typename U>
void Hdf5Writer::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
// Hdf5 needs the dimensions as hsize_t
const int rank = static_cast<int>(Dimensions.size());
std::vector<hsize_t> dim(rank);
for(int i = 0; i < rank; i++)
dim[i] = Dimensions[i];
// write the entire dataset to file
H5NS::DataSpace dataSpace(rank, dim.data());
if (NumElements > dataSetThres_)
{
// Make sure that 1) each dimension and 2) the overall chunk size are each < 4GB
const hsize_t MaxElements = ( sizeof( U ) == 1 ) ? 0xffffffff : 0x100000000 / sizeof( U );
hsize_t ElementsPerChunk = 1;
bool bTooBig = false;
for( int i = rank - 1 ; i != -1 ; i-- ) {
auto &d = dim[i];
if( bTooBig )
d = 1; // Chunk size is already as big as can be - remaining dimensions = 1
else {
// If individual dimension too big, reduce by prime factors if possible
while( d > MaxElements && ( d & 1 ) == 0 )
d >>= 1;
const char ErrorMsg[] = " dimension > 4GB and not divisible by 2^n. "
"Hdf5IO chunk size will be inefficient. NB Serialisation is not intended for large datasets - please consider alternatives.";
if( d > MaxElements ) {
std::cout << GridLogWarning << "Individual" << ErrorMsg << std::endl;
hsize_t quotient = d / MaxElements;
if( d % MaxElements )
quotient++;
d /= quotient;
}
// Now make sure overall size is not too big
hsize_t OverflowCheck = ElementsPerChunk;
ElementsPerChunk *= d;
assert( OverflowCheck == ElementsPerChunk / d && "Product of dimensions overflowed hsize_t" );
// If product of dimensions too big, reduce by prime factors
while( ElementsPerChunk > MaxElements && ( ElementsPerChunk & 1 ) == 0 ) {
bTooBig = true;
d >>= 1;
ElementsPerChunk >>= 1;
}
if( ElementsPerChunk > MaxElements ) {
std::cout << GridLogWarning << "Product of" << ErrorMsg << std::endl;
hsize_t quotient = ElementsPerChunk / MaxElements;
if( ElementsPerChunk % MaxElements )
quotient++;
d /= quotient;
ElementsPerChunk /= quotient;
}
}
}
H5NS::DataSet dataSet;
H5NS::DSetCreatPropList plist;
plist.setChunk(rank, dim.data());
plist.setFletcher32();
dataSet = group_.createDataSet(s, Hdf5Type<U>::type(), dataSpace, plist);
dataSet.write(pDataRowMajor, Hdf5Type<U>::type());
}
else
{
H5NS::Attribute attribute;
attribute = group_.createAttribute(s, Hdf5Type<U>::type(), dataSpace);
attribute.write(Hdf5Type<U>::type(), pDataRowMajor);
}
}
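// Worked example (illustrative, not in the original source): for U = double,
// MaxElements = 0x100000000 / 8 = 512Mi elements (4GiB). A dataset with
// dimensions {8, 1024, 1024, 1024} holds 8Gi elements, so the loop above
// halves the slowest-varying dimensions until the chunk holds no more than
// MaxElements, giving chunk dimensions {1, 512, 1024, 1024}.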
template <typename U>
typename std::enable_if<element<std::vector<U>>::is_number, void>::type
Hdf5Writer::writeDefault(const std::string &s, const std::vector<U> &x)
@ -110,34 +184,11 @@ namespace Grid
// flatten the vector and getting dimensions
Flatten<std::vector<U>> flat(x);
std::vector<hsize_t> dim;
std::vector<size_t> dim;
const auto &flatx = flat.getFlatVector();
for (auto &d: flat.getDim())
{
dim.push_back(d);
}
// write to file
H5NS::DataSpace dataSpace(dim.size(), dim.data());
if (flatx.size() > dataSetThres_)
{
H5NS::DataSet dataSet;
H5NS::DSetCreatPropList plist;
plist.setChunk(dim.size(), dim.data());
plist.setFletcher32();
dataSet = group_.createDataSet(s, Hdf5Type<Element>::type(), dataSpace, plist);
dataSet.write(flatx.data(), Hdf5Type<Element>::type());
}
else
{
H5NS::Attribute attribute;
attribute = group_.createAttribute(s, Hdf5Type<Element>::type(), dataSpace);
attribute.write(Hdf5Type<Element>::type(), flatx.data());
}
writeMultiDim<Element>(s, dim, &flatx[0], flatx.size());
}
template <typename U>
@ -173,10 +224,9 @@ namespace Grid
template <>
void Hdf5Reader::readDefault(const std::string &s, std::string &x);
template <typename U>
typename std::enable_if<element<std::vector<U>>::is_number, void>::type
Hdf5Reader::readDefault(const std::string &s, std::vector<U> &x)
void Hdf5Reader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
// alias to element type
typedef typename element<std::vector<U>>::type Element;
@ -184,7 +234,6 @@ namespace Grid
// read the dimensions
H5NS::DataSpace dataSpace;
std::vector<hsize_t> hdim;
std::vector<size_t> dim;
hsize_t size = 1;
if (group_.attrExists(s))
@ -204,8 +253,8 @@ namespace Grid
}
// read the flat vector
std::vector<Element> buf(size);
buf.resize(size);
if (size > dataSetThres_)
{
H5NS::DataSet dataSet;
@ -220,7 +269,19 @@ namespace Grid
attribute = group_.openAttribute(s);
attribute.read(Hdf5Type<Element>::type(), buf.data());
}
}
template <typename U>
typename std::enable_if<element<std::vector<U>>::is_number, void>::type
Hdf5Reader::readDefault(const std::string &s, std::vector<U> &x)
{
// alias to element type
typedef typename element<std::vector<U>>::type Element;
std::vector<size_t> dim;
std::vector<Element> buf;
readMultiDim( s, buf, dim );
// reconstruct the multidimensional vector
Reconstruct<std::vector<U>> r(buf, dim);

View File

@ -107,9 +107,10 @@ THE SOFTWARE.
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define GRID_MACRO_MEMBER(A,B) A B;
#define GRID_MACRO_COMP_MEMBER(A,B) result = (result and (lhs. B == rhs. B));
#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" " #B << " = " << obj. B << " ; " <<std::endl;
#define GRID_MACRO_READ_MEMBER(A,B) ::Grid::read(RD,#B,obj. B);
#define GRID_MACRO_COMP_MEMBER(A,B) result = (result and CompareMember(lhs. B, rhs. B));
#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" " #B << " = "; WriteMember( os, obj. B ); os << " ; " <<std::endl;
#define GRID_MACRO_READ_MEMBER(A,B) ::Grid::read(RD,#B,obj. B);
#define GRID_MACRO_WRITE_MEMBER(A,B) ::Grid::write(WR,#B,obj. B);
#define GRID_SERIALIZABLE_CLASS_MEMBERS(cname,...)\

View File

@ -51,6 +51,8 @@ namespace Grid
void writeDefault(const std::string &s, const U &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
private:
void indent(void);
private:
@ -69,6 +71,8 @@ namespace Grid
void readDefault(const std::string &s, U &output);
template <typename U>
void readDefault(const std::string &s, std::vector<U> &output);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
private:
void checkIndent(void);
private:
@ -95,7 +99,18 @@ namespace Grid
write(s, x[i]);
}
}
template <typename U>
void TextWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
uint64_t Rank = Dimensions.size();
write(s, Rank);
for( uint64_t d : Dimensions )
write(s, d);
while( NumElements-- )
write(s, *pDataRowMajor++);
}
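// Illustrative sketch (not in the original source) of the text layout
// produced for a 2x3 block: the rank, then each dimension, then every
// element, each written on its own line at the current indentation, e.g.
//   2          <- rank
//   2          <- first dimension
//   3          <- second dimension
//   ...        <- the six elements follow, one per line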
// Reader template implementation ////////////////////////////////////////////
template <>
void TextReader::readDefault(const std::string &s, std::string &output);
@ -122,7 +137,22 @@ namespace Grid
}
}
template <typename U>
void TextReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
const char sz[] = "";
uint64_t Rank;
read(sz, Rank);
dim.resize( Rank );
size_t NumElements = 1;
for( auto &d : dim ) {
read(sz, d);
NumElements *= d;
}
buf.resize( NumElements );
for( auto &x : buf )
read(s, x);
}
}

View File

@ -1,3 +1,32 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./Grid/serialisation/VectorUtils.h
Copyright (C) 2015
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_SERIALISATION_VECTORUTILS_H
#define GRID_SERIALISATION_VECTORUTILS_H
@ -53,6 +82,17 @@ namespace Grid {
return os;
}
// std::vector<std::vector<...>> nested to specified Rank //////////////////////////////////
template<typename T, unsigned int Rank>
struct NestedStdVector {
typedef typename std::vector<typename NestedStdVector<T, Rank - 1>::type> type;
};
template<typename T>
struct NestedStdVector<T,0> {
typedef T type;
};
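// Illustrative example (not in the original source):
//   NestedStdVector<double, 3>::type == std::vector<std::vector<std::vector<double>>>
//   NestedStdVector<double, 0>::type == double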
// Grid scalar tensors to nested std::vectors //////////////////////////////////
template <typename T>
struct TensorToVec
@ -436,4 +476,4 @@ std::string vecToStr(const std::vector<T> &v)
return sstr.str();
}
#endif
#endif

View File

@ -57,6 +57,8 @@ namespace Grid
void writeDefault(const std::string &s, const U &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
std::string docString(void);
std::string string(void);
private:
@ -78,6 +80,8 @@ namespace Grid
template <typename U>
void readDefault(const std::string &s, U &output);
template <typename U> void readDefault(const std::string &s, std::vector<U> &output);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
void readCurrentSubtree(std::string &s);
private:
@ -122,13 +126,45 @@ namespace Grid
void XmlWriter::writeDefault(const std::string &s, const std::vector<U> &x)
{
push(s);
for (auto &x_i: x)
for( auto &u : x )
{
write("elem", x_i);
write("elem", u);
}
pop();
}
template <typename U>
void XmlWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
push(s);
size_t count = 1;
const int Rank = static_cast<int>( Dimensions.size() );
write("rank", Rank );
std::vector<size_t> MyIndex( Rank );
for( auto d : Dimensions ) {
write("dim", d);
count *= d;
}
assert( count == NumElements && "XmlIO : element count doesn't match dimensions" );
static const char sName[] = "tensor";
for( int i = 0 ; i < Rank ; i++ ) {
MyIndex[i] = 0;
push(sName);
}
while (NumElements--) {
write("elem", *pDataRowMajor++);
int i;
for( i = Rank - 1 ; i != -1 && ++MyIndex[i] == Dimensions[i] ; i-- )
MyIndex[i] = 0;
int Rollover = Rank - 1 - i;
for( i = 0 ; i < Rollover ; i++ )
pop();
for( i = 0 ; NumElements && i < Rollover ; i++ )
push(sName);
}
pop();
}
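// Illustrative sketch (not part of the original source) of the XML produced
// for a 2x2 tensor written under the name "t":
//
//   <t>
//     <rank>2</rank>
//     <dim>2</dim>
//     <dim>2</dim>
//     <tensor>
//       <tensor> <elem>..</elem> <elem>..</elem> </tensor>
//       <tensor> <elem>..</elem> <elem>..</elem> </tensor>
//     </tensor>
//   </t>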
// Reader template implementation ////////////////////////////////////////////
template <> void XmlReader::readDefault(const std::string &s, std::string &output);
template <typename U>
@ -143,25 +179,66 @@ namespace Grid
template <typename U>
void XmlReader::readDefault(const std::string &s, std::vector<U> &output)
{
std::string buf;
unsigned int i = 0;
if (!push(s))
{
std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
std::cout << std::endl;
return;
} else {
for(unsigned int i = 0; node_.child("elem"); )
{
output.resize(i + 1);
read("elem", output[i++]);
node_.child("elem").set_name("elem-done");
}
pop();
}
}
template <typename U>
void XmlReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
if (!push(s))
{
std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
std::cout << std::endl;
} else {
static const char sName[] = "tensor";
static const char sNameDone[] = "tensor-done";
int Rank;
read("rank", Rank);
dim.resize( Rank );
size_t NumElements = 1;
for( auto &d : dim )
{
read("dim", d);
node_.child("dim").set_name("dim-done");
NumElements *= d;
}
buf.resize( NumElements );
std::vector<size_t> MyIndex( Rank );
for( int i = 0 ; i < Rank ; i++ ) {
MyIndex[i] = 0;
push(sName);
}
for( auto &x : buf )
{
NumElements--;
read("elem", x);
node_.child("elem").set_name("elem-done");
int i;
for( i = Rank - 1 ; i != -1 && ++MyIndex[i] == dim[i] ; i-- )
MyIndex[i] = 0;
int Rollover = Rank - 1 - i;
for( i = 0 ; i < Rollover ; i++ ) {
node_.set_name(sNameDone);
pop();
}
for( i = 0 ; NumElements && i < Rollover ; i++ )
push(sName);
}
pop();
}
while (node_.child("elem"))
{
output.resize(i + 1);
read("elem", output[i]);
node_.child("elem").set_name("elem-done");
i++;
}
pop();
}
}
#endif

View File

@ -1,652 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/simd/Grid_gpu.h
Copyright (C) 2018
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
//----------------------------------------------------------------------
/*! @file Grid_gpu.h
@brief Optimization libraries for GPU
Use float4, double2
*/
//----------------------------------------------------------------------
#include <cuda_fp16.h>
namespace Grid {
// re im, re, im, re, im etc..
struct half8 {
half ax, ay, az, aw, bx, by, bz, bw;
};
accelerator_inline float half2float(half h)
{
float f;
#ifdef __CUDA_ARCH__
f = __half2float(h);
#else
//f = __half2float(h);
__half_raw hr(h);
Grid_half hh;
hh.x = hr.x;
f= sfw_half_to_float(hh);
#endif
return f;
}
accelerator_inline half float2half(float f)
{
half h;
#ifdef __CUDA_ARCH__
h = __float2half(f);
#else
Grid_half hh = sfw_float_to_half(f);
__half_raw hr;
hr.x = hh.x;
h = __half(hr);
#endif
return h;
}
namespace Optimization {
inline accelerator float4 operator*(float4 a,float4 b) {return make_float4(a.x*b.x,a.y*b.y,a.z*b.z,a.w*b.w);}
inline accelerator float4 operator+(float4 a,float4 b) {return make_float4(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w);}
inline accelerator float4 operator-(float4 a,float4 b) {return make_float4(a.x-b.x,a.y-b.y,a.z-b.z,a.w-b.w);}
inline accelerator float4 operator/(float4 a,float4 b) {return make_float4(a.x/b.x,a.y/b.y,a.z/b.z,a.w/b.w);}
inline accelerator double2 operator*(double2 a,double2 b) {return make_double2(a.x*b.x,a.y*b.y);}
inline accelerator double2 operator+(double2 a,double2 b) {return make_double2(a.x+b.x,a.y+b.y);}
inline accelerator double2 operator-(double2 a,double2 b) {return make_double2(a.x-b.x,a.y-b.y);}
inline accelerator double2 operator/(double2 a,double2 b) {return make_double2(a.x/b.x,a.y/b.y);}
inline accelerator int4 operator*(int4 a,int4 b) {return make_int4(a.x*b.x,a.y*b.y,a.z*b.z,a.w*b.w);}
inline accelerator int4 operator+(int4 a,int4 b) {return make_int4(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w);}
inline accelerator int4 operator-(int4 a,int4 b) {return make_int4(a.x-b.x,a.y-b.y,a.z-b.z,a.w-b.w);}
inline accelerator int4 operator/(int4 a,int4 b) {return make_int4(a.x/b.x,a.y/b.y,a.z/b.z,a.w/b.w);}
struct Vsplat{
//Complex float
accelerator_inline float4 operator()(float a, float b){
float4 ret;
ret.x=ret.z=a;
ret.y=ret.w=b;
return ret;
}
// Real float
accelerator_inline float4 operator()(float a){
float4 ret;
ret.x=ret.y=ret.z=ret.w = a;
return ret;
}
//Complex double
accelerator_inline double2 operator()(double a, double b){
double2 ret;
ret.x=a;
ret.y=b;
return ret;
}
//Real double
accelerator_inline double2 operator()(double a){
double2 ret;
ret.x = ret.y = a;
return ret;
}
//Integer
accelerator_inline int4 operator()(Integer a){
int4 ret;
ret.x=ret.y=ret.z=ret.w=a;
return ret;
}
};
struct Vstore{
//Float
accelerator_inline void operator()(float4 a, float* F){
float4 *F4 = (float4 *)F;
*F4 = a;
}
//Double
accelerator_inline void operator()(double2 a, double* D){
double2 *D2 = (double2 *)D;
*D2 = a;
}
//Integer
accelerator_inline void operator()(int4 a, Integer* I){
int4 *I4 = (int4 *)I;
*I4 = a;
}
};
struct Vstream{
//Float
accelerator_inline void operator()(float * a, float4 b){
float4 * a4 = (float4 *)a;
*a4 = b;
}
//Double
accelerator_inline void operator()(double * a, double2 b){
double2 * a2 = (double2 *)a;
*a2 = b;
}
};
struct Vset{
// Complex float
accelerator_inline float4 operator()(Grid::ComplexF *a){
float4 ret;
ret.x = a[0].real();
ret.y = a[0].imag();
ret.z = a[1].real();
ret.w = a[1].imag();
return ret;
}
// Complex double
accelerator_inline double2 operator()(Grid::ComplexD *a){
double2 ret;
ret.x = a[0].real();
ret.y = a[0].imag();
return ret;
}
// Real float
accelerator_inline float4 operator()(float *a){
float4 ret;
ret.x = a[0];
ret.y = a[1];
ret.z = a[2];
ret.w = a[3];
return ret;
}
// Real double
accelerator_inline double2 operator()(double *a){
double2 ret;
ret.x = a[0];
ret.y = a[1];
return ret;
}
// Integer
accelerator_inline int4 operator()(Integer *a){
int4 ret;
ret.x = a[0];
ret.y = a[1];
ret.z = a[2];
ret.w = a[3];
return ret;
}
};
template <typename Out_type, typename In_type>
struct Reduce{
//Need templated class to overload output type
//General form must generate error if compiled
accelerator_inline Out_type operator()(In_type in){
printf("Error, using wrong Reduce function\n");
exit(1);
return 0;
}
};
/////////////////////////////////////////////////////
// Arithmetic operations
/////////////////////////////////////////////////////
struct Sum{
//Complex/Real float
accelerator_inline float4 operator()(float4 a, float4 b){
return a+b;
}
//Complex/Real double
accelerator_inline double2 operator()(double2 a, double2 b){
return a+b;
}
//Integer
accelerator_inline int4 operator()(int4 a,int4 b){
return a+b;
}
};
struct Sub{
//Complex/Real float
accelerator_inline float4 operator()(float4 a, float4 b){
return a-b;
}
//Complex/Real double
accelerator_inline double2 operator()(double2 a, double2 b){
return a-b;
}
//Integer
accelerator_inline int4 operator()(int4 a, int4 b){
return a-b;
}
};
struct MultRealPart{
accelerator_inline float4 operator()(float4 a, float4 b){
float4 ymm0;
ymm0.x = a.x;
ymm0.y = a.x;
ymm0.z = a.z;
ymm0.w = a.z;
return ymm0*b;
// ymm0 = _mm_shuffle_ps(a,a,_MM_SELECT_FOUR_FOUR(2,2,0,0)); // ymm0 <- ar ar,
// return _mm_mul_ps(ymm0,b); // ymm0 <- ar bi, ar br
}
accelerator_inline double2 operator()(double2 a, double2 b){
double2 ymm0;
ymm0.x = a.x;
ymm0.y = a.x;
return ymm0*b;
// ymm0 = _mm_shuffle_pd(a,a,0x0); // ymm0 <- ar ar, ar,ar b'00,00
// return _mm_mul_pd(ymm0,b); // ymm0 <- ar bi, ar br
}
};
struct MaddRealPart{
accelerator_inline float4 operator()(float4 a, float4 b, float4 c){
float4 ymm0; // = _mm_shuffle_ps(a,a,_MM_SELECT_FOUR_FOUR(2,2,0,0)); // ymm0 <- ar ar,
ymm0.x = a.x;
ymm0.y = a.x;
ymm0.z = a.z;
ymm0.w = a.z;
return c+ymm0*b;
}
accelerator_inline double2 operator()(double2 a, double2 b, double2 c){
// ymm0 = _mm_shuffle_pd( a, a, 0x0 );
double2 ymm0;
ymm0.x = a.x;
ymm0.y = a.x;
return c+ymm0*b;
}
};
struct MultComplex{
// Complex float
accelerator_inline float4 operator()(float4 a, float4 b){
float4 ymm0;
ymm0.x = a.x*b.x - a.y*b.y ; // rr - ii
ymm0.y = a.x*b.y + a.y*b.x ; // ir + ri
ymm0.z = a.z*b.z - a.w*b.w ; // rr - ii
ymm0.w = a.w*b.z + a.z*b.w ; // ir + ri
return ymm0;
}
// Complex double
accelerator_inline double2 operator()(double2 a, double2 b){
double2 ymm0;
ymm0.x = a.x*b.x - a.y*b.y ; // rr - ii
ymm0.y = a.x*b.y + a.y*b.x ; // ir + ri
return ymm0;
}
};
struct Mult{
accelerator_inline void mac(float4 &a, float4 b, float4 c){
a= a+b*c;
}
accelerator_inline void mac(double2 &a, double2 b, double2 c){
a= a+b*c;
}
// Real float
accelerator_inline float4 operator()(float4 a, float4 b){
return a*b;
}
// Real double
accelerator_inline double2 operator()(double2 a, double2 b){
return a*b;
}
// Integer
accelerator_inline int4 operator()(int4 a, int4 b){
return a*b;
}
};
struct Div{
// Real float
accelerator_inline float4 operator()(float4 a, float4 b){
return a/b;
}
// Real double
accelerator_inline double2 operator()(double2 a, double2 b){
return a/b;
}
};
struct Conj{
// Complex single
accelerator_inline float4 operator()(float4 in){
float4 ret;
ret.x = in.x;
ret.y = - in.y;
ret.z = in.z;
ret.w = - in.w;
return ret;
}
// Complex double
accelerator_inline double2 operator()(double2 in){
double2 ret;
ret.x = in.x;
ret.y = - in.y;
return ret;
}
// do not define for integer input
};
struct TimesMinusI{
//Complex single
accelerator_inline float4 operator()(float4 in, float4 ret){
float4 tmp;
tmp.x = in.y;
tmp.y = - in.x;
tmp.z = in.w;
tmp.w = - in.z;
return tmp;
}
//Complex double
accelerator_inline double2 operator()(double2 in, double2 ret){
double2 tmp;
tmp.x = in.y;
tmp.y = - in.x;
return tmp;
}
};
struct TimesI{
//Complex single
accelerator_inline float4 operator()(float4 in, float4 ret){
float4 tmp;
tmp.x = - in.y;
tmp.y = in.x;
tmp.z = - in.w;
tmp.w = in.z;
return tmp;
}
//Complex double
accelerator_inline double2 operator()(double2 in, double2 ret){
double2 tmp ;
tmp.x = - in.y;
tmp.y = in.x;
return tmp;
}
};
struct Permute{
static accelerator_inline float4 Permute0(float4 in){
float4 tmp;
tmp.x = in.z;
tmp.y = in.w;
tmp.z = in.x;
tmp.w = in.y;
return tmp;
};
static accelerator_inline float4 Permute1(float4 in){
float4 tmp;
tmp.x = in.y;
tmp.y = in.x;
tmp.z = in.w;
tmp.w = in.z;
return tmp;
};
static accelerator_inline float4 Permute2(float4 in){
return in;
};
static accelerator_inline float4 Permute3(float4 in){
return in;
};
static accelerator_inline double2 Permute0(double2 in){ //AB -> BA
double2 tmp;
tmp.x = in.y;
tmp.y = in.x;
return tmp;
};
static accelerator_inline double2 Permute1(double2 in){
return in;
};
static accelerator_inline double2 Permute2(double2 in){
return in;
};
static accelerator_inline double2 Permute3(double2 in){
return in;
};
};
struct PrecisionChange {
static accelerator_inline half8 StoH (float4 a,float4 b) {
half8 h;
h.ax = float2half(a.x);
h.ay = float2half(a.y);
h.az = float2half(a.z);
h.aw = float2half(a.w);
h.bx = float2half(b.x);
h.by = float2half(b.y);
h.bz = float2half(b.z);
h.bw = float2half(b.w);
return h;
}
static accelerator_inline void HtoS (half8 h,float4 &sa,float4 &sb) {
sa.x = half2float(h.ax);
sa.y = half2float(h.ay);
sa.z = half2float(h.az);
sa.w = half2float(h.aw);
sb.x = half2float(h.bx);
sb.y = half2float(h.by);
sb.z = half2float(h.bz);
sb.w = half2float(h.bw);
}
static accelerator_inline float4 DtoS (double2 a,double2 b) {
float4 s;
s.x = a.x;
s.y = a.y;
s.z = b.x;
s.w = b.y;
return s;
}
static accelerator_inline void StoD (float4 s,double2 &a,double2 &b) {
a.x = s.x;
a.y = s.y;
b.x = s.z;
b.y = s.w;
}
static accelerator_inline half8 DtoH (double2 a,double2 b,double2 c,double2 d) {
float4 sa,sb;
sa = DtoS(a,b);
sb = DtoS(c,d);
return StoH(sa,sb);
}
static accelerator_inline void HtoD (half8 h,double2 &a,double2 &b,double2 &c,double2 &d) {
float4 sa,sb;
HtoS(h,sa,sb);
StoD(sa,a,b);
StoD(sb,c,d);
}
};
struct Exchange{
// 3210 ordering
static accelerator_inline void Exchange0(float4 &out1,float4 &out2,float4 in1,float4 in2){
out1.x = in1.x;
out1.y = in1.y;
out1.z = in2.x;
out1.w = in2.y;
out2.x = in1.z;
out2.y = in1.w;
out2.z = in2.z;
out2.w = in2.w;
// out1= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(1,0,1,0));
// out2= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,2,3,2));
return;
};
static accelerator_inline void Exchange1(float4 &out1,float4 &out2,float4 in1,float4 in2){
out1.x = in1.x;
out1.y = in2.x;
out1.z = in1.z;
out1.w = in2.z;
out2.x = in1.y;
out2.y = in2.y;
out2.z = in1.w;
out2.w = in2.w;
// out1= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(2,0,2,0)); /*ACEG*/
// out2= _mm_shuffle_ps(in1,in2,_MM_SELECT_FOUR_FOUR(3,1,3,1)); /*BDFH*/
// out1= _mm_shuffle_ps(out1,out1,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/
// out2= _mm_shuffle_ps(out2,out2,_MM_SELECT_FOUR_FOUR(3,1,2,0)); /*AECG*/
};
static accelerator_inline void Exchange2(float4 &out1,float4 &out2,float4 in1,float4 in2){
assert(0);
return;
};
static accelerator_inline void Exchange3(float4 &out1,float4 &out2,float4 in1,float4 in2){
assert(0);
return;
};
static accelerator_inline void Exchange0(double2 &out1,double2 &out2,double2 in1,double2 in2){
out1.x = in1.x;
out1.y = in2.x;
out2.x = in1.y;
out2.y = in2.y;
// out1= _mm_shuffle_pd(in1,in2,0x0);
// out2= _mm_shuffle_pd(in1,in2,0x3);
};
static accelerator_inline void Exchange1(double2 &out1,double2 &out2,double2 in1,double2 in2){
assert(0);
return;
};
static accelerator_inline void Exchange2(double2 &out1,double2 &out2,double2 in1,double2 in2){
assert(0);
return;
};
static accelerator_inline void Exchange3(double2 &out1,double2 &out2,double2 in1,double2 in2){
assert(0);
return;
};
};
struct Rotate{
static accelerator_inline float4 rotate(float4 in,int n){
float4 ret;
switch(n){
case 0: ret = in ; break;
case 1: ret.x = in.y; ret.y = in.z ; ret.z = in.w ; ret.w = in.x; break;
case 2: ret.x = in.z; ret.y = in.w ; ret.z = in.x ; ret.w = in.y; break;
case 3: ret.x = in.w; ret.y = in.x ; ret.z = in.y ; ret.w = in.z; break;
default: break;
}
return ret;
}
static accelerator_inline double2 rotate(double2 in,int n){
double2 ret;
switch(n){
case 0: ret = in; break;
case 1: ret.x = in.y; ret.y = in.x ; break;
default: break;
}
return ret;
}
template<int n> static accelerator_inline float4 tRotate(float4 in){
return rotate(in,n);
};
template<int n> static accelerator_inline double2 tRotate(double2 in){
return rotate(in,n);
};
};
//////////////////////////////////////////////
// Some Template specialization
//Complex float Reduce
template<>
accelerator_inline Grid::ComplexF Reduce<Grid::ComplexF, float4>::operator()(float4 in){
Grid::ComplexF ret(in.x+in.z,in.y+in.w);
return ret;
}
//Real float Reduce
template<>
accelerator_inline Grid::RealF Reduce<Grid::RealF, float4>::operator()(float4 in){
return in.x+in.y+in.z+in.w;
}
//Complex double Reduce
template<>
accelerator_inline Grid::ComplexD Reduce<Grid::ComplexD, double2>::operator()(double2 in){
return Grid::ComplexD(in.x,in.y);
}
//Real double Reduce
template<>
accelerator_inline Grid::RealD Reduce<Grid::RealD, double2>::operator()(double2 in){
return in.x+in.y;
}
//Integer Reduce
template<>
accelerator_inline Integer Reduce<Integer, int4>::operator()(int4 in){
return in.x+in.y+in.z+in.w;
}
}
//////////////////////////////////////////////////////////////////////////////////////
// Here assign types
//////////////////////////////////////////////////////////////////////////////////////
typedef half8 SIMD_Htype; // Half precision type
typedef float4 SIMD_Ftype; // Single precision type
typedef double2 SIMD_Dtype; // Double precision type
typedef int4 SIMD_Itype; // Integer type
// prefetch utilities
accelerator_inline void v_prefetch0(int size, const char *ptr){};
accelerator_inline void prefetch_HINT_T0(const char *ptr){};
// Function name aliases
typedef Optimization::Vsplat VsplatSIMD;
typedef Optimization::Vstore VstoreSIMD;
typedef Optimization::Vset VsetSIMD;
typedef Optimization::Vstream VstreamSIMD;
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;
// Arithmetic operations
typedef Optimization::Sum SumSIMD;
typedef Optimization::Sub SubSIMD;
typedef Optimization::Div DivSIMD;
typedef Optimization::Mult MultSIMD;
typedef Optimization::MultComplex MultComplexSIMD;
typedef Optimization::MultRealPart MultRealPartSIMD;
typedef Optimization::MaddRealPart MaddRealPartSIMD;
typedef Optimization::Conj ConjSIMD;
typedef Optimization::TimesMinusI TimesMinusISIMD;
typedef Optimization::TimesI TimesISIMD;
}

View File

@ -11,6 +11,7 @@ Author: Guido Cossu <cossu@iroiro-pc.kek.jp>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Michael Marshall <michael.marshall@ed.ac.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -109,9 +110,6 @@ accelerator_inline Grid_half sfw_float_to_half(float ff) {
#ifdef GPU_VEC
#include "Grid_gpu_vec.h"
#endif
#ifdef GPU
#include "Grid_gpu.h"
#endif
#ifdef GEN
#include "Grid_generic.h"
#endif
@ -162,17 +160,25 @@ template <typename Condition, typename ReturnType> using NotEnableIf = Invoke<st
////////////////////////////////////////////////////////
// Check for complexity with type traits
template <typename T> struct is_complex : public std::false_type {};
template <> struct is_complex<complex<double> > : public std::true_type {};
template <> struct is_complex<complex<float> > : public std::true_type {};
template <> struct is_complex<ComplexD> : public std::true_type {};
template <> struct is_complex<ComplexF> : public std::true_type {};
template <typename T> using IfReal = Invoke<std::enable_if<std::is_floating_point<T>::value, int> >;
template<typename T, typename V=void> struct is_real : public std::false_type {};
template<typename T> struct is_real<T, typename std::enable_if<std::is_floating_point<T>::value,
void>::type> : public std::true_type {};
template<typename T, typename V=void> struct is_integer : public std::false_type {};
template<typename T> struct is_integer<T, typename std::enable_if<std::is_integral<T>::value,
void>::type> : public std::true_type {};
template <typename T> using IfReal = Invoke<std::enable_if<is_real<T>::value, int> >;
template <typename T> using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<std::is_integral<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<is_integer<T>::value, int> >;
template <typename T1,typename T2> using IfSame = Invoke<std::enable_if<std::is_same<T1,T2>::value, int> >;
template <typename T> using IfNotReal = Invoke<std::enable_if<!std::is_floating_point<T>::value, int> >;
template <typename T> using IfNotReal = Invoke<std::enable_if<!is_real<T>::value, int> >;
template <typename T> using IfNotComplex = Invoke<std::enable_if<!is_complex<T>::value, int> >;
template <typename T> using IfNotInteger = Invoke<std::enable_if<!std::is_integral<T>::value, int> >;
template <typename T> using IfNotInteger = Invoke<std::enable_if<!is_integer<T>::value, int> >;
template <typename T1,typename T2> using IfNotSame = Invoke<std::enable_if<!std::is_same<T1,T2>::value, int> >;
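// Illustrative sketch (not in the original source): with the refined traits
// above, e.g.
//   is_real<double>::value == true,  is_real<ComplexD>::value  == false,
//   is_integer<int>::value == true,  is_complex<ComplexF>::value == true,
// so IfReal/IfInteger/IfComplex select overloads only for genuine real,
// integral or complex scalar types.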
////////////////////////////////////////////////////////
@ -957,8 +963,10 @@ template <typename T>
struct is_simd : public std::false_type {};
template <> struct is_simd<vRealF> : public std::true_type {};
template <> struct is_simd<vRealD> : public std::true_type {};
template <> struct is_simd<vRealH> : public std::true_type {};
template <> struct is_simd<vComplexF> : public std::true_type {};
template <> struct is_simd<vComplexD> : public std::true_type {};
template <> struct is_simd<vComplexH> : public std::true_type {};
template <> struct is_simd<vInteger> : public std::true_type {};
template <typename T> using IfSimd = Invoke<std::enable_if<is_simd<T>::value, int> >;

View File

@ -5,6 +5,7 @@ Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Michael Marshall <michael.marshall@ed.ac.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -21,8 +22,7 @@ See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_MATH_TENSORS_H
#define GRID_MATH_TENSORS_H
#pragma once
NAMESPACE_BEGIN(Grid);
@ -42,27 +42,26 @@ NAMESPACE_BEGIN(Grid);
//
class GridTensorBase {};
// Too late to remove these traits from Grid Tensors, so inherit from GridTypeMapper
#define GridVector_CopyTraits \
using element = vtype; \
using scalar_type = typename Traits::scalar_type; \
using vector_type = typename Traits::vector_type; \
using vector_typeD = typename Traits::vector_typeD; \
using tensor_reduced = typename Traits::tensor_reduced; \
using scalar_object = typename Traits::scalar_object; \
using Complexified = typename Traits::Complexified; \
using Realified = typename Traits::Realified; \
using DoublePrecision = typename Traits::DoublePrecision; \
static constexpr int TensorLevel = Traits::TensorLevel
template <class vtype>
class iScalar {
public:
vtype _internal;
typedef vtype element;
typedef typename GridTypeMapper<vtype>::scalar_type scalar_type;
typedef typename GridTypeMapper<vtype>::vector_type vector_type;
typedef typename GridTypeMapper<vtype>::vector_typeD vector_typeD;
typedef typename GridTypeMapper<vtype>::tensor_reduced tensor_reduced_v;
typedef typename GridTypeMapper<vtype>::scalar_object recurse_scalar_object;
typedef iScalar<tensor_reduced_v> tensor_reduced;
typedef iScalar<recurse_scalar_object> scalar_object;
// substitutes a real or complex version with same tensor structure
typedef iScalar<typename GridTypeMapper<vtype>::Complexified> Complexified;
typedef iScalar<typename GridTypeMapper<vtype>::Realified> Realified;
// get double precision version
typedef iScalar<typename GridTypeMapper<vtype>::DoublePrecision> DoublePrecision;
enum { TensorLevel = GridTypeMapper<vtype>::TensorLevel + 1 };
using Traits = GridTypeMapper<iScalar<vtype> >;
GridVector_CopyTraits;
static accelerator_inline constexpr int Nsimd(void) { return sizeof(vector_type)/sizeof(scalar_type); }
@ -160,6 +159,10 @@ public:
stream << "S {" << o._internal << "}";
return stream;
};
strong_inline const scalar_type * begin() const { return reinterpret_cast<const scalar_type *>(&_internal); }
strong_inline scalar_type * begin() { return reinterpret_cast< scalar_type *>(&_internal); }
strong_inline const scalar_type * end() const { return begin() + Traits::count; }
strong_inline scalar_type * end() { return begin() + Traits::count; }
};
///////////////////////////////////////////////////////////
@ -180,35 +183,22 @@ template <class vtype, int N>
class iVector {
public:
vtype _internal[N];
using Traits = GridTypeMapper<iVector<vtype, N> >;
typedef vtype element;
typedef typename GridTypeMapper<vtype>::scalar_type scalar_type;
typedef typename GridTypeMapper<vtype>::vector_type vector_type;
typedef typename GridTypeMapper<vtype>::vector_typeD vector_typeD;
typedef typename GridTypeMapper<vtype>::tensor_reduced tensor_reduced_v;
typedef typename GridTypeMapper<vtype>::scalar_object recurse_scalar_object;
typedef iScalar<tensor_reduced_v> tensor_reduced;
typedef iVector<recurse_scalar_object, N> scalar_object;
// substitutes a real or complex version with same tensor structure
typedef iVector<typename GridTypeMapper<vtype>::Complexified, N> Complexified;
typedef iVector<typename GridTypeMapper<vtype>::Realified, N> Realified;
// get double precision version
typedef iVector<typename GridTypeMapper<vtype>::DoublePrecision, N> DoublePrecision;
GridVector_CopyTraits;
static accelerator_inline constexpr int Nsimd(void) { return sizeof(vector_type)/sizeof(scalar_type); }
template <class T, typename std::enable_if<!isGridTensor<T>::value, T>::type * = nullptr>
accelerator_inline auto operator=(T arg) -> iVector<vtype, N> {
strong_inline auto operator=(T arg) -> iVector<vtype, N> {
zeroit(*this);
for (int i = 0; i < N; i++) _internal[i] = arg;
return *this;
}
enum { TensorLevel = GridTypeMapper<vtype>::TensorLevel + 1 };
accelerator_inline iVector(const Zero &z) { zeroit(*this); };
accelerator iVector() = default;
accelerator_inline iVector(const Zero &z) { zeroit(*this); };
accelerator_inline iVector<vtype, N> &operator=(const Zero &hero) {
zeroit(*this);
@ -281,6 +271,15 @@ public:
stream << "}";
return stream;
};
// strong_inline vtype && operator ()(int i) {
// return _internal[i];
// }
strong_inline const scalar_type * begin() const { return reinterpret_cast<const scalar_type *>(_internal); }
strong_inline scalar_type * begin() { return reinterpret_cast< scalar_type *>(_internal); }
strong_inline const scalar_type * end() const { return begin() + Traits::count; }
strong_inline scalar_type * end() { return begin() + Traits::count; }
};
template <class vtype, int N>
@ -288,25 +287,9 @@ class iMatrix {
public:
vtype _internal[N][N];
typedef vtype element;
typedef typename GridTypeMapper<vtype>::scalar_type scalar_type;
typedef typename GridTypeMapper<vtype>::vector_type vector_type;
typedef typename GridTypeMapper<vtype>::vector_typeD vector_typeD;
typedef typename GridTypeMapper<vtype>::tensor_reduced tensor_reduced_v;
typedef typename GridTypeMapper<vtype>::scalar_object recurse_scalar_object;
using Traits = GridTypeMapper<iMatrix<vtype, N> >;
// substitutes a real or complex version with same tensor structure
typedef iMatrix<typename GridTypeMapper<vtype>::Complexified, N> Complexified;
typedef iMatrix<typename GridTypeMapper<vtype>::Realified, N> Realified;
// get double precision version
typedef iMatrix<typename GridTypeMapper<vtype>::DoublePrecision, N> DoublePrecision;
// Tensor removal
typedef iScalar<tensor_reduced_v> tensor_reduced;
typedef iMatrix<recurse_scalar_object, N> scalar_object;
enum { TensorLevel = GridTypeMapper<vtype>::TensorLevel + 1 };
GridVector_CopyTraits;
static accelerator_inline constexpr int Nsimd(void) { return sizeof(vector_type)/sizeof(scalar_type); }
@ -428,6 +411,14 @@ public:
return stream;
};
// strong_inline vtype && operator ()(int i,int j) {
// return _internal[i][j];
// }
strong_inline const scalar_type * begin() const { return reinterpret_cast<const scalar_type *>(_internal[0]); }
strong_inline scalar_type * begin() { return reinterpret_cast< scalar_type *>(_internal[0]); }
strong_inline const scalar_type * end() const { return begin() + Traits::count; }
strong_inline scalar_type * end() { return begin() + Traits::count; }
};
template <class v> accelerator_inline
@ -451,4 +442,3 @@ void vprefetch(const iMatrix<v, N> &vv) {
NAMESPACE_END(Grid);
#endif

View File

@ -1,10 +1,11 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/tensors/Tensor_traits.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Christopher Kelly <ckelly@phys.columbia.edu>
Author: Michael Marshall <michael.marshall@ed.ac.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@ -17,14 +18,25 @@ Author: Christopher Kelly <ckelly@phys.columbia.edu>
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_MATH_TRAITS_H
#define GRID_MATH_TRAITS_H
#include <type_traits>
NAMESPACE_BEGIN(Grid);
namespace Grid {
// Forward declarations
template<class T> class iScalar;
template<class T, int N> class iVector;
template<class T, int N> class iMatrix;
// These are the Grid tensors
template<typename T> struct isGridTensor : public std::false_type { static constexpr bool notvalue = true; };
template<class T> struct isGridTensor<iScalar<T>> : public std::true_type { static constexpr bool notvalue = false; };
template<class T, int N> struct isGridTensor<iVector<T, N>> : public std::true_type { static constexpr bool notvalue = false; };
template<class T, int N> struct isGridTensor<iMatrix<T, N>> : public std::true_type { static constexpr bool notvalue = false; };
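// Usage sketch (illustrative only, not part of the header): the primary template
// answers false for anything that is not one of the three wrappers, e.g.
//   isGridTensor<iScalar<ComplexD>>::value  -> true
//   isGridTensor<ComplexD>::value           -> false
// notvalue is simply the negation, kept for enable_if-style dispatch.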
//////////////////////////////////////////////////////////////////////////////////
// Want to recurse: GridTypeMapper<Matrix<vComplexD> >::scalar_type == ComplexD.
@ -40,258 +52,236 @@ NAMESPACE_BEGIN(Grid);
// to study C++11's type_traits.h file. (std::enable_if<isGridTensorType<vtype> >)
//
//////////////////////////////////////////////////////////////////////////////////
template <class T> class GridTypeMapper {
public:
typedef typename T::scalar_type scalar_type;
typedef typename T::vector_type vector_type;
typedef typename T::vector_typeD vector_typeD;
typedef typename T::tensor_reduced tensor_reduced;
typedef typename T::scalar_object scalar_object;
typedef typename T::Complexified Complexified;
typedef typename T::Realified Realified;
typedef typename T::DoublePrecision DoublePrecision;
enum { TensorLevel = T::TensorLevel };
};
// This saves repeating common properties for supported Grid Scalar types
// TensorLevel How many nested grid tensors
// Rank Rank of the grid tensor
// count Total number of elements, i.e. product of dimensions
// Dimension(dim) Size of dimension dim
struct GridTypeMapper_Base {
static constexpr int TensorLevel = 0;
static constexpr int Rank = 0;
static constexpr std::size_t count = 1;
static constexpr int Dimension(int dim) { return 0; }
};
//////////////////////////////////////////////////////////////////////////////////
// Recursion stops with these template specialisations
//////////////////////////////////////////////////////////////////////////////////
template<> class GridTypeMapper<RealF> {
public:
typedef RealF scalar_type;
typedef RealF vector_type;
typedef RealD vector_typeD;
typedef RealF tensor_reduced ;
typedef RealF scalar_object;
typedef ComplexF Complexified;
typedef RealF Realified;
typedef RealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<RealD> {
public:
typedef RealD scalar_type;
typedef RealD vector_type;
typedef RealD vector_typeD;
typedef RealD tensor_reduced;
typedef RealD scalar_object;
typedef ComplexD Complexified;
typedef RealD Realified;
typedef RealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<ComplexF> {
public:
typedef ComplexF scalar_type;
typedef ComplexF vector_type;
typedef ComplexD vector_typeD;
typedef ComplexF tensor_reduced;
typedef ComplexF scalar_object;
typedef ComplexF Complexified;
typedef RealF Realified;
typedef ComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<ComplexD> {
public:
typedef ComplexD scalar_type;
typedef ComplexD vector_type;
typedef ComplexD vector_typeD;
typedef ComplexD tensor_reduced;
typedef ComplexD scalar_object;
typedef ComplexD Complexified;
typedef RealD Realified;
typedef ComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<Integer> {
public:
typedef Integer scalar_type;
typedef Integer vector_type;
typedef Integer vector_typeD;
typedef Integer tensor_reduced;
typedef Integer scalar_object;
typedef void Complexified;
typedef void Realified;
typedef void DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<vRealF> {
public:
typedef RealF scalar_type;
typedef vRealF vector_type;
typedef vRealD vector_typeD;
typedef vRealF tensor_reduced;
typedef RealF scalar_object;
typedef vComplexF Complexified;
typedef vRealF Realified;
typedef vRealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<vRealD> {
public:
typedef RealD scalar_type;
typedef vRealD vector_type;
typedef vRealD vector_typeD;
typedef vRealD tensor_reduced;
typedef RealD scalar_object;
typedef vComplexD Complexified;
typedef vRealD Realified;
typedef vRealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<vComplexH> {
public:
typedef ComplexF scalar_type;
typedef vComplexH vector_type;
typedef vComplexD vector_typeD;
typedef vComplexH tensor_reduced;
typedef ComplexF scalar_object;
typedef vComplexH Complexified;
typedef vRealH Realified;
typedef vComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<vComplexF> {
public:
typedef ComplexF scalar_type;
typedef vComplexF vector_type;
typedef vComplexD vector_typeD;
typedef vComplexF tensor_reduced;
typedef ComplexF scalar_object;
typedef vComplexF Complexified;
typedef vRealF Realified;
typedef vComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<vComplexD> {
public:
typedef ComplexD scalar_type;
typedef vComplexD vector_type;
typedef vComplexD vector_typeD;
typedef vComplexD tensor_reduced;
typedef ComplexD scalar_object;
typedef vComplexD Complexified;
typedef vRealD Realified;
typedef vComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> class GridTypeMapper<vInteger> {
public:
typedef Integer scalar_type;
typedef vInteger vector_type;
typedef vInteger vector_typeD;
typedef vInteger tensor_reduced;
typedef Integer scalar_object;
typedef void Complexified;
typedef void Realified;
typedef void DoublePrecision;
enum { TensorLevel = 0 };
};
template<typename T> struct GridTypeMapper {};
// First some of my own traits
template<typename T> struct isGridTensor {
static const bool value = true;
static const bool notvalue = false;
};
template<> struct isGridTensor<int > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<RealD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<RealF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<ComplexD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<ComplexF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<Integer > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vRealD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vRealF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vComplexD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vComplexF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vInteger > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct GridTypeMapper<RealF> : public GridTypeMapper_Base {
typedef RealF scalar_type;
typedef RealF vector_type;
typedef RealD vector_typeD;
typedef RealF tensor_reduced ;
typedef RealF scalar_object;
typedef ComplexF Complexified;
typedef RealF Realified;
typedef RealD DoublePrecision;
};
template<> struct GridTypeMapper<RealD> : public GridTypeMapper_Base {
typedef RealD scalar_type;
typedef RealD vector_type;
typedef RealD vector_typeD;
typedef RealD tensor_reduced;
typedef RealD scalar_object;
typedef ComplexD Complexified;
typedef RealD Realified;
typedef RealD DoublePrecision;
};
template<> struct GridTypeMapper<ComplexF> : public GridTypeMapper_Base {
typedef ComplexF scalar_type;
typedef ComplexF vector_type;
typedef ComplexD vector_typeD;
typedef ComplexF tensor_reduced;
typedef ComplexF scalar_object;
typedef ComplexF Complexified;
typedef RealF Realified;
typedef ComplexD DoublePrecision;
};
template<> struct GridTypeMapper<ComplexD> : public GridTypeMapper_Base {
typedef ComplexD scalar_type;
typedef ComplexD vector_type;
typedef ComplexD vector_typeD;
typedef ComplexD tensor_reduced;
typedef ComplexD scalar_object;
typedef ComplexD Complexified;
typedef RealD Realified;
typedef ComplexD DoublePrecision;
};
template<> struct GridTypeMapper<Integer> : public GridTypeMapper_Base {
typedef Integer scalar_type;
typedef Integer vector_type;
typedef Integer vector_typeD;
typedef Integer tensor_reduced;
typedef Integer scalar_object;
typedef void Complexified;
typedef void Realified;
typedef void DoublePrecision;
};
// Match the index
template<typename T,int Level> struct matchGridTensorIndex {
static const bool value = (Level==T::TensorLevel);
static const bool notvalue = (Level!=T::TensorLevel);
};
// What is the vtype
template<typename T> struct isComplex {
static const bool value = false;
};
template<> struct isComplex<ComplexF> {
static const bool value = true;
};
template<> struct isComplex<ComplexD> {
static const bool value = true;
};
template<> struct GridTypeMapper<vRealF> : public GridTypeMapper_Base {
typedef RealF scalar_type;
typedef vRealF vector_type;
typedef vRealD vector_typeD;
typedef vRealF tensor_reduced;
typedef RealF scalar_object;
typedef vComplexF Complexified;
typedef vRealF Realified;
typedef vRealD DoublePrecision;
};
template<> struct GridTypeMapper<vRealD> : public GridTypeMapper_Base {
typedef RealD scalar_type;
typedef vRealD vector_type;
typedef vRealD vector_typeD;
typedef vRealD tensor_reduced;
typedef RealD scalar_object;
typedef vComplexD Complexified;
typedef vRealD Realified;
typedef vRealD DoublePrecision;
};
template<> struct GridTypeMapper<vRealH> : public GridTypeMapper_Base {
// Fixme this is incomplete until Grid supports fp16 or bfp16 arithmetic types
typedef RealF scalar_type;
typedef vRealH vector_type;
typedef vRealD vector_typeD;
typedef vRealH tensor_reduced;
typedef RealF scalar_object;
typedef vComplexH Complexified;
typedef vRealH Realified;
typedef vRealD DoublePrecision;
};
template<> struct GridTypeMapper<vComplexH> : public GridTypeMapper_Base {
// Fixme this is incomplete until Grid supports fp16 or bfp16 arithmetic types
typedef ComplexF scalar_type;
typedef vComplexH vector_type;
typedef vComplexD vector_typeD;
typedef vComplexH tensor_reduced;
typedef ComplexF scalar_object;
typedef vComplexH Complexified;
typedef vRealH Realified;
typedef vComplexD DoublePrecision;
};
template<> struct GridTypeMapper<vComplexF> : public GridTypeMapper_Base {
typedef ComplexF scalar_type;
typedef vComplexF vector_type;
typedef vComplexD vector_typeD;
typedef vComplexF tensor_reduced;
typedef ComplexF scalar_object;
typedef vComplexF Complexified;
typedef vRealF Realified;
typedef vComplexD DoublePrecision;
};
template<> struct GridTypeMapper<vComplexD> : public GridTypeMapper_Base {
typedef ComplexD scalar_type;
typedef vComplexD vector_type;
typedef vComplexD vector_typeD;
typedef vComplexD tensor_reduced;
typedef ComplexD scalar_object;
typedef vComplexD Complexified;
typedef vRealD Realified;
typedef vComplexD DoublePrecision;
};
template<> struct GridTypeMapper<vInteger> : public GridTypeMapper_Base {
typedef Integer scalar_type;
typedef vInteger vector_type;
typedef vInteger vector_typeD;
typedef vInteger tensor_reduced;
typedef Integer scalar_object;
typedef void Complexified;
typedef void Realified;
typedef void DoublePrecision;
};
//Get the SIMD vector type from a Grid tensor or Lattice<Tensor>
template<typename T>
struct getVectorType{
typedef T type;
};
#define GridTypeMapper_RepeatedTypes \
using BaseTraits = GridTypeMapper<T>; \
using scalar_type = typename BaseTraits::scalar_type; \
using vector_type = typename BaseTraits::vector_type; \
using vector_typeD = typename BaseTraits::vector_typeD; \
static constexpr int TensorLevel = BaseTraits::TensorLevel + 1
template<typename T> struct GridTypeMapper<iScalar<T>> {
GridTypeMapper_RepeatedTypes;
using tensor_reduced = iScalar<typename BaseTraits::tensor_reduced>;
using scalar_object = iScalar<typename BaseTraits::scalar_object>;
using Complexified = iScalar<typename BaseTraits::Complexified>;
using Realified = iScalar<typename BaseTraits::Realified>;
using DoublePrecision = iScalar<typename BaseTraits::DoublePrecision>;
static constexpr int Rank = BaseTraits::Rank + 1;
static constexpr std::size_t count = BaseTraits::count;
static constexpr int Dimension(int dim) {
return ( dim == 0 ) ? 1 : BaseTraits::Dimension(dim - 1); }
};
template<typename T, int N> struct GridTypeMapper<iVector<T, N>> {
GridTypeMapper_RepeatedTypes;
using tensor_reduced = iScalar<typename BaseTraits::tensor_reduced>;
using scalar_object = iVector<typename BaseTraits::scalar_object, N>;
using Complexified = iVector<typename BaseTraits::Complexified, N>;
using Realified = iVector<typename BaseTraits::Realified, N>;
using DoublePrecision = iVector<typename BaseTraits::DoublePrecision, N>;
static constexpr int Rank = BaseTraits::Rank + 1;
static constexpr std::size_t count = BaseTraits::count * N;
static constexpr int Dimension(int dim) {
return ( dim == 0 ) ? N : BaseTraits::Dimension(dim - 1); }
};
template<typename T, int N> struct GridTypeMapper<iMatrix<T, N>> {
GridTypeMapper_RepeatedTypes;
using tensor_reduced = iScalar<typename BaseTraits::tensor_reduced>;
using scalar_object = iMatrix<typename BaseTraits::scalar_object, N>;
using Complexified = iMatrix<typename BaseTraits::Complexified, N>;
using Realified = iMatrix<typename BaseTraits::Realified, N>;
using DoublePrecision = iMatrix<typename BaseTraits::DoublePrecision, N>;
static constexpr int Rank = BaseTraits::Rank + 2;
static constexpr std::size_t count = BaseTraits::count * N * N;
static constexpr int Dimension(int dim) {
return ( dim == 0 || dim == 1 ) ? N : BaseTraits::Dimension(dim - 2); }
};
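// Worked sketch (illustrative only): the specialisations compose, so for a
// hypothetical nested type T = iVector<iScalar<vComplexF>, 3>
//   GridTypeMapper<T>::scalar_type is ComplexF, vector_type is vComplexF,
//   TensorLevel == 2, Rank == 2, count == 3,
//   Dimension(0) == 3 (the iVector), Dimension(1) == 1 (the inner iScalar).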
// Match the index
template<typename T,int Level> struct matchGridTensorIndex {
static const bool value = (Level==T::TensorLevel);
static const bool notvalue = (Level!=T::TensorLevel);
};
// What is the vtype
template<typename T> struct isComplex {
static const bool value = false;
};
template<> struct isComplex<ComplexF> {
static const bool value = true;
};
template<> struct isComplex<ComplexD> {
static const bool value = true;
};
//Get the SIMD vector type from a Grid tensor or Lattice<Tensor>
template<typename T>
struct getVectorType{
typedef T type;
};
//Query if a tensor or Lattice<Tensor> is SIMD vector or scalar
template<typename T>
class isSIMDvectorized{
template<typename U>
static typename std::enable_if<
!std::is_same< typename GridTypeMapper<typename getVectorType<U>::type>::scalar_type,
typename GridTypeMapper<typename getVectorType<U>::type>::vector_type>::value,
char>::type test(void *);
//Query whether a tensor or Lattice<Tensor> is SIMD vector or scalar
template<typename T, typename V=void> struct isSIMDvectorized : public std::false_type {};
template<typename U> struct isSIMDvectorized<U, typename std::enable_if< !std::is_same<
typename GridTypeMapper<typename getVectorType<U>::type>::scalar_type,
typename GridTypeMapper<typename getVectorType<U>::type>::vector_type>::value, void>::type>
: public std::true_type {};
template<typename U> static double test(...);
public:
enum {value = sizeof(test<T>(0)) == sizeof(char) };
};
//Get the precision of a Lattice, tensor or scalar type in units of sizeof(float)
template<typename T>
class getPrecision{
public:
//get the vector_obj (i.e. a grid Tensor) if its a Lattice<vobj>, do nothing otherwise (i.e. if fundamental or grid Tensor)
typedef typename getVectorType<T>::type vector_obj;
typedef typename GridTypeMapper<vector_obj>::scalar_type scalar_type; //get the associated scalar type. Works on fundamental and tensor types
typedef typename GridTypeMapper<scalar_type>::Realified real_scalar_type; //remove any std::complex wrapper, should get us to the fundamental type
enum { value = sizeof(real_scalar_type)/sizeof(float) };
};
NAMESPACE_END(Grid);
//Get the precision of a Lattice, tensor or scalar type in units of sizeof(float)
template<typename T>
class getPrecision{
public:
//get the vector_obj (i.e. a grid Tensor) if its a Lattice<vobj>, do nothing otherwise (i.e. if fundamental or grid Tensor)
typedef typename getVectorType<T>::type vector_obj;
typedef typename GridTypeMapper<vector_obj>::scalar_type scalar_type; //get the associated scalar type. Works on fundamental and tensor types
typedef typename GridTypeMapper<scalar_type>::Realified real_scalar_type; //remove any std::complex wrapper, should get us to the fundamental type
enum { value = sizeof(real_scalar_type)/sizeof(float) };
};
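// Usage sketch (illustrative only):
//   isSIMDvectorized<vComplexF>::value -> true   (scalar_type ComplexF differs from vector_type vComplexF)
//   isSIMDvectorized<ComplexF>::value  -> false  (scalar and vector types coincide)
//   getPrecision<vComplexD>::value     -> 2      (RealD: 8 bytes / sizeof(float))
//   getPrecision<vComplexF>::value     -> 1      (RealF: 4 bytes / sizeof(float))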
}
#endif

View File

@ -272,6 +272,11 @@ void GridBanner(void)
std::cout << "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"<<std::endl;
std::cout << "GNU General Public License for more details."<<std::endl;
printHash();
#ifdef GRID_BUILD_REF
#define _GRID_BUILD_STR(x) #x
#define GRID_BUILD_STR(x) _GRID_BUILD_STR(x)
std::cout << "Build " << GRID_BUILD_STR(GRID_BUILD_REF) << std::endl;
#endif
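// Explanatory note (not functional): the two-level _GRID_BUILD_STR / GRID_BUILD_STR
// pair is the usual preprocessor stringification idiom. It lets GRID_BUILD_REF
// (assumed to be supplied at compile time, e.g. -DGRID_BUILD_REF=<git ref>) expand
// to its value before being stringised; a single-level #x would print the literal
// token "GRID_BUILD_REF" instead.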
std::cout << std::endl;
printed=1;
}
@ -419,7 +424,6 @@ void Grid_init(int *argc,char ***argv)
MemoryProfiler::stats = &dbgMemStats;
}
////////////////////////////////////
// Logging
////////////////////////////////////

6
HMC/Makefile.am Normal file
View File

@ -0,0 +1,6 @@
SUBDIRS = .
include Make.inc

198
HMC/Mobius2p1f.cc Normal file
View File

@ -0,0 +1,198 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_EODWFRatio.cc
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
int main(int argc, char **argv) {
using namespace Grid;
using namespace Grid::QCD;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 20;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 0;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 20;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
HMCparams.StartingType =std::string("ColdStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0; // Scale factor two
RealD c = 0.0;
OneFlavourRationalParams OFRp;
OFRp.lo = 1.0e-2;
OFRp.hi = 64;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-10;
OFRp.degree = 14;
OFRp.precision= 40;
std::vector<Real> hasenbusch({ 0.1 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
// These lines are unnecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(4);
////////////////////////////////////
// Strange action
////////////////////////////////////
// FermionAction StrangeOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_mass,M5,b,c, Params);
// DomainWallEOFAFermionR Strange_Op_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
// DomainWallEOFAFermionR Strange_Op_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
// ExactOneFlavourRatioPseudoFermionAction EOFA(Strange_Op_L,Strange_Op_R,CG,ofp, false);
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
// OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
OneFlavourRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
// TwoFlavourRationalTesterPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion1F(StrangeOp,OFRp);
// TwoFlavourPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion2F(StrangeOp,CG,CG);
// Level1.push_back(&StrangePseudoFermion2F);
// Level1.push_back(&StrangePseudoFermion);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
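// Sketch of the resulting Hasenbusch ladder (comment only, derived from the lists above):
// with hasenbusch = {0.1}, light_mass = 0.01 and pv_mass = 1.0 the loop below builds
// two quotient actions with (numerator mass / denominator mass) pairs
// (0.1 / 0.01) and (1.0 / 0.1), i.e. the light determinant is split Hasenbusch-style
// and terminated on the Pauli-Villars mass.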
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main

449
HMC/Mobius2p1fEOFA.cc Normal file
View File

@ -0,0 +1,449 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file:
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu
Author: David Murphy
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#define MIXED_PRECISION
namespace Grid{
namespace QCD{
/*
* Need a plan for gauge field update for mixed precision in HMC (2x speed up)
* -- Store the single prec action operator.
* -- Clone the gauge field from the operator function argument.
* -- Build the mixed precision operator dynamically from the passed operator and single prec clone.
*/
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid4; //Grid for single-precision fields
GridBase* SinglePrecGrid5; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid4,
GridBase* _sp_grid5,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD):
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tol),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid4(_sp_grid4),
SinglePrecGrid5(_sp_grid5),
OuterLoopNormMult(100.)
{
/* Debugging instances of objects; references are stored
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpF " <<std::hex<< &LinOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpD " <<std::hex<< &LinOpD<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpF " <<std::hex<< &FermOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpD " <<std::hex<< &FermOpD<<std::dec <<std::endl;
*/
};
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpU " <<std::hex<< &(SchurOpU->_Mat)<<std::dec <<std::endl;
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpD " <<std::hex<< &(LinOpD._Mat) <<std::dec <<std::endl;
// Assumption made in code to extract gauge field
// Could we avoid storing the LinOpD reference altogether?
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
////////////////////////////////////////////////////////////////////////////////////
// Must snarf a single precision copy of the gauge field in Linop_d argument
////////////////////////////////////////////////////////////////////////////////////
typedef typename FermionOperatorF::GaugeField GaugeFieldF;
typedef typename FermionOperatorF::GaugeLinkField GaugeLinkFieldF;
typedef typename FermionOperatorD::GaugeField GaugeFieldD;
typedef typename FermionOperatorD::GaugeLinkField GaugeLinkFieldD;
GridBase * GridPtrF = SinglePrecGrid4;
GridBase * GridPtrD = FermOpD.Umu._grid;
GaugeFieldF U_f (GridPtrF);
GaugeLinkFieldF Umu_f(GridPtrF);
// std::cout << " Dim gauge field "<<GridPtrF->Nd()<<std::endl; // 4d
// std::cout << " Dim gauge field "<<GridPtrD->Nd()<<std::endl; // 4d
////////////////////////////////////////////////////////////////////////////////////
// Moving this to a Clone method of the fermion operator would allow the
// physics parameters to be duplicated and reduce the number of gauge field copies
////////////////////////////////////////////////////////////////////////////////////
GaugeLinkFieldD Umu_d(GridPtrD);
for(int mu=0;mu<Nd*2;mu++){
Umu_d = PeekIndex<LorentzIndex>(FermOpD.Umu, mu);
precisionChange(Umu_f,Umu_d);
PokeIndex<LorentzIndex>(FermOpF.Umu, Umu_f, mu);
}
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
////////////////////////////////////////////////////////////////////////////////////
// Could test to make sure that LinOpF and LinOpD agree to single prec?
////////////////////////////////////////////////////////////////////////////////////
/*
GridBase *Fgrid = psi._grid;
FieldD tmp2(Fgrid);
FieldD tmp1(Fgrid);
LinOpU.Op(src,tmp1);
LinOpD.Op(src,tmp2);
std::cout << " Double gauge field "<< norm2(FermOpD.Umu)<<std::endl;
std::cout << " Single gauge field "<< norm2(FermOpF.Umu)<<std::endl;
std::cout << " Test of operators "<<norm2(tmp1)<<std::endl;
std::cout << " Test of operators "<<norm2(tmp2)<<std::endl;
tmp1=tmp1-tmp2;
std::cout << " Test of operators diff "<<norm2(tmp1)<<std::endl;
*/
////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
////////////////////////////////////////////////////////////////////////////////////
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
MPCG(src,psi);
}
};
}};
int main(int argc, char **argv) {
using namespace Grid;
using namespace Grid::QCD;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionF FermionActionF;
typedef MobiusEOFAFermionR FermionEOFAAction;
typedef MobiusEOFAFermionF FermionEOFAActionF;
typedef typename FermionAction::FermionField FermionField;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
typedef GenericHMCRunner<ForceGradient> HMCWrapper;
MD.name = std::string("Force Gradient");
// typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
// MD.name = std::string("MinimumNorm2");
MD.MDsteps = 6;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 590;
HMCparams.Trajectories = 1000;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
std::vector<Real> hasenbusch({ 0.1, 0.3, 0.6 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
std::vector<int> latt = GridDefaultLatt();
std::vector<int> mpi = GridDefaultMpi();
std::vector<int> simdF = GridDefaultSimd(Nd,vComplexF::Nsimd());
std::vector<int> simdD = GridDefaultSimd(Nd,vComplexD::Nsimd());
auto GridPtrF = SpaceTimeGrid::makeFourDimGrid(latt,simdF,mpi);
auto GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
LatticeGaugeFieldF UF(GridPtrF);
// These lines are unnecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
FermionActionF::ImplParams ParamsF(boundary);
double ActionStoppingCondition = 1e-10;
double DerivativeStoppingCondition = 1e-6;
double MaxCGIterations = 30000;
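// Rationale (assumed, see also HMC/README): the MD force solves use the looser
// DerivativeStoppingCondition, while the action solves that feed the Metropolis
// accept/reject keep the tight ActionStoppingCondition; integration error from
// cheaper force solves is then absorbed by the accept/reject step.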
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(8);
////////////////////////////////////
// Strange action
////////////////////////////////////
typedef SchurDiagMooeeOperator<FermionActionF,FermionFieldF> LinearOperatorF;
typedef SchurDiagMooeeOperator<FermionAction ,FermionField > LinearOperatorD;
typedef SchurDiagMooeeOperator<FermionEOFAActionF,FermionFieldF> LinearOperatorEOFAF;
typedef SchurDiagMooeeOperator<FermionEOFAAction ,FermionField > LinearOperatorEOFAD;
typedef MixedPrecisionConjugateGradientOperatorFunction<MobiusFermionD,MobiusFermionF,LinearOperatorD,LinearOperatorF> MxPCG;
typedef MixedPrecisionConjugateGradientOperatorFunction<MobiusEOFAFermionD,MobiusEOFAFermionF,LinearOperatorEOFAD,LinearOperatorEOFAF> MxPCG_EOFA;
// DJM: setup for EOFA ratio (Mobius)
OneFlavourRationalParams OFRp;
OFRp.lo = 0.1;
OFRp.hi = 25.0;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-9;
OFRp.degree = 14;
OFRp.precision= 50;
MobiusEOFAFermionR Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionF Strange_Op_LF(UF, *FGridF, *FrbGridF, *GridPtrF, *GridRBPtrF, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Strange_Op_R (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
MobiusEOFAFermionF Strange_Op_RF(UF, *FGridF, *FrbGridF, *GridPtrF, *GridRBPtrF, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
ConjugateGradient<FermionField> ActionCG(ActionStoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField> DerivativeCG(DerivativeStoppingCondition,MaxCGIterations);
#ifdef MIXED_PRECISION
const int MX_inner = 1000;
// Mixed precision EOFA
LinearOperatorEOFAD Strange_LinOp_L (Strange_Op_L);
LinearOperatorEOFAD Strange_LinOp_R (Strange_Op_R);
LinearOperatorEOFAF Strange_LinOp_LF(Strange_Op_LF);
LinearOperatorEOFAF Strange_LinOp_RF(Strange_Op_RF);
MxPCG_EOFA ActionCGL(ActionStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA DerivativeCGL(DerivativeStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA ActionCGR(ActionStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
MxPCG_EOFA DerivativeCGR(DerivativeStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
EOFA(Strange_Op_L, Strange_Op_R,
ActionCG,
ActionCGL, ActionCGR,
DerivativeCGL, DerivativeCGR,
OFRp, true);
#else
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
EOFA(Strange_Op_L, Strange_Op_R,
ActionCG, ActionCG,
DerivativeCG, DerivativeCG,
OFRp, true);
#endif
Level1.push_back(&EOFA);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
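// Mass ladder here (comment only): with hasenbusch = {0.1, 0.3, 0.6} the quotient
// (numerator / denominator) mass pairs built below are
// (0.1 / 0.01), (0.3 / 0.1), (0.6 / 0.3) and (1.0 / 0.6).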
//////////////////////////////////////////////////////////////
// Forced to replicate the MxPCG and DenominatorsF etc. because
// there is no convenient way to "Clone" physics params from double op
// into single op for any operator pair.
// Same issue prevents using MxPCG in the Heatbath step
//////////////////////////////////////////////////////////////
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
std::vector<MxPCG *> ActionMPCG;
std::vector<MxPCG *> MPCG;
std::vector<FermionActionF *> DenominatorsF;
std::vector<LinearOperatorD *> LinOpD;
std::vector<LinearOperatorF *> LinOpF;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
#ifdef MIXED_PRECISION
////////////////////////////////////////////////////////////////////////////
// Mixed precision CG for 2f force
////////////////////////////////////////////////////////////////////////////
DenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsF));
LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
LinOpF.push_back(new LinearOperatorF(*DenominatorsF[h]));
MPCG.push_back(new MxPCG(DerivativeStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
ActionMPCG.push_back(new MxPCG(ActionStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
// Heatbath not mixed precision yet. Less critical, since it inverts the numerators, which carry the raised mass.
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],*MPCG[h],*ActionMPCG[h],ActionCG));
#else
////////////////////////////////////////////////////////////////////////////
// Standard CG for 2f force
////////////////////////////////////////////////////////////////////////////
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],DerivativeCG,ActionCG));
#endif
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main

198
HMC/Mobius2p1fRHMC.cc Normal file
View File

@ -0,0 +1,198 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_EODWFRatio.cc
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
int main(int argc, char **argv) {
using namespace Grid;
using namespace Grid::QCD;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 20;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 30;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
// FIXME:
// Same in MC and MD
// Need to mix precision too
OneFlavourRationalParams OFRp;
OFRp.lo = 4.0e-3;
OFRp.hi = 30.0;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-10;
OFRp.degree = 16;
OFRp.precision= 50;
std::vector<Real> hasenbusch({ 0.1 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
// These lines are unnecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(4);
////////////////////////////////////
// Strange action
////////////////////////////////////
// FermionAction StrangeOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_mass,M5,b,c, Params);
// DomainWallEOFAFermionR Strange_Op_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
// DomainWallEOFAFermionR Strange_Op_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
// ExactOneFlavourRatioPseudoFermionAction EOFA(Strange_Op_L,Strange_Op_R,CG,ofp, false);
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
Level1.push_back(&StrangePseudoFermion);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main

109
HMC/README Normal file
View File

@ -0,0 +1,109 @@
********************************************************************
TODO:
********************************************************************
i) Got mixed precision in 2f and EOFA force and action solves.
But need mixed precision in the heatbath solve. Best for Fermop to have a "clone" method, to
reduce the number of solver and action objects. Needed ideally for the EOFA heatbath.
15% perhaps
Combine with 2x trajectory length?
ii) Rational on EOFA HB -- relax order
-- Test the approx as per David email
Resume / roll.sh
----------------------------------------------------------------
- 16^3 Currently 10 traj per hour
- EOFA use a different derivative solver from action solver
- EOFA fix David's hack to the SchurRedBlack guessing
*** Reduce precision/tolerance in EOFA with second CG param. (10% speed up)
*** Force gradient - reduced precision solve for the gradient (4/3x speedup)
*** Need a plan for gauge field update for mixed precision in HMC (2x speed up)
-- Store the single prec action operator.
-- Clone the gauge field from the operator function argument.
-- Build the mixed precision operator dynamically from the passed operator and single prec clone.
*** Mixed precision CG into EOFA portion
*** Further reduce precision in forces to 10^-6 ?
*** Overall: a 3x or so is still possible => 500s -> 160s and 20 traj per hour on 16^3.
- Use mixed precision CG in HMC
- SchurRedBlack.h: stop use of operator function; use LinearOperator or similar instead.
- Or make an OperatorFunction for mixed precision as a wrapper
********************************************************************
* Signed off 2+1f HMC with Hasenbusch and strange RHMC 16^3 x 32 DWF Ls=16 Plaquette 0.5883 ish
* Signed off 2+1f HMC with Hasenbusch and strange EOFA 16^3 x 32 DWF Ls=16 Plaquette 0.5883 ish
* Wilson plaquette cross checked against CPS and literature GwilsonFnone
********************************************************************
********************************************************************
* RHMC: Timesteps & eigenranges matched from previous CPS 16^3 x 32 runs:
********************************************************************
****
Strange (m=0.04) has eigenspan
****
16^3 done as 1+1+1 with separate PV's.
/dirac1/archive/QCDOC/host/QCDDWF/DWF/2+1f/16nt32/IWASAKI/b2.13/ls16/M1_8/ms0.04/mu0.01/rhmc_multitimescale/evol5/work
****
2+1f 16^3 - [ 4e^-4, 2.42 ] for strange
****
24^3 done as 1+1+1 at strange, and single quotient https://arxiv.org/pdf/0804.0473.pdf Eq 83,
****
double lambda_low = 4.0000000000000002e-04 <- strange
double lambda_low = 1.0000000000000000e-02 <- pauli villars
And high = 2.5
Array bsn_mass[3] = {
double bsn_mass[0] = 1.0000000000000000e+00
double bsn_mass[1] = 1.0000000000000000e+00
double bsn_mass[2] = 1.0000000000000000e+00
}
Array frm_mass[3] = {
double frm_mass[0] = 4.0000000000000001e-02
double frm_mass[1] = 4.0000000000000001e-02
double frm_mass[2] = 4.0000000000000001e-02
}
***
32^3
/dirac1/archive/QCDOC/host/QCDDWF/DWF/2+1f/32nt64/IWASAKI/b2.25/ls16/M1_8/ms0.03/mu0.004/evol6/work
***
Similar det scheme
double lambda_low = 4.0000000000000002e-04
double lambda_low = 1.0000000000000000e-02
Array bsn_mass[3] = {
double bsn_mass[0] = 1.0000000000000000e+00
double bsn_mass[1] = 1.0000000000000000e+00
double bsn_mass[2] = 1.0000000000000000e+00
}
Array frm_mass[3] = {
double frm_mass[0] = 3.0000000000000002e-02
double frm_mass[1] = 3.0000000000000002e-02
double frm_mass[2] = 3.0000000000000002e-02
}
********************************************************************
* Grid: Power method bounds check
********************************************************************
- Finding largest eigenvalue approx 25 not 2.5
- Conventions:
Grid MpcDagMpc based on:
(Moo-Moe Mee^-1 Meo)^dag(Moo-Moe Mee^-1 Meo)
- with Moo = 5-M5 = 3.2
- CPS use(d) Moo = 1
- Eigenrange in Grid is 3.2^2 rescaled so factor of 10 accounted for
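- Worked check (sketch): Moo = 5 - M5 = 5 - 1.8 = 3.2, so Moo^2 = 10.24 ~ 10;
  a CPS-convention upper bound of ~2.5 therefore maps to ~25 in Grid's MpcDagMpc
  normalisation, matching the power method estimate above.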

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/A2AMatrix.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/A2AVectors.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: fionnoh <fionnoh@gmail.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Application.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -31,7 +31,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Modules.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
#define BIG_SEP "================"
@ -48,28 +48,32 @@ Application::Application(void)
{
initLogger();
auto dim = GridDefaultLatt(), mpi = GridDefaultMpi(), loc(dim);
locVol_ = 1;
for (unsigned int d = 0; d < dim.size(); ++d)
if (dim.size())
{
loc[d] /= mpi[d];
locVol_ *= loc[d];
locVol_ = 1;
for (unsigned int d = 0; d < dim.size(); ++d)
{
loc[d] /= mpi[d];
locVol_ *= loc[d];
}
LOG(Message) << "====== HADRONS APPLICATION INITIALISATION ======" << std::endl;
LOG(Message) << "** Dimensions" << std::endl;
LOG(Message) << "Global lattice: " << dim << std::endl;
LOG(Message) << "MPI partition : " << mpi << std::endl;
LOG(Message) << "Local lattice : " << loc << std::endl;
LOG(Message) << std::endl;
LOG(Message) << "** Default parameters (and associated C macros)" << std::endl;
LOG(Message) << "ASCII output precision : " << MACOUT(DEFAULT_ASCII_PREC) << std::endl;
LOG(Message) << "Fermion implementation : " << MACOUTS(FIMPLBASE) << std::endl;
LOG(Message) << "z-Fermion implementation: " << MACOUTS(ZFIMPLBASE) << std::endl;
LOG(Message) << "Scalar implementation : " << MACOUTS(SIMPLBASE) << std::endl;
LOG(Message) << "Gauge implementation : " << MACOUTS(GIMPLBASE) << std::endl;
LOG(Message) << "Eigenvector base size : "
<< MACOUT(HADRONS_DEFAULT_LANCZOS_NBASIS) << std::endl;
LOG(Message) << "Schur decomposition : " << MACOUTS(HADRONS_DEFAULT_SCHUR) << std::endl;
LOG(Message) << std::endl;
}
LOG(Message) << "====== HADRONS APPLICATION INITIALISATION ======" << std::endl;
LOG(Message) << "** Dimensions" << std::endl;
LOG(Message) << "Global lattice: " << dim << std::endl;
LOG(Message) << "MPI partition : " << mpi << std::endl;
LOG(Message) << "Local lattice : " << loc << std::endl;
LOG(Message) << std::endl;
LOG(Message) << "** Default parameters (and associated C macros)" << std::endl;
LOG(Message) << "ASCII output precision : " << MACOUT(DEFAULT_ASCII_PREC) << std::endl;
LOG(Message) << "Fermion implementation : " << MACOUTS(FIMPLBASE) << std::endl;
LOG(Message) << "z-Fermion implementation: " << MACOUTS(ZFIMPLBASE) << std::endl;
LOG(Message) << "Scalar implementation : " << MACOUTS(SIMPLBASE) << std::endl;
LOG(Message) << "Gauge implementation : " << MACOUTS(GIMPLBASE) << std::endl;
LOG(Message) << "Eigenvector base size : "
<< MACOUT(HADRONS_DEFAULT_LANCZOS_NBASIS) << std::endl;
LOG(Message) << "Schur decomposition : " << MACOUTS(HADRONS_DEFAULT_SCHUR) << std::endl;
LOG(Message) << std::endl;
}
Application::Application(const Application::GlobalPar &par)
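As a reference for the guarded loop above, a standalone sketch of the same local-volume computation (the lattice and MPI extents are made-up example values, not Hadrons defaults):

// Illustration only: split a global lattice across an MPI grid and count
// the sites local to one rank, as the constructor above does.
#include <cstdio>
#include <vector>

int main(void)
{
    std::vector<int> dim = {16, 16, 16, 32}; // assumed global lattice
    std::vector<int> mpi = {2, 2, 2, 2};     // assumed MPI decomposition
    std::vector<int> loc = dim;
    long locVol = 1;
    for (unsigned int d = 0; d < dim.size(); ++d)
    {
        loc[d] /= mpi[d];  // local extent per rank
        locVol *= loc[d];  // accumulate local site count
    }
    printf("local lattice 8x8x8x16, volume = %ld sites\n", locVol); // 8192
    return 0;
}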
@ -114,7 +118,22 @@ void Application::run(void)
vm().setRunId(getPar().runId);
vm().printContent();
env().printContent();
schedule();
if (getPar().saveSchedule or getPar().scheduleFile.empty())
{
schedule();
if (getPar().saveSchedule)
{
std::string filename;
filename = (getPar().scheduleFile.empty()) ?
"hadrons.sched" : getPar().scheduleFile;
saveSchedule(filename);
}
}
else
{
loadSchedule(getPar().scheduleFile);
}
printSchedule();
if (!getPar().graphFile.empty())
{
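Read as plain control flow, the block added above schedules afresh unless an existing schedule file should be reused; a trivial standard-C++ sketch of that decision (parameter names and values are illustrative, not the Hadrons GlobalPar API):

// Illustration only: the schedule/save/load decision made in Application::run.
#include <iostream>
#include <string>

int main(void)
{
    bool        saveSchedule = true; // assumed parameter values
    std::string scheduleFile = "";   // empty: nothing to load

    if (saveSchedule || scheduleFile.empty())
    {
        std::cout << "schedule()" << std::endl;
        if (saveSchedule)
        {
            std::string filename = scheduleFile.empty() ? "hadrons.sched"
                                                        : scheduleFile;
            std::cout << "saveSchedule(" << filename << ")" << std::endl;
        }
    }
    else
    {
        std::cout << "loadSchedule(" << scheduleFile << ")" << std::endl;
    }
    return 0;
}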
@ -161,29 +180,30 @@ void Application::parseParameterFile(const std::string parameterFileName)
pop(reader);
}
void Application::saveParameterFile(const std::string parameterFileName)
void Application::saveParameterFile(const std::string parameterFileName, unsigned int prec)
{
LOG(Message) << "Saving application to '" << parameterFileName << "'..." << std::endl;
if (env().getGrid()->IsBoss())
{
XmlWriter writer(parameterFileName);
ObjectId id;
const unsigned int nMod = vm().getNModule();
write(writer, "parameters", getPar());
push(writer, "modules");
for (unsigned int i = 0; i < nMod; ++i)
{
push(writer, "module");
id.name = vm().getModuleName(i);
id.type = vm().getModule(i)->getRegisteredName();
write(writer, "id", id);
vm().getModule(i)->saveParameters(writer, "options");
XmlWriter writer(parameterFileName);
writer.setPrecision(prec);
ObjectId id;
const unsigned int nMod = vm().getNModule();
write(writer, "parameters", getPar());
push(writer, "modules");
for (unsigned int i = 0; i < nMod; ++i)
{
push(writer, "module");
id.name = vm().getModuleName(i);
id.type = vm().getModule(i)->getRegisteredName();
write(writer, "id", id);
vm().getModule(i)->saveParameters(writer, "options");
pop(writer);
}
pop(writer);
pop(writer);
}
pop(writer);
pop(writer);
}
}
// schedule computation ////////////////////////////////////////////////////////
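The new prec argument controls how many significant digits the XML writer emits when the application is saved. A small standard-library sketch (std::ostringstream here, not the Hadrons XmlWriter) of why around 15-17 digits are needed to round-trip a double such as the 3.0000000000000002e-02 mass printed earlier:

// Illustration only: low precision truncates a double, high precision
// reproduces the stored value digit for digit.
#include <iostream>
#include <iomanip>
#include <sstream>

int main(void)
{
    double m = 0.03; // stored as the nearest representable double
    std::ostringstream lo, hi;
    lo << std::setprecision(6)  << m; // "0.03": readable but lossy in general
    hi << std::setprecision(17) << m; // enough digits to recover the bits
    std::cout << lo.str() << "  vs  " << hi.str() << std::endl;
    return 0;
}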
@ -202,20 +222,20 @@ void Application::saveSchedule(const std::string filename)
<< std::endl;
if (env().getGrid()->IsBoss())
{
TextWriter writer(filename);
std::vector<std::string> program;
if (!scheduled_)
{
TextWriter writer(filename);
std::vector<std::string> program;
if (!scheduled_)
{
HADRONS_ERROR(Definition, "Computation not scheduled");
}
}
for (auto address: program_)
{
program.push_back(vm().getModuleName(address));
for (auto address: program_)
{
program.push_back(vm().getModuleName(address));
}
write(writer, "schedule", program);
}
write(writer, "schedule", program);
}
}
void Application::loadSchedule(const std::string filename)

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Application.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -57,6 +57,8 @@ public:
VirtualMachine::GeneticPar, genetic,
std::string, runId,
std::string, graphFile,
std::string, scheduleFile,
bool, saveSchedule,
int, parallelWriteMaxRetry);
GlobalPar(void): parallelWriteMaxRetry{-1} {}
};
@ -79,7 +81,7 @@ public:
void run(void);
// XML parameter file I/O
void parseParameterFile(const std::string parameterFileName);
void saveParameterFile(const std::string parameterFileName);
void saveParameterFile(const std::string parameterFileName, unsigned int prec=15);
// schedule computation
void schedule(void);
void saveSchedule(const std::string filename);

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalar/ScalarVP.cc
Source file: Hadrons/Archive/Modules/ScalarVP.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: James Harrison <jch1g10@soton.ac.uk>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalar/ScalarVP.hpp
Source file: Hadrons/Archive/Modules/ScalarVP.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: James Harrison <jch1g10@soton.ac.uk>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/TestSeqConserved.cc
Source file: Hadrons/Archive/Modules/TestSeqConserved.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/TestSeqConserved.hpp
Source file: Hadrons/Archive/Modules/TestSeqConserved.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/TestSeqGamma.cc
Source file: Hadrons/Archive/Modules/TestSeqGamma.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/TestSeqGamma.hpp
Source file: Hadrons/Archive/Modules/TestSeqGamma.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalar/VPCounterTerms.cc
Source file: Hadrons/Archive/Modules/VPCounterTerms.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: James Harrison <jch1g10@soton.ac.uk>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalar/VPCounterTerms.hpp
Source file: Hadrons/Archive/Modules/VPCounterTerms.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: James Harrison <jch1g10@soton.ac.uk>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WardIdentity.cc
Source file: Hadrons/Archive/Modules/WardIdentity.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WardIdentity.hpp
Source file: Hadrons/Archive/Modules/WardIdentity.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonian.hpp
Source file: Hadrons/Archive/Modules/WeakHamiltonian.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonianEye.cc
Source file: Hadrons/Archive/Modules/WeakHamiltonianEye.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp
Source file: Hadrons/Archive/Modules/WeakHamiltonianEye.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc
Source file: Hadrons/Archive/Modules/WeakHamiltonianNonEye.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp
Source file: Hadrons/Archive/Modules/WeakHamiltonianNonEye.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc
Source file: Hadrons/Archive/Modules/WeakNeutral4ptDisc.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -2,9 +2,9 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp
Source file: Hadrons/Archive/Modules/WeakNeutral4ptDisc.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>

View File

@ -4,10 +4,11 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/DilutedNoise.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Vera Guelpers <Vera.Guelpers@ed.ac.uk>
Author: Vera Guelpers <vmg1n14@soton.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/DiskVector.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -395,12 +395,26 @@ void DiskVectorBase<T>::cacheInsert(const unsigned int i, const T &obj) const
auto &freeInd = *freePtr_;
auto &loads = *loadsPtr_;
evict();
index[i] = freeInd.top();
freeInd.pop();
cache[index.at(i)] = obj;
loads.push_back(i);
modified[index.at(i)] = false;
// cache miss, evict and store
if (index.find(i) == index.end())
{
evict();
index[i] = freeInd.top();
freeInd.pop();
cache[index.at(i)] = obj;
loads.push_back(i);
modified[index.at(i)] = false;
}
// cache hit, modify current value
else
{
auto pos = std::find(loads.begin(), loads.end(), i);
cache[index.at(i)] = obj;
modified[index.at(i)] = true;
loads.erase(pos);
loads.push_back(i);
}
#ifdef DV_DEBUG
std::string msg;
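A simplified, self-contained illustration of the cache-insert policy introduced above (std::map/std::deque stand-ins, not the actual DiskVector bookkeeping): on a miss the oldest entry is evicted and the new index stored clean, on a hit the value is overwritten, flagged as modified and moved to the back of the load order.

// Illustration only, not the DiskVector implementation.
#include <algorithm>
#include <deque>
#include <map>

template <typename T>
struct ToyCache
{
    std::map<unsigned int, T>    cache;    // index -> cached value
    std::map<unsigned int, bool> modified; // dirty flag per cached index
    std::deque<unsigned int>     loads;    // access order, front = oldest
    unsigned int                 capacity{4};

    void insert(const unsigned int i, const T &obj)
    {
        if (cache.find(i) == cache.end())  // cache miss: evict and store
        {
            if (cache.size() >= capacity)
            {
                unsigned int victim = loads.front();
                cache.erase(victim);
                modified.erase(victim);
                loads.pop_front();
            }
            cache[i]    = obj;
            modified[i] = false;
            loads.push_back(i);
        }
        else                               // cache hit: modify current value
        {
            auto pos    = std::find(loads.begin(), loads.end(), i);
            cache[i]    = obj;
            modified[i] = true;
            loads.erase(pos);              // refresh position in load order
            loads.push_back(i);
        }
    }
};

int main(void)
{
    ToyCache<double> c;
    c.insert(0, 1.0); // miss: stored, clean
    c.insert(0, 2.0); // hit: overwritten, marked modified
    return 0;
}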

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/EigenPack.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -308,7 +308,9 @@ template <typename FineF, typename CoarseF,
class CoarseEigenPack: public EigenPack<FineF, FineFIo>
{
public:
typedef CoarseF CoarseField;
typedef CoarseF CoarseField;
typedef CoarseFIo CoarseFieldIo;
public:
std::vector<CoarseF> evecCoarse;
std::vector<RealD> evalCoarse;
public:

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Environment.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -45,13 +45,11 @@ Environment::Environment(void)
{
dim_ = GridDefaultLatt().toVector();
nd_ = dim_.size();
createGrid<vComplex>(1);
vol_ = 1.;
for (auto d: dim_)
{
vol_ *= d;
}
rng4d_.reset(new GridParallelRNG(getGrid()));
}
// grids ///////////////////////////////////////////////////////////////////////
@ -76,8 +74,13 @@ double Environment::getVolume(void) const
}
// random number generator /////////////////////////////////////////////////////
GridParallelRNG * Environment::get4dRng(void) const
GridParallelRNG * Environment::get4dRng(void)
{
if (rng4d_ == nullptr)
{
rng4d_.reset(new GridParallelRNG(getGrid()));
}
return rng4d_.get();
}
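The change above defers construction of the 4d RNG until it is first requested rather than building it in the constructor. A minimal sketch of that lazy-initialisation pattern with placeholder types (not the Grid GridParallelRNG machinery):

// Illustration only: construct an expensive member on first use.
#include <iostream>
#include <memory>

struct Rng { Rng(void) { std::cout << "RNG constructed" << std::endl; } };

class Env
{
public:
    Rng *getRng(void)
    {
        if (rng_ == nullptr)
        {
            rng_.reset(new Rng); // first call pays the construction cost
        }
        return rng_.get();
    }
private:
    std::unique_ptr<Rng> rng_{nullptr};
};

int main(void)
{
    Env env;      // no RNG constructed yet
    env.getRng(); // constructed here, on first request
    env.getRng(); // reused on subsequent calls
    return 0;
}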

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Environment.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -113,7 +113,7 @@ public:
unsigned int getNd(void) const;
double getVolume(void) const;
// random number generator
GridParallelRNG * get4dRng(void) const;
GridParallelRNG * get4dRng(void);
// general memory management
void addObject(const std::string name,
const int moduleAddress = -1);
@ -182,7 +182,7 @@ private:
std::map<CoarseGridKey, GridPt> gridCoarse5d_;
unsigned int nd_;
// random number generator
RngPt rng4d_;
RngPt rng4d_{nullptr};
// object store
std::vector<ObjInfo> object_;
std::map<std::string, unsigned int> objectAddress_;

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Exceptions.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Exceptions.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Factory.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/GeneticScheduler.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Global.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Global.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>
@ -44,6 +44,8 @@ See the full license in the file "LICENSE" in the top level distribution directo
#define DEFAULT_ASCII_PREC 16
#endif
#define ARG(...) __VA_ARGS__
/* the 'using Grid::operator<<;' statement prevents a very nasty compilation
* error with GCC 5 (clang & GCC 6 compile fine without it).
*/
@ -100,15 +102,17 @@ BEGIN_HADRONS_NAMESPACE
typedef typename Impl::Field ScalarField##suffix;\
typedef typename Impl::PropagatorField PropagatorField##suffix;\
typedef typename Impl::SitePropagator::scalar_object SitePropagator##suffix;\
typedef std::vector<SitePropagator##suffix> SlicedPropagator##suffix;
typedef typename Impl::ComplexField ComplexField##suffix;\
typedef std::vector<SitePropagator##suffix> SlicedPropagator##suffix;\
typedef std::vector<typename ComplexField##suffix::vector_object::scalar_object> SlicedComplex##suffix;
#define FERM_TYPE_ALIASES(FImpl, suffix)\
BASIC_TYPE_ALIASES(FImpl, suffix);\
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;\
typedef typename FImpl::ComplexField ComplexField##suffix;
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;\
typedef Lattice<iSpinMatrix<typename FImpl::Simd>> SpinMatrixField##suffix;
#define GAUGE_TYPE_ALIASES(GImpl, suffix)\
typedef typename GImpl::GaugeField GaugeField##suffix;
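The blocks above are token-pasting macros that stamp out a family of typedefs per implementation suffix. A toy, self-contained version of the same pattern (placeholder types, not the Grid/Hadrons implementation classes):

// Illustration only: generate suffixed aliases with the ## paste operator.
#include <vector>

#define MAKE_ALIASES(T, suffix)\
typedef T              Scalar##suffix;\
typedef std::vector<T> Field##suffix;

MAKE_ALIASES(double, D) // defines ScalarD and FieldD

int main(void)
{
    FieldD  v(3, 1.0); // std::vector<double> via the generated alias
    ScalarD s = v[0];
    return (s == 1.0) ? 0 : 1;
}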
@ -262,6 +266,15 @@ void tokenReplace(std::string &str, const std::string token,
}
}
// generic correlator class
template <typename Metadata, typename Scalar = Complex>
struct Correlator: Serializable
{
GRID_SERIALIZABLE_CLASS_MEMBERS(ARG(Correlator<Metadata, Scalar>),
Metadata, info,
std::vector<Complex>, corr);
};
END_HADRONS_NAMESPACE
#include <Hadrons/Exceptions.hpp>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Graph.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Module.cc
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Module.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
@ -65,7 +65,6 @@ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance;
extern template class base;\
MODULE_REGISTER(mod, ARG(base), ns);
#define ARG(...) __VA_ARGS__
#define HADRONS_MACRO_REDIRECT_12(arg1, arg2, macro, ...) macro
#define HADRONS_MACRO_REDIRECT_23(arg1, arg2, arg3, macro, ...) macro
@ -78,6 +77,15 @@ env().template getGrid<typename latticeType::vector_type>(Ls)
#define envGetGrid(...)\
HADRONS_MACRO_REDIRECT_12(__VA_ARGS__, envGetGrid5, envGetGrid4)(__VA_ARGS__)
#define envGetCoarseGrid4(latticeType, blockSize)\
env().template getCoarseGrid<typename latticeType::vector_type>(blockSize)
#define envGetCoarseGrid5(latticeType, blockSize, Ls)\
env().template getCoarseGrid<typename latticeType::vector_type>(blockSize, Ls)
#define envGetCoarseGrid(...)\
HADRONS_MACRO_REDIRECT_23(__VA_ARGS__, envGetCoarseGrid5, envGetCoarseGrid4)(__VA_ARGS__)
#define envGetRbGrid4(latticeType)\
env().template getRbGrid<typename latticeType::vector_type>()
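envGetCoarseGrid above is overloaded on argument count via HADRONS_MACRO_REDIRECT_23, which simply selects a macro name from the shifted argument list. A toy version of the trick (names invented for illustration; a trailing dummy argument keeps the variadic list non-empty):

// Illustration only: dispatch to a 2- or 3-argument macro by argument count.
#include <cstdio>

#define PICK_23(a1, a2, a3, macro, ...) macro
#define VOLUME2(w, h)    ((w) * (h))
#define VOLUME3(w, h, d) ((w) * (h) * (d))
#define VOLUME(...) PICK_23(__VA_ARGS__, VOLUME3, VOLUME2, 0)(__VA_ARGS__)

int main(void)
{
    printf("%d %d\n", VOLUME(2, 3), VOLUME(2, 3, 4)); // prints "6 24"
    return 0;
}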

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/ModuleFactory.hpp
Copyright (C) 2015-2018
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -1,17 +1,18 @@
#include <Hadrons/Modules/MContraction/WeakEye3pt.hpp>
#include <Hadrons/Modules/MContraction/Baryon.hpp>
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MContraction/A2ALoop.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp>
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp>
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Hadrons/Modules/MContraction/WardIdentity.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp>
#include <Hadrons/Modules/MContraction/WeakMesonDecayKl2.hpp>
#include <Hadrons/Modules/MContraction/WeakNonEye3pt.hpp>
#include <Hadrons/Modules/MFermion/FreeProp.hpp>
#include <Hadrons/Modules/MFermion/GaugeProp.hpp>
#include <Hadrons/Modules/MFermion/EMLepton.hpp>
#include <Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Hadrons/Modules/MSource/SeqAslash.hpp>
#include <Hadrons/Modules/MSource/Point.hpp>
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
@ -21,28 +22,23 @@
#include <Hadrons/Modules/MSink/Point.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Hadrons/Modules/MSolver/A2AAslashVectors.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
#include <Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MSolver/A2AAslashVectors.hpp>
#include <Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/Electrify.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MGauge/Electrify.hpp>
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqGamma.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqConserved.hpp>
#include <Hadrons/Modules/MLoop/NoiseLoop.hpp>
#include <Hadrons/Modules/MScalar/FreeProp.hpp>
#include <Hadrons/Modules/MScalar/VPCounterTerms.hpp>
#include <Hadrons/Modules/MScalar/ScalarVP.hpp>
#include <Hadrons/Modules/MScalar/Scalar.hpp>
#include <Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Hadrons/Modules/MNPR/Bilinear.hpp>
@ -56,7 +52,6 @@
#include <Hadrons/Modules/MAction/ScaledDWF.hpp>
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/ShiftProbe.hpp>
#include <Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>

Some files were not shown because too many files have changed in this diff.