mirror of https://github.com/paboyle/Grid.git synced 2025-07-04 15:37:05 +01:00

Compare commits


3 Commits

SHA1 Message Date
de8b2dcca3 Hadrons: faster A2A matrix load 2019-01-11 16:12:49 +00:00
efe000341d Hadrons: contractor fixes 2019-01-11 16:12:16 +00:00
11086c5c25 Hadrons: first stab at MPI contractor 2019-01-10 16:29:57 +00:00
289 changed files with 3118 additions and 6436 deletions

.gitignore vendored
View File

@ -114,4 +114,3 @@ gh-pages/
#####################
Grid/qcd/spin/gamma-gen/*.h
Grid/qcd/spin/gamma-gen/*.cc
Grid/util/Version.h

View File

@ -1,5 +0,0 @@
Version : 0.8.0
- Clang 3.5 and above, ICPC v16 and above, GCC 6.3 and above recommended
- MPI and MPI3 comms optimisations for KNL and OPA finished
- Half precision comms

View File

@ -42,7 +42,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/Action.h>
#include <Grid/qcd/utils/GaugeFix.h>
#include <Grid/qcd/utils/CovariantSmearing.h>
#include <Grid/qcd/smearing/Smearing.h>
#include <Grid/parallelIO/MetaData.h>
#include <Grid/qcd/hmc/HMC_aggregate.h>

View File

@ -55,9 +55,13 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/PowerMethod.h>
#include <Grid/algorithms/CoarsenedMatrix.h>
#include <Grid/algorithms/FFT.h>
// EigCg
// Pcg
// Hdcg
// GCR
// etc..
#endif

View File

@ -178,7 +178,7 @@ namespace Grid {
//////////////////////////////////////////////////////////
template<class Field>
class SchurOperatorBase : public LinearOperatorBase<Field> {
class SchurOperatorBase : public LinearOperatorBase<Field> {
public:
virtual RealD Mpc (const Field &in, Field &out) =0;
virtual RealD MpcDag (const Field &in, Field &out) =0;
@ -211,9 +211,10 @@ namespace Grid {
}
};
template<class Matrix,class Field>
class SchurDiagMooeeOperator : public SchurOperatorBase<Field> {
public:
class SchurDiagMooeeOperator : public SchurOperatorBase<Field> {
protected:
Matrix &_Mat;
public:
SchurDiagMooeeOperator (Matrix &Mat): _Mat(Mat){};
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in._grid);

View File

@ -60,7 +60,7 @@ namespace Grid {
// Query the even even properties to make algorithmic decisions
//////////////////////////////////////////////////////////////////////
virtual RealD Mass(void) { return 0.0; };
virtual int ConstEE(void) { return 1; }; // Disable assumptions unless overridden
virtual int ConstEE(void) { return 0; }; // Disable assumptions unless overridden
virtual int isTrivialEE(void) { return 0; }; // by a derived class that knows better
// half checkerboard operaions

View File

@ -89,8 +89,6 @@ class ConjugateGradient : public OperatorFunction<Field> {
// Check if guess is really REALLY good :)
if (cp <= rsq) {
std::cout << GridLogMessage << "ConjugateGradient guess is converged already " << std::endl;
IterationsToComplete = 0;
return;
}
@ -106,7 +104,7 @@ class ConjugateGradient : public OperatorFunction<Field> {
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
for (k = 1; k <= MaxIterations*1000; k++) {
c = cp;
MatrixTimer.Start();
@ -167,7 +165,8 @@ class ConjugateGradient : public OperatorFunction<Field> {
return;
}
}
std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations<< std::endl;
std::cout << GridLogMessage << "ConjugateGradient did NOT converge"
<< std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;

View File

@ -30,11 +30,8 @@ Author: Christopher Kelly <ckelly@phys.columbia.edu>
namespace Grid {
//Mixed precision restarted defect correction CG
template<class FieldD,class FieldF,
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
template<class FieldD,class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
RealD Tolerance;
@ -53,12 +50,7 @@ namespace Grid {
//Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
LinearFunction<FieldF> *guesser;
MixedPrecisionConjugateGradient(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid,
LinearOperatorBase<FieldF> &_Linop_f,
LinearOperatorBase<FieldD> &_Linop_d) :
MixedPrecisionConjugateGradient(RealD tol, Integer maxinnerit, Integer maxouterit, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d) :
Linop_f(_Linop_f), Linop_d(_Linop_d),
Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
OuterLoopNormMult(100.), guesser(NULL){ };
@ -157,8 +149,6 @@ namespace Grid {
}
};
}
#endif
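For reference, a minimal construction sketch for the mixed-precision solver whose constructor is shown in this hunk; the grid pointer, operators, fields and tolerance values below are placeholder names for illustration, not taken from the repository:

    // Hedged sketch: LinopF/LinopD wrap the single/double precision operator,
    // sp_grid is the single precision GridBase*, src_d/sol_d are double precision fields.
    MixedPrecisionConjugateGradient<LatticeFermionD, LatticeFermionF>
        mpcg(1.0e-8,   // outer tolerance, met in double precision
             10000,    // max inner (single precision) iterations per restart
             50,       // max outer defect-correction restarts
             sp_grid, LinopF, LinopD);
    mpcg(src_d, sol_d); // behaves as a LinearFunction<FieldD>: restarted defect-correction solve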

View File

@ -35,11 +35,7 @@ class ZeroGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { guess = zero; };
};
template<class Field>
class DoNothingGuesser: public LinearFunction<Field> {
public:
virtual void operator()(const Field &src, Field &guess) { };
};
template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:

View File

@ -1,45 +0,0 @@
#pragma once
namespace Grid {
template<class Field> class PowerMethod
{
public:
template<typename T> static RealD normalise(T& v)
{
RealD nn = norm2(v);
nn = sqrt(nn);
v = v * (1.0/nn);
return nn;
}
RealD operator()(LinearOperatorBase<Field> &HermOp, const Field &src)
{
GridBase *grid = src._grid;
// quickly get an idea of the largest eigenvalue to more properly normalize the residuum
RealD evalMaxApprox = 0.0;
auto src_n = src;
auto tmp = src;
const int _MAX_ITER_EST_ = 50;
for (int i=0;i<_MAX_ITER_EST_;i++) {
normalise(src_n);
HermOp.HermOp(src_n,tmp);
RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
RealD vden = norm2(src_n);
RealD na = vnum/vden;
if ( (fabs(evalMaxApprox/na - 1.0) < 0.01) || (i==_MAX_ITER_EST_-1) ) {
evalMaxApprox = na;
return evalMaxApprox;
}
evalMaxApprox = na;
std::cout << GridLogMessage << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
src_n = tmp;
}
assert(0);
return 0;
}
};
}
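The class above is a plain power iteration: apply the Hermitian operator, renormalise, and stop once the Rayleigh quotient changes by less than 1% (or after 50 applications). A minimal call sketch, with placeholder operator and source names:

    // Hedged sketch: HermOp is any LinearOperatorBase<FermionField> wrapper,
    // src an arbitrary non-zero starting field on the same grid.
    PowerMethod<FermionField> power_method;
    RealD lambda_max = power_method(HermOp, src); // rough estimate of the largest eigenvalue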

View File

@ -99,13 +99,10 @@ namespace Grid {
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
bool useSolnAsInitGuess; // if true user-supplied solution vector is used as initial guess for solver
public:
SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false) :
_HermitianRBSolver(HermitianRBSolver),
useSolnAsInitGuess(_solnAsInitGuess)
SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) :
_HermitianRBSolver(HermitianRBSolver)
{
CBfactorise = 0;
subtractGuess(initSubGuess);
@ -159,11 +156,7 @@ namespace Grid {
if ( subGuess ) guess_save.resize(nblock,grid);
for(int b=0;b<nblock;b++){
if(useSolnAsInitGuess) {
pickCheckerboard(Odd, sol_o[b], out[b]);
} else {
guess(src_o[b],sol_o[b]);
}
guess(src_o[b],sol_o[b]);
if ( subGuess ) {
guess_save[b] = sol_o[b];
@ -223,11 +216,8 @@ namespace Grid {
////////////////////////////////
// Construct the guess
////////////////////////////////
if(useSolnAsInitGuess) {
pickCheckerboard(Odd, sol_o, out);
} else {
guess(src_o,sol_o);
}
Field tmp(grid);
guess(src_o,sol_o);
Field guess_save(grid);
guess_save = sol_o;
@ -261,7 +251,7 @@ namespace Grid {
}
/////////////////////////////////////////////////////////////
// Override in derived.
// Override in derived. Not virtual as template methods
/////////////////////////////////////////////////////////////
virtual void RedBlackSource (Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o) =0;
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol) =0;
@ -274,9 +264,8 @@ namespace Grid {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess)
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess)
{
}
@ -344,9 +333,8 @@ namespace Grid {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess) {};
//////////////////////////////////////////////////////
@ -417,9 +405,8 @@ namespace Grid {
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false,
const bool _solnAsInitGuess = false)
: SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess,_solnAsInitGuess) {};
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess) {};
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{

View File

@ -53,12 +53,10 @@ void CartesianCommunicator::Init(int *argc, char ***argv)
// Never clean up as done once.
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
Grid_quiesce_nodes();
GlobalSharedMemory::Init(communicator_world);
GlobalSharedMemory::SharedMemoryAllocate(
GlobalSharedMemory::MAX_MPI_SHM_BYTES,
GlobalSharedMemory::Hugepages);
Grid_unquiesce_nodes();
}
///////////////////////////////////////////////////////////////////////////
@ -107,7 +105,8 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)
{
_ndimension = processors.size(); assert(_ndimension>=1);
_ndimension = processors.size();
int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
std::vector<int> parent_processor_coor(_ndimension,0);
std::vector<int> parent_processors (_ndimension,1);

View File

@ -52,7 +52,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
_processors = processors;
_ndimension = processors.size(); assert(_ndimension>=1);
_ndimension = processors.size();
_processor_coor.resize(_ndimension);
// Require 1^N processor grid for fake

View File

@ -103,8 +103,6 @@ class GlobalSharedMemory {
//////////////////////////////////////////////////////////////////////////////////////
static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
static void OptimalCommunicator(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorHypercube(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorSharedMemory(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
///////////////////////////////////////////////////
// Provide shared memory facilities off comm world
///////////////////////////////////////////////////

View File

@ -132,22 +132,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)
}
void GlobalSharedMemory::OptimalCommunicator(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm)
{
//////////////////////////////////////////////////////////////////////////////
// Look and see if it looks like an HPE 8600 based on hostname conventions
//////////////////////////////////////////////////////////////////////////////
const int namelen = _POSIX_HOST_NAME_MAX;
char name[namelen];
int R;
int I;
int N;
gethostname(name,namelen);
int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
if(nscan==3) OptimalCommunicatorHypercube(processors,optimal_comm);
else OptimalCommunicatorSharedMemory(processors,optimal_comm);
}
void GlobalSharedMemory::OptimalCommunicatorHypercube(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm)
{
#ifdef HYPERCUBE
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
@ -268,9 +253,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const std::vector<int> &pr
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
}
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm)
{
#else
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
@ -323,6 +306,7 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const std::vector<int>
/////////////////////////////////////////////////////////////////
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////
// SHMGET
@ -353,7 +337,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
int errsv = errno;
printf("Errno %d\n",errsv);
printf("key %d\n",key);
printf("size %ld\n",size);
printf("size %lld\n",size);
printf("flags %d\n",flags);
perror("shmget");
exit(1);

View File

@ -85,7 +85,7 @@ class LatticeTrinaryExpression :public std::pair<Op,std::tuple<T1,T2,T3> >, publ
void inline conformable(GridBase *lhs,GridBase *rhs)
{
assert((lhs == rhs) && " conformable check pointers mismatch ");
assert(lhs == rhs);
}
template<class vobj>

View File

@ -77,18 +77,19 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogIterative.Active(0);
GridLogDebug.Active(0);
GridLogPerformance.Active(0);
GridLogIntegrator.Active(1);
GridLogIntegrator.Active(0);
GridLogColours.Active(0);
for (int i = 0; i < logstreams.size(); i++) {
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance"))
GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
}
}

View File

@ -210,10 +210,10 @@ PARALLEL_CRITICAL
static inline void le32toh_v(void *file_object,uint64_t bytes)
{
uint32_t *fp = (uint32_t *)file_object;
uint32_t f;
uint64_t count = bytes/sizeof(uint32_t);
parallel_for(uint64_t i=0;i<count;i++){
uint32_t f;
f = fp[i];
// got network order and the network to host
f = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
@ -235,9 +235,10 @@ PARALLEL_CRITICAL
static inline void le64toh_v(void *file_object,uint64_t bytes)
{
uint64_t *fp = (uint64_t *)file_object;
uint64_t f,g;
uint64_t count = bytes/sizeof(uint64_t);
parallel_for(uint64_t i=0;i<count;i++){
uint64_t f,g;
f = fp[i];
// got network order and the network to host
g = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
@ -348,8 +349,7 @@ PARALLEL_CRITICAL
int ieee32 = (format == std::string("IEEE32"));
int ieee64big = (format == std::string("IEEE64BIG"));
int ieee64 = (format == std::string("IEEE64"));
assert(ieee64||ieee32|ieee64big||ieee32big);
assert((ieee64+ieee32+ieee64big+ieee32big)==1);
//////////////////////////////////////////////////////////////////////////////
// Do the I/O
//////////////////////////////////////////////////////////////////////////////
@ -619,7 +619,6 @@ PARALLEL_CRITICAL
{
std::cout << GridLogMessage << "writeLatticeObject: read test checksum failure, re-writing (" << attemptsLeft << " attempt(s) remaining)" << std::endl;
offset = offsetCopy;
parallel_for(uint64_t x=0;x<lsites;x++) munge(scalardata[x],iodata[x]);
}
else
{

View File

@ -46,12 +46,6 @@ extern "C" {
namespace Grid {
namespace QCD {
#define GRID_FIELD_NORM "FieldNormMetaData"
#define GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) \
0.5*fabs(FieldNormMetaData_.norm2 - n2ck)/(FieldNormMetaData_.norm2 + n2ck)
#define GRID_FIELD_NORM_CHECK(FieldNormMetaData_, n2ck) \
assert(GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) < 1.0e-5);
/////////////////////////////////
// Encode word types as strings
/////////////////////////////////
@ -211,7 +205,6 @@ class GridLimeReader : public BinaryIO {
{
typedef typename vobj::scalar_object sobj;
scidacChecksum scidacChecksum_;
FieldNormMetaData FieldNormMetaData_;
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
std::string format = getFormatString<vobj>();
@ -240,52 +233,21 @@ class GridLimeReader : public BinaryIO {
// std::cout << " ReadLatticeObject from offset "<<offset << std::endl;
BinarySimpleMunger<sobj,sobj> munge;
BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
/////////////////////////////////////////////
// Insist checksum is next record
/////////////////////////////////////////////
readScidacChecksum(scidacChecksum_,FieldNormMetaData_);
readLimeObject(scidacChecksum_,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));
/////////////////////////////////////////////
// Verify checksums
/////////////////////////////////////////////
if(FieldNormMetaData_.norm2 != 0.0){
RealD n2ck = norm2(field);
std::cout << GridLogMessage << "Field norm: metadata= " << FieldNormMetaData_.norm2
<< " / field= " << n2ck << " / rdiff= " << GRID_FIELD_NORM_CALC(FieldNormMetaData_,n2ck) << std::endl;
GRID_FIELD_NORM_CHECK(FieldNormMetaData_,n2ck);
}
assert(scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb)==1);
// find out if next field is a GridFieldNorm
return;
}
}
}
void readScidacChecksum(scidacChecksum &scidacChecksum_,
FieldNormMetaData &FieldNormMetaData_)
{
FieldNormMetaData_.norm2 =0.0;
std::string scidac_str(SCIDAC_CHECKSUM);
std::string field_norm_str(GRID_FIELD_NORM);
while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration)
std::vector<char> xmlc(nbytes+1,'\0');
limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);
std::string xmlstring = std::string(&xmlc[0]);
XmlReader RD(xmlstring, true, "");
if ( !strncmp(limeReaderType(LimeR), field_norm_str.c_str(),strlen(field_norm_str.c_str()) ) ) {
// std::cout << "FieldNormMetaData "<<xmlstring<<std::endl;
read(RD,field_norm_str,FieldNormMetaData_);
}
if ( !strncmp(limeReaderType(LimeR), scidac_str.c_str(),strlen(scidac_str.c_str()) ) ) {
// std::cout << SCIDAC_CHECKSUM << " " <<xmlstring<<std::endl;
read(RD,std::string("scidacChecksum"),scidacChecksum_);
return;
}
}
assert(0);
}
////////////////////////////////////////////
// Read a generic serialisable object
////////////////////////////////////////////
@ -304,7 +266,7 @@ class GridLimeReader : public BinaryIO {
limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);
// std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <<std::endl;
xmlstring = std::string(&xmlc[0]);
xmlstring = std::string(&xmlc[0]);
return;
}
@ -318,8 +280,8 @@ class GridLimeReader : public BinaryIO {
std::string xmlstring;
readLimeObject(xmlstring, record_name);
XmlReader RD(xmlstring, true, "");
read(RD,object_name,object);
XmlReader RD(xmlstring, true, "");
read(RD,object_name,object);
}
};
@ -428,8 +390,6 @@ class GridLimeWriter : public BinaryIO
GridBase *grid = field._grid;
assert(boss_node == field._grid->IsBoss() );
FieldNormMetaData FNMD; FNMD.norm2 = norm2(field);
////////////////////////////////////////////
// Create record header
////////////////////////////////////////////
@ -488,7 +448,6 @@ class GridLimeWriter : public BinaryIO
checksum.suma= streama.str();
checksum.sumb= streamb.str();
if ( boss_node ) {
writeLimeObject(0,0,FNMD,std::string(GRID_FIELD_NORM),std::string(GRID_FIELD_NORM));
writeLimeObject(0,1,checksum,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));
}
}
@ -666,12 +625,6 @@ class IldgWriter : public ScidacWriter {
assert(header.nd==4);
assert(header.nd==header.dimension.size());
//////////////////////////////////////////////////////////////////////////////
// Field norm tests
//////////////////////////////////////////////////////////////////////////////
FieldNormMetaData FieldNormMetaData_;
FieldNormMetaData_.norm2 = norm2(Umu);
//////////////////////////////////////////////////////////////////////////////
// Fill the USQCD info field
//////////////////////////////////////////////////////////////////////////////
@ -680,12 +633,11 @@ class IldgWriter : public ScidacWriter {
info.plaq = header.plaquette;
info.linktr = header.link_trace;
// std::cout << GridLogMessage << " Writing config; IldgIO n2 "<< FieldNormMetaData_.norm2<<std::endl;
std::cout << GridLogMessage << " Writing config; IldgIO "<<std::endl;
//////////////////////////////////////////////
// Fill the Lime file record by record
//////////////////////////////////////////////
writeLimeObject(1,0,header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message
writeLimeObject(0,0,FieldNormMetaData_,FieldNormMetaData_.SerialisableClassName(),std::string(GRID_FIELD_NORM));
writeLimeObject(0,0,_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML));
writeLimeObject(0,1,info,info.SerialisableClassName(),std::string(SCIDAC_FILE_XML));
writeLimeObject(1,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
@ -728,7 +680,6 @@ class IldgReader : public GridLimeReader {
std::string ildgLFN_ ;
scidacChecksum scidacChecksum_;
usqcdInfo usqcdInfo_ ;
FieldNormMetaData FieldNormMetaData_;
// track what we read from file
int found_ildgFormat =0;
@ -737,7 +688,7 @@ class IldgReader : public GridLimeReader {
int found_usqcdInfo =0;
int found_ildgBinary =0;
int found_FieldMetaData =0;
int found_FieldNormMetaData =0;
uint32_t nersc_csum;
uint32_t scidac_csuma;
uint32_t scidac_csumb;
@ -771,7 +722,7 @@ class IldgReader : public GridLimeReader {
//////////////////////////////////
// ILDG format record
std::string xmlstring(&xmlc[0]);
std::string xmlstring(&xmlc[0]);
if ( !strncmp(limeReaderType(LimeR), ILDG_FORMAT,strlen(ILDG_FORMAT)) ) {
XmlReader RD(xmlstring, true, "");
@ -824,17 +775,11 @@ class IldgReader : public GridLimeReader {
found_scidacChecksum = 1;
}
if ( !strncmp(limeReaderType(LimeR), GRID_FIELD_NORM,strlen(GRID_FIELD_NORM)) ) {
XmlReader RD(xmlstring, true, "");
read(RD,GRID_FIELD_NORM,FieldNormMetaData_);
found_FieldNormMetaData = 1;
}
} else {
/////////////////////////////////
// Binary data
/////////////////////////////////
// std::cout << GridLogMessage << "ILDG Binary record found : " ILDG_BINARY_DATA << std::endl;
std::cout << GridLogMessage << "ILDG Binary record found : " ILDG_BINARY_DATA << std::endl;
uint64_t offset= ftello(File);
if ( format == std::string("IEEE64BIG") ) {
GaugeSimpleMunger<dobj, sobj> munge;
@ -901,13 +846,6 @@ class IldgReader : public GridLimeReader {
////////////////////////////////////////////////////////////
// Really really want to mandate a scidac checksum
////////////////////////////////////////////////////////////
if ( found_FieldNormMetaData ) {
RealD nn = norm2(Umu);
GRID_FIELD_NORM_CHECK(FieldNormMetaData_,nn);
std::cout << GridLogMessage<<"FieldNormMetaData matches " << std::endl;
} else {
std::cout << GridLogWarning<<"FieldNormMetaData not found. " << std::endl;
}
if ( found_scidacChecksum ) {
FieldMetaData_.scidac_checksuma = stoull(scidacChecksum_.suma,0,16);
FieldMetaData_.scidac_checksumb = stoull(scidacChecksum_.sumb,0,16);

View File

@ -56,10 +56,6 @@ namespace Grid {
////////////////////////////////////////////////////////////////////////////////
// header specification/interpretation
////////////////////////////////////////////////////////////////////////////////
class FieldNormMetaData : Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(FieldNormMetaData, double, norm2);
};
class FieldMetaData : Serializable {
public:

View File

@ -66,8 +66,7 @@ namespace QCD {
int, MaxIter,
RealD, tolerance,
int, degree,
int, precision,
int, BoundsCheckFreq);
int, precision);
// MaxIter and tolerance, vectors??
@ -77,15 +76,13 @@ namespace QCD {
int _maxit = 1000,
RealD tol = 1.0e-8,
int _degree = 10,
int _precision = 64,
int _BoundsCheckFreq=20)
int _precision = 64)
: lo(_lo),
hi(_hi),
MaxIter(_maxit),
tolerance(tol),
degree(_degree),
precision(_precision),
BoundsCheckFreq(_BoundsCheckFreq){};
precision(_precision){};
};

View File

@ -43,7 +43,7 @@ namespace Grid {
INHERIT_IMPL_TYPES(Impl);
public:
void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary, std::vector<double> twist, bool fiveD) {
void FreePropagator(const FermionField &in,FermionField &out,RealD mass, std::vector<double> twist, bool fiveD) {
FermionField in_k(in._grid);
FermionField prop_k(in._grid);
@ -53,22 +53,17 @@ namespace Grid {
ComplexField coor(in._grid);
ComplexField ph(in._grid); ph = zero;
FermionField in_buf(in._grid); in_buf = zero;
Scalar ci(0.0,1.0);
Complex ci(0.0,1.0);
assert(twist.size() == Nd);//check that twist is Nd
assert(boundary.size() == Nd);//check that boundary conditions is Nd
int shift = 0;
if(fiveD) shift = 1;
for(unsigned int nu = 0; nu < Nd; nu++)
{
// Shift coordinate lattice index by 1 to account for 5th dimension.
LatticeCoordinate(coor, nu + shift);
double boundary_phase = ::acos(real(boundary[nu]));
ph = ph + boundary_phase*coor*((1./(in._grid->_fdimensions[nu+shift])));
//momenta for propagator shifted by twist+boundary
twist[nu] = twist[nu] + boundary_phase/((2.0*M_PI));
ph = ph + twist[nu]*coor*((1./(in._grid->_fdimensions[nu+shift])));
}
in_buf = exp(ci*ph*(-1.0))*in;
in_buf = exp((Real)(2.0*M_PI)*ci*ph*(-1.0))*in;
if(fiveD){//FFT only on temporal and spatial dimensions
std::vector<int> mask(Nd+1,1); mask[0] = 0;
@ -81,28 +76,25 @@ namespace Grid {
this->MomentumSpacePropagatorHt(prop_k,in_k,mass,twist);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
}
//phase for boundary condition
out = out * exp(ci*ph);
out = out * exp((Real)(2.0*M_PI)*ci*ph);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary,std::vector<double> twist) {
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<double> twist) {
bool fiveD = true; //5d propagator by default
FreePropagator(in,out,mass,boundary,twist,fiveD);
FreePropagator(in,out,mass,twist,fiveD);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass, bool fiveD) {
std::vector<double> twist(Nd,0.0); //default: periodic boundarys in all directions
std::vector<Complex> boundary;
for(int i=0;i<Nd;i++) boundary.push_back(1);//default: periodic boundary conditions
FreePropagator(in,out,mass,boundary,twist,fiveD);
FreePropagator(in,out,mass,twist,fiveD);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
bool fiveD = true; //5d propagator by default
std::vector<double> twist(Nd,0.0); //default: twist angle 0
std::vector<Complex> boundary;
for(int i=0;i<Nd;i++) boundary.push_back(1); //default: periodic boundary conditions
FreePropagator(in,out,mass,boundary,twist,fiveD);
std::vector<double> twist(Nd,0.0); //default: periodic boundarys in all directions
FreePropagator(in,out,mass,twist,fiveD);
};
virtual void Instantiatable(void) {};

View File

@ -96,7 +96,7 @@ namespace Grid {
virtual void MomentumSpacePropagator(FermionField &out,const FermionField &in,RealD _m,std::vector<double> twist) { assert(0);};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<Complex> boundary,std::vector<double> twist) {
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass,std::vector<double> twist) {
FFT theFFT((GridCartesian *) in._grid);
FermionField in_k(in._grid);
@ -106,33 +106,26 @@ namespace Grid {
ComplexField coor(in._grid);
ComplexField ph(in._grid); ph = zero;
FermionField in_buf(in._grid); in_buf = zero;
Scalar ci(0.0,1.0);
Complex ci(0.0,1.0);
assert(twist.size() == Nd);//check that twist is Nd
assert(boundary.size() == Nd);//check that boundary conditions is Nd
for(unsigned int nu = 0; nu < Nd; nu++)
{
LatticeCoordinate(coor, nu);
double boundary_phase = ::acos(real(boundary[nu]));
ph = ph + boundary_phase*coor*((1./(in._grid->_fdimensions[nu])));
//momenta for propagator shifted by twist+boundary
twist[nu] = twist[nu] + boundary_phase/((2.0*M_PI));
ph = ph + twist[nu]*coor*((1./(in._grid->_fdimensions[nu])));
}
in_buf = exp(ci*ph*(-1.0))*in;
in_buf = exp((Real)(2.0*M_PI)*ci*ph*(-1.0))*in;
theFFT.FFT_all_dim(in_k,in_buf,FFT::forward);
this->MomentumSpacePropagator(prop_k,in_k,mass,twist);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
//phase for boundary condition
out = out * exp(ci*ph);
out = out * exp((Real)(2.0*M_PI)*ci*ph);
};
virtual void FreePropagator(const FermionField &in,FermionField &out,RealD mass) {
std::vector<Complex> boundary;
for(int i=0;i<Nd;i++) boundary.push_back(1);//default: periodic boundary conditions
std::vector<double> twist(Nd,0.0); //default: periodic boundarys in all directions
FreePropagator(in,out,mass,boundary,twist);
FreePropagator(in,out,mass,twist);
};
///////////////////////////////////////////////
@ -143,7 +136,6 @@ namespace Grid {
//////////////////////////////////////////////////////////////////////
// Conserved currents, either contract at sink or insert sequentially.
//////////////////////////////////////////////////////////////////////
virtual void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
@ -156,12 +148,6 @@ namespace Grid {
unsigned int tmin,
unsigned int tmax,
ComplexField &lattice_cmplx)=0;
// Only reimplemented in Wilson5D
// Default to just a zero correlation function
virtual void ContractJ5q(FermionField &q_in ,ComplexField &J5q) { J5q=zero; };
virtual void ContractJ5q(PropagatorField &q_in,ComplexField &J5q) { J5q=zero; };
///////////////////////////////////////////////
// Physical field import/export
///////////////////////////////////////////////

View File

@ -26,7 +26,7 @@ See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid.h>
namespace Grid {
namespace QCD {

View File

@ -26,11 +26,11 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid.h>
#ifdef AVX512
#include <Grid/simd/Intel512common.h>
#include <Grid/simd/Intel512avx.h>
#include <simd/Intel512common.h>
#include <simd/Intel512avx.h>
#endif
// Interleave operations from two directions
@ -679,7 +679,7 @@ void StaggeredKernels<Impl>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
gauge3 =(uint64_t)&UU._odata[sU]( T );
// This is the single precision 5th direction vectorised kernel
#include <Grid/simd/Intel512single.h>
#include <simd/Intel512single.h>
template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
@ -732,7 +732,7 @@ template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl
}
#include <Grid/simd/Intel512double.h>
#include <simd/Intel512double.h>
template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
@ -816,7 +816,7 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl
// This is the single precision 5th direction vectorised kernel
#include <Grid/simd/Intel512single.h>
#include <simd/Intel512single.h>
template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,
@ -884,7 +884,7 @@ template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st,
#endif
}
#include <Grid/simd/Intel512double.h>
#include <simd/Intel512double.h>
template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
DoubledGaugeField &U, DoubledGaugeField &UUU,
SiteSpinor *buf, int LLs, int sU,

View File

@ -26,7 +26,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid.h>
#define LOAD_CHI(b) \

View File

@ -67,7 +67,6 @@ public:
public:
typedef WilsonFermion<Impl> WilsonBase;
virtual int ConstEE(void) { return 0; };
virtual void Instantiatable(void){};
// Constructors
WilsonCloverFermion(GaugeField &_Umu, GridCartesian &Fgrid,

View File

@ -939,75 +939,6 @@ void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const Fe
merge(qSiteRev, qSiteVec); \
}
// psi = chiralProjectPlus(Result_s[Ls/2-1]);
// psi+= chiralProjectMinus(Result_s[Ls/2]);
// PJ5q+=localInnerProduct(psi,psi);
template<class vobj>
Lattice<vobj> spProj5p(const Lattice<vobj> & in)
{
GridBase *grid=in._grid;
Gamma G5(Gamma::Algebra::Gamma5);
Lattice<vobj> ret(grid);
parallel_for(int ss=0;ss<grid->oSites();ss++){
ret._odata[ss] = in._odata[ss] + G5*in._odata[ss];
}
return ret;
}
template<class vobj>
Lattice<vobj> spProj5m(const Lattice<vobj> & in)
{
Gamma G5(Gamma::Algebra::Gamma5);
GridBase *grid=in._grid;
Lattice<vobj> ret(grid);
parallel_for(int ss=0;ss<grid->oSites();ss++){
ret._odata[ss] = in._odata[ss] - G5*in._odata[ss];
}
return ret;
}
template <class Impl>
void WilsonFermion5D<Impl>::ContractJ5q(FermionField &q_in,ComplexField &J5q)
{
conformable(GaugeGrid(), J5q._grid);
conformable(q_in._grid, FermionGrid());
// 4d field
int Ls = this->Ls;
FermionField psi(GaugeGrid());
FermionField p_plus (GaugeGrid());
FermionField p_minus(GaugeGrid());
FermionField p(GaugeGrid());
ExtractSlice(p_plus , q_in, Ls/2 , 0);
ExtractSlice(p_minus, q_in, Ls/2-1 , 0);
p_plus = spProj5p(p_plus );
p_minus= spProj5m(p_minus);
p=p_plus+p_minus;
J5q = localInnerProduct(p,p);
}
template <class Impl>
void WilsonFermion5D<Impl>::ContractJ5q(PropagatorField &q_in,ComplexField &J5q)
{
conformable(GaugeGrid(), J5q._grid);
conformable(q_in._grid, FermionGrid());
// 4d field
int Ls = this->Ls;
PropagatorField psi(GaugeGrid());
PropagatorField p_plus (GaugeGrid());
PropagatorField p_minus(GaugeGrid());
PropagatorField p(GaugeGrid());
ExtractSlice(p_plus , q_in, Ls/2 , 0);
ExtractSlice(p_minus, q_in, Ls/2-1 , 0);
p_plus = spProj5p(p_plus );
p_minus= spProj5m(p_minus);
p=p_plus+p_minus;
J5q = localInnerProduct(p,p);
}
template <class Impl>
void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
@ -1018,7 +949,6 @@ void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
conformable(q_in_1._grid, FermionGrid());
conformable(q_in_1._grid, q_in_2._grid);
conformable(_FourDimGrid, q_out._grid);
PropagatorField tmp1(FermionGrid()), tmp2(FermionGrid());
unsigned int LLs = q_in_1._grid->_rdimensions[0];
q_out = zero;
@ -1065,6 +995,7 @@ void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
}
template <class Impl>
void WilsonFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,

View File

@ -230,10 +230,6 @@ namespace QCD {
unsigned int tmin,
unsigned int tmax,
ComplexField &lattice_cmplx);
void ContractJ5q(PropagatorField &q_in,ComplexField &J5q);
void ContractJ5q(FermionField &q_in,ComplexField &J5q);
};
}}

View File

@ -81,8 +81,8 @@ WilsonKernels<Impl >::AsmDhopSiteDagExt(StencilImpl &st,LebesgueOrder & lo,Doubl
assert(0);
}
#include <Grid/qcd/action/fermion/WilsonKernelsAsmAvx512.h>
#include <Grid/qcd/action/fermion/WilsonKernelsAsmQPX.h>
#include <qcd/action/fermion/WilsonKernelsAsmAvx512.h>
#include <qcd/action/fermion/WilsonKernelsAsmQPX.h>
#define INSTANTIATE_ASM(A)\
template void WilsonKernels<A>::AsmDhopSite(StencilImpl &st,LebesgueOrder & lo,DoubledGaugeField &U, SiteHalfSpinor *buf,\

View File

@ -29,14 +29,6 @@ directory
#ifndef GRID_GAUGE_IMPL_TYPES_H
#define GRID_GAUGE_IMPL_TYPES_H
#define CPS_MD_TIME
#ifdef CPS_MD_TIME
#define HMC_MOMENTUM_DENOMINATOR (2.0)
#else
#define HMC_MOMENTUM_DENOMINATOR (1.0)
#endif
namespace Grid {
namespace QCD {
@ -46,7 +38,6 @@ namespace QCD {
#define INHERIT_GIMPL_TYPES(GImpl) \
typedef typename GImpl::Simd Simd; \
typedef typename GImpl::Scalar Scalar; \
typedef typename GImpl::LinkField GaugeLinkField; \
typedef typename GImpl::Field GaugeField; \
typedef typename GImpl::ComplexField ComplexField;\
@ -64,8 +55,7 @@ namespace QCD {
template <class S, int Nrepresentation = Nc, int Nexp = 12 > class GaugeImplTypes {
public:
typedef S Simd;
typedef typename Simd::scalar_type scalar_type;
typedef scalar_type Scalar;
template <typename vtype> using iImplScalar = iScalar<iScalar<iScalar<vtype> > >;
template <typename vtype> using iImplGaugeLink = iScalar<iScalar<iMatrix<vtype, Nrepresentation> > >;
template <typename vtype> using iImplGaugeField = iVector<iScalar<iMatrix<vtype, Nrepresentation> >, Nd>;
@ -97,32 +87,12 @@ public:
///////////////////////////////////////////////////////////
// Move these to another class
// HMC auxiliary functions
static inline void generate_momenta(Field &P, GridParallelRNG &pRNG)
{
// Zbigniew Srocinsky thesis:
//
// P(p) = N \Prod_{x\mu}e^-{1/2 Tr (p^2_mux)}
//
// p_x,mu = c_x,mu,a T_a
//
// Tr p^2 = sum_a,x,mu 1/2 (c_x,mu,a)^2
//
// Which implies P(p) = N \Prod_{x,\mu,a} e^-{1/4 c_xmua^2 }
//
// = N \Prod_{x,\mu,a} e^-{1/2 (c_xmua/sqrt{2})^2 }
//
// Expect c' = cxmua/sqrt(2) to be a unit variance gaussian.
//
// Expect cxmua variance sqrt(2).
//
// Must scale the momentum by sqrt(2) to invoke CPS and UKQCD conventions
//
static inline void generate_momenta(Field &P, GridParallelRNG &pRNG) {
// specific for SU gauge fields
LinkField Pmu(P._grid);
Pmu = Zero();
Pmu = zero;
for (int mu = 0; mu < Nd; mu++) {
SU<Nrepresentation>::GaussianFundamentalLieAlgebraMatrix(pRNG, Pmu);
RealD scale = ::sqrt(HMC_MOMENTUM_DENOMINATOR) ;
Pmu = Pmu*scale;
PokeIndex<LorentzIndex>(P, Pmu, mu);
}
}

View File

@ -38,7 +38,6 @@ namespace QCD{
{
public:
typedef S Simd;
typedef typename Simd::scalar_type Scalar;
template <typename vtype>
using iImplGaugeLink = iScalar<iScalar<iScalar<vtype>>>;

View File

@ -75,7 +75,7 @@ namespace Grid{
virtual void deriv(const GaugeField &Umu,GaugeField & dSdU) {
//extend Ta to include Lorentz indexes
RealD factor_p = c_plaq/RealD(Nc)*0.5;
RealD factor_r = c_rect/RealD(Nc)*0.5;
RealD factor_r = c_rect/RealD(Nc)*0.5;
GridBase *grid = Umu._grid;

View File

@ -1,53 +0,0 @@
#pragma once
namespace Grid{
namespace QCD{
template<class Field>
void HighBoundCheck(LinearOperatorBase<Field> &HermOp,
Field &Phi,
RealD hi)
{
// Eigenvalue bound check at high end
PowerMethod<Field> power_method;
auto lambda_max = power_method(HermOp,Phi);
std::cout << GridLogMessage << "Pseudofermion action lamda_max "<<lambda_max<<"( bound "<<hi<<")"<<std::endl;
assert( (lambda_max < hi) && " High Bounds Check on operator failed" );
}
template<class Field> void InverseSqrtBoundsCheck(int MaxIter,double tol,
LinearOperatorBase<Field> &HermOp,
Field &GaussNoise,
MultiShiftFunction &PowerNegHalf)
{
GridBase *FermionGrid = GaussNoise._grid;
Field X(FermionGrid);
Field Y(FermionGrid);
Field Z(FermionGrid);
X=GaussNoise;
RealD Nx = norm2(X);
ConjugateGradientMultiShift<Field> msCG(MaxIter,PowerNegHalf);
msCG(HermOp,X,Y);
msCG(HermOp,Y,Z);
RealD Nz = norm2(Z);
HermOp.HermOp(Z,Y);
RealD Ny = norm2(Y);
X=X-Y;
RealD Nd = norm2(X);
std::cout << "************************* "<<std::endl;
std::cout << " noise = "<<Nx<<std::endl;
std::cout << " (MdagM^-1/2)^2 noise = "<<Nz<<std::endl;
std::cout << " MdagM (MdagM^-1/2)^2 noise = "<<Ny<<std::endl;
std::cout << " noise - MdagM (MdagM^-1/2)^2 noise = "<<Nd<<std::endl;
std::cout << "************************* "<<std::endl;
assert( (std::sqrt(Nd/Nx)<tol) && " InverseSqrtBoundsCheck ");
}
}
}
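In equations, InverseSqrtBoundsCheck above applies the multishift-CG approximation to the inverse square root twice to a Gaussian noise vector X and then undoes it with one application of the operator:

    Y \approx (M^\dagger M)^{-1/2} X, \quad
    Z \approx (M^\dagger M)^{-1/2} Y \approx (M^\dagger M)^{-1} X
    \;\Rightarrow\; (M^\dagger M)\, Z \approx X, \qquad
    \sqrt{\frac{\lVert X - (M^\dagger M) Z \rVert^2}{\lVert X \rVert^2}} < \mathrm{tol},

so the final assert fires whenever the rational approximation to the inverse square root is inaccurate on the given noise vector.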

View File

@ -58,30 +58,13 @@ namespace QCD{
bool use_heatbath_forecasting;
AbstractEOFAFermion<Impl>& Lop; // the basic LH operator
AbstractEOFAFermion<Impl>& Rop; // the basic RH operator
SchurRedBlackDiagMooeeSolve<FermionField> SolverHB;
SchurRedBlackDiagMooeeSolve<FermionField> SolverL;
SchurRedBlackDiagMooeeSolve<FermionField> SolverR;
SchurRedBlackDiagMooeeSolve<FermionField> DerivativeSolverL;
SchurRedBlackDiagMooeeSolve<FermionField> DerivativeSolverR;
SchurRedBlackDiagMooeeSolve<FermionField> Solver;
FermionField Phi; // the pseudofermion field for this trajectory
public:
ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop,
AbstractEOFAFermion<Impl>& _Rop,
OperatorFunction<FermionField>& HeatbathCG,
OperatorFunction<FermionField>& ActionCGL, OperatorFunction<FermionField>& ActionCGR,
OperatorFunction<FermionField>& DerivCGL , OperatorFunction<FermionField>& DerivCGR,
Params& p,
bool use_fc=false) :
Lop(_Lop),
Rop(_Rop),
SolverHB(HeatbathCG,false,true),
SolverL(ActionCGL, false, true), SolverR(ActionCGR, false, true),
DerivativeSolverL(DerivCGL, false, true), DerivativeSolverR(DerivCGR, false, true),
Phi(_Lop.FermionGrid()),
param(p),
use_heatbath_forecasting(use_fc)
ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop, AbstractEOFAFermion<Impl>& _Rop,
OperatorFunction<FermionField>& S, Params& p, bool use_fc=false) : Lop(_Lop), Rop(_Rop), Solver(S),
Phi(_Lop.FermionGrid()), param(p), use_heatbath_forecasting(use_fc)
{
AlgRemez remez(param.lo, param.hi, param.precision);
@ -115,9 +98,6 @@ namespace QCD{
// We generate a Gaussian noise vector \eta, and then compute
// \Phi = M_{\rm EOFA}^{-1/2} * \eta
// using a rational approximation to the inverse square root
//
// As a check of rational require \Phi^dag M_{EOFA} \Phi == eta^dag M^-1/2^dag M M^-1/2 eta = eta^dag eta
//
virtual void refresh(const GaugeField& U, GridParallelRNG& pRNG)
{
Lop.ImportGauge(U);
@ -138,6 +118,7 @@ namespace QCD{
RealD scale = std::sqrt(0.5);
gaussian(pRNG,eta);
eta = eta * scale;
printf("Heatbath source vector: <\\eta|\\eta> = %1.15e\n", norm2(eta));
// \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{l} * \gamma_{l} ) * \eta
RealD N(PowerNegHalf.norm);
@ -158,11 +139,11 @@ namespace QCD{
if(use_heatbath_forecasting){ // Forecast CG guess using solutions from previous poles
Lop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Lop, Forecast_src, prev_solns);
SolverHB(Lop, CG_src, CG_soln);
Solver(Lop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = zero; // Just use zero as the initial guess
SolverHB(Lop, CG_src, CG_soln);
Solver(Lop, CG_src, CG_soln);
}
Lop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] + ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Lop.k ) * tmp[0];
@ -185,11 +166,11 @@ namespace QCD{
if(use_heatbath_forecasting){
Rop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Rop, Forecast_src, prev_solns);
SolverHB(Rop, CG_src, CG_soln);
Solver(Rop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = zero;
SolverHB(Rop, CG_src, CG_soln);
Solver(Rop, CG_src, CG_soln);
}
Rop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] - ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Rop.k ) * tmp[0];
@ -201,47 +182,8 @@ namespace QCD{
// Reset shift coefficients for energy and force evals
Lop.RefreshShiftCoefficients(0.0);
Rop.RefreshShiftCoefficients(-1.0);
// Bounds check
RealD EtaDagEta = norm2(eta);
// RealD PhiDagMPhi= norm2(eta);
};
void Meofa(const GaugeField& U,const FermionField &phi, FermionField & Mphi)
{
#if 0
Lop.ImportGauge(U);
Rop.ImportGauge(U);
FermionField spProj_Phi(Lop.FermionGrid());
FermionField mPhi(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());
mPhi = phi;
// LH term: S = S - k <\Phi| P_{-} \Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, tmp[0], -1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverL(Lop, tmp[1], tmp[0]);
Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
Lop.Omega(tmp[1], tmp[0], -1, 1);
mPhi = mPhi - Lop.k * innerProduct(spProj_Phi, tmp[0]).real();
// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
// - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, tmp[0], 1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverR(Rop, tmp[1], tmp[0]);
Rop.Dtilde(tmp[0], tmp[1]);
Rop.Omega(tmp[1], tmp[0], 1, 1);
action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();
#endif
}
// EOFA action: see Eqn. (10) of arXiv:1706.05843
virtual RealD S(const GaugeField& U)
{
@ -259,7 +201,7 @@ namespace QCD{
Lop.Omega(spProj_Phi, tmp[0], -1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverL(Lop, tmp[1], tmp[0]);
Solver(Lop, tmp[1], tmp[0]);
Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
Lop.Omega(tmp[1], tmp[0], -1, 1);
action -= Lop.k * innerProduct(spProj_Phi, tmp[0]).real();
@ -270,7 +212,7 @@ namespace QCD{
Rop.Omega(spProj_Phi, tmp[0], 1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
SolverR(Rop, tmp[1], tmp[0]);
Solver(Rop, tmp[1], tmp[0]);
Rop.Dtilde(tmp[0], tmp[1]);
Rop.Omega(tmp[1], tmp[0], 1, 1);
action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();
@ -292,22 +234,17 @@ namespace QCD{
GaugeField force(Lop.GaugeGrid());
/////////////////////////////////////////////
// PAB:
// Optional single precision derivative ?
/////////////////////////////////////////////
// LH: dSdU = k \chi_{L}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{L}
// \chi_{L} = H(mf)^{-1} \Omega_{-} P_{-} \Phi
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, Omega_spProj_Phi, -1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = zero;
DerivativeSolverL(Lop, CG_src, spProj_Phi);
Solver(Lop, CG_src, spProj_Phi);
Lop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = -Lop.k * force;
dSdU = Lop.k * force;
// RH: dSdU = dSdU - k \chi_{R}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{}
// \chi_{R} = ( H(mb) - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \Phi
@ -315,11 +252,11 @@ namespace QCD{
Rop.Omega(spProj_Phi, Omega_spProj_Phi, 1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = zero;
DerivativeSolverR(Rop, CG_src, spProj_Phi);
Solver(Rop, CG_src, spProj_Phi);
Rop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = dSdU + Rop.k * force;
dSdU = dSdU - Rop.k * force;
};
};
}}

View File

@ -157,13 +157,6 @@ class OneFlavourEvenOddRationalPseudoFermionAction
msCG(Mpc, PhiOdd, Y);
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(FermOp.FermionRedBlackGrid());
gauss = PhiOdd;
HighBoundCheck(Mpc,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,Mpc,gauss,PowerNegHalf);
}
RealD action = norm2(Y);
std::cout << GridLogMessage << "Pseudofermion action FIXME -- is -1/4 "
"solve or -1/2 solve faster??? "

View File

@ -170,14 +170,6 @@ namespace Grid{
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// Randomly apply rational bounds checks.
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(NumOp.FermionRedBlackGrid());
gauss = PhiOdd;
HighBoundCheck(MdagM,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagM,gauss,PowerNegHalf);
}
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);

View File

@ -143,14 +143,6 @@ namespace Grid{
msCG(MdagMOp,Phi,Y);
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(FermOp.FermionGrid());
gauss = Phi;
HighBoundCheck(MdagMOp,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagMOp,gauss,PowerNegHalf);
}
RealD action = norm2(Y);
std::cout << GridLogMessage << "Pseudofermion action FIXME -- is -1/4 solve or -1/2 solve faster??? "<<action<<std::endl;
return action;

View File

@ -156,14 +156,6 @@ namespace Grid{
ConjugateGradientMultiShift<FermionField> msCG_M(param.MaxIter,PowerNegQuarter);
msCG_M(MdagM,X,Y);
// Randomly apply rational bounds checks.
if ( (rand()%param.BoundsCheckFreq)==0 ) {
FermionField gauss(NumOp.FermionGrid());
gauss = Phi;
HighBoundCheck(MdagM,gauss,param.hi);
InverseSqrtBoundsCheck(param.MaxIter,param.tolerance*100,MdagM,gauss,PowerNegHalf);
}
// Phidag VdagV^1/4 MdagM^-1/4 MdagM^-1/4 VdagV^1/4 Phi
RealD action = norm2(Y);

View File

@ -29,9 +29,6 @@ directory
#ifndef QCD_PSEUDOFERMION_AGGREGATE_H
#define QCD_PSEUDOFERMION_AGGREGATE_H
// Rational functions
#include <Grid/qcd/action/pseudofermion/Bounds.h>
#include <Grid/qcd/action/pseudofermion/EvenOddSchurDifferentiable.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavour.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavourRatio.h>

View File

@ -85,20 +85,21 @@ class TwoFlavourPseudoFermionAction : public Action<typename Impl::GaugeField> {
// and must multiply by 0.707....
//
// Chroma has this scale factor: two_flavor_monomial_w.h
// CPS uses this factor
// IroIro: does not use this scale. It is absorbed by a change of vars
// in the Phi integral, and thus is only an irrelevant prefactor for
// the partition function.
//
const RealD scale = std::sqrt(0.5);
RealD scale = std::sqrt(0.5);
FermionField eta(FermOp.FermionGrid());
gaussian(pRNG, eta); eta = scale *eta;
gaussian(pRNG, eta);
FermOp.ImportGauge(U);
FermOp.Mdag(eta, Phi);
Phi = Phi * scale;
};
//////////////////////////////////////////////////////
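The \sqrt{0.5} factor discussed in the comments above can be stated in one line (assuming the convention, referred to there as the Chroma/CPS one, in which the pseudofermion weight is \exp(-\Phi^\dagger (M^\dagger M)^{-1}\Phi) and the heatbath sets \Phi = M^\dagger \eta): one needs P(\eta) \propto e^{-\eta^\dagger \eta}, i.e. variance 1/2 per real component of \eta, so a unit-variance Gaussian draw is multiplied by

    \sqrt{1/2} \simeq 0.707\ldots,

which is the scale applied in refresh() on both sides of this hunk (to \eta before Mdag on one side, to \Phi after it on the other).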

View File

@ -46,7 +46,6 @@ namespace Grid{
OperatorFunction<FermionField> &DerivativeSolver;
OperatorFunction<FermionField> &ActionSolver;
OperatorFunction<FermionField> &HeatbathSolver;
FermionField PhiOdd; // the pseudo fermion field for this trajectory
FermionField PhiEven; // the pseudo fermion field for this trajectory
@ -55,18 +54,11 @@ namespace Grid{
TwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS ) :
TwoFlavourEvenOddRatioPseudoFermionAction(_NumOp,_DenOp, DS,AS,AS) {};
TwoFlavourEvenOddRatioPseudoFermionAction(FermionOperator<Impl> &_NumOp,
FermionOperator<Impl> &_DenOp,
OperatorFunction<FermionField> & DS,
OperatorFunction<FermionField> & AS, OperatorFunction<FermionField> & HS) :
OperatorFunction<FermionField> & AS) :
NumOp(_NumOp),
DenOp(_DenOp),
DerivativeSolver(DS),
ActionSolver(AS),
HeatbathSolver(HS),
PhiEven(_NumOp.FermionRedBlackGrid()),
PhiOdd(_NumOp.FermionRedBlackGrid())
{
@ -119,7 +111,7 @@ namespace Grid{
// Odd det factors
Mpc.MpcDag(etaOdd,PhiOdd);
tmp=zero;
HeatbathSolver(Vpc,PhiOdd,tmp);
ActionSolver(Vpc,PhiOdd,tmp);
Vpc.Mpc(tmp,PhiOdd);
// Even det factors

View File

@ -54,8 +54,8 @@ public:
template <class ReaderClass, typename std::enable_if<isReader<ReaderClass>::value, int >::type = 0 >
IntegratorParameters(ReaderClass & Reader){
std::cout << GridLogMessage << "Reading integrator\n";
read(Reader, "Integrator", *this);
std::cout << "Reading integrator\n";
read(Reader, "Integrator", *this);
}
void print_parameters() const {
@ -88,7 +88,8 @@ class Integrator {
t_P[level] += ep;
update_P(P, U, level, ep);
std::cout << GridLogIntegrator << "[" << level << "] P " << " dt " << ep << " : t_P " << t_P[level] << std::endl;
std::cout << GridLogIntegrator << "[" << level << "] P "
<< " dt " << ep << " : t_P " << t_P[level] << std::endl;
}
// to be used by the actionlevel class to iterate
@ -104,7 +105,7 @@ class Integrator {
GF force = Rep.RtoFundamentalProject(forceR); // Ta for the fundamental rep
Real force_abs = std::sqrt(norm2(force)/(U._grid->gSites()));
std::cout << GridLogIntegrator << "Hirep Force average: " << force_abs << std::endl;
Mom -= force * ep* HMC_MOMENTUM_DENOMINATOR;;
Mom -= force * ep ;
}
}
} update_P_hireps{};
@ -128,11 +129,11 @@ class Integrator {
double end_force = usecond();
Real force_abs = std::sqrt(norm2(force)/U._grid->gSites());
std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;
Mom -= force * ep* HMC_MOMENTUM_DENOMINATOR;;
Mom -= force * ep;
double end_full = usecond();
double time_full = (end_full - start_full) / 1e3;
double time_force = (end_force - start_force) / 1e3;
std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
}
// Force from the other representations
@ -237,7 +238,8 @@ class Integrator {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
Field& Us =
Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
as[level].actions.at(actionID)->refresh(Us, pRNG);
}
@ -250,11 +252,13 @@ class Integrator {
// over the representations
struct _S {
template <class FieldType, class Repr>
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep, int level, RealD& H) {
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep,
int level, RealD& H) {
for (int a = 0; a < repr_set.size(); ++a) {
RealD Hterm = repr_set.at(a)->S(Rep.U);
std::cout << GridLogMessage << "S Level " << level << " term " << a << " H Hirep = " << Hterm << std::endl;
std::cout << GridLogMessage << "S Level " << level << " term " << a
<< " H Hirep = " << Hterm << std::endl;
H += Hterm;
}
@ -264,21 +268,20 @@ class Integrator {
// Calculate action
RealD S(Field& U) { // here also U not used
std::cout << GridLogIntegrator << "Integrator action\n";
RealD H = - FieldImplementation::FieldSquareNorm(P)/HMC_MOMENTUM_DENOMINATOR; // - trace (P*P)/denom
RealD H = - FieldImplementation::FieldSquareNorm(P); // - trace (P*P)
RealD Hterm;
std::cout << GridLogMessage << "Momentum action H_p = " << H << "\n";
// Actions
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
Field& Us =
Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
Hterm = as[level].actions.at(actionID)->S(Us);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
std::cout << GridLogMessage << "S Level " << level << " term "
<< actionID << " H = " << Hterm << std::endl;
H += Hterm;
}
as[level].apply(S_hireps, Representations, level, H);
@ -303,7 +306,8 @@ class Integrator {
// Check the clocks all match on all levels
for (int level = 0; level < as.size(); ++level) {
assert(fabs(t_U - t_P[level]) < 1.0e-6); // must be the same
std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
std::cout << GridLogIntegrator << " times[" << level
<< "]= " << t_P[level] << " " << t_U << std::endl;
}
// and that we indeed got to the end of the trajectory
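
Note: the momentum and field updates shown in this file implement a leapfrog-style splitting: update_P subtracts the force times the step size (rescaled by HMC_MOMENTUM_DENOMINATOR on one side of this diff), update_U advances the field, and S() accumulates the momentum term plus the action terms into the Hamiltonian checked at the end of the trajectory. A minimal stand-alone sketch of the same structure, for a single scalar degree of freedom rather than Grid fields (toy action S(U) = U*U/2; all numbers illustrative, not Grid code):

    #include <iostream>

    int main(void) {
      double U = 0.3, P = 0.1;                  // "field" and conjugate momentum
      const double eps = 0.05;                  // step size ("dt" in the log messages)
      auto dSdU = [](double u) { return u; };   // toy action S(U) = U*U/2, so force = U

      double H0 = 0.5 * P * P + 0.5 * U * U;    // momentum term + action term
      for (int step = 0; step < 100; ++step) {
        P -= 0.5 * eps * dSdU(U);               // update_P, half step
        U += eps * P;                           // update_U, full step
        P -= 0.5 * eps * dSdU(U);               // update_P, half step
      }
      double H1 = 0.5 * P * P + 0.5 * U * U;
      std::cout << "dH = " << H1 - H0 << std::endl;  // small: O(eps^2) violation per trajectory
      return 0;
    }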

View File

@ -231,7 +231,8 @@ class ForceGradient : public Integrator<FieldImplementation, SmearingPolicy,
Field Pfg(U._grid);
Ufg = U;
Pfg = zero;
std::cout << GridLogIntegrator << "FG update " << fg_dt << " " << ep << std::endl;
std::cout << GridLogIntegrator << "FG update " << fg_dt << " " << ep
<< std::endl;
// prepare_fg; no prediction/result cache for now
// could relax CG stopping conditions for the
// derivatives in the small step since the force gets multiplied by
@ -270,7 +271,8 @@ class ForceGradient : public Integrator<FieldImplementation, SmearingPolicy,
this->step(U, level + 1, first_step, 0);
}
this->FG_update_P(U, level, 2 * Chi / ((1.0 - 2.0 * lambda) * eps), (1.0 - 2.0 * lambda) * eps);
this->FG_update_P(U, level, 2 * Chi / ((1.0 - 2.0 * lambda) * eps),
(1.0 - 2.0 * lambda) * eps);
if (level == fl) { // lowest level
this->update_U(U, 0.5 * eps);

View File

@ -11,24 +11,6 @@ const std::array<const Gamma, 4> Gamma::gmu = {{
Gamma(Gamma::Algebra::GammaZ),
Gamma(Gamma::Algebra::GammaT)}};
const std::array<const Gamma, 16> Gamma::gall = {{
Gamma(Gamma::Algebra::Identity),
Gamma(Gamma::Algebra::Gamma5),
Gamma(Gamma::Algebra::GammaX),
Gamma(Gamma::Algebra::GammaY),
Gamma(Gamma::Algebra::GammaZ),
Gamma(Gamma::Algebra::GammaT),
Gamma(Gamma::Algebra::GammaXGamma5),
Gamma(Gamma::Algebra::GammaYGamma5),
Gamma(Gamma::Algebra::GammaZGamma5),
Gamma(Gamma::Algebra::GammaTGamma5),
Gamma(Gamma::Algebra::SigmaXT),
Gamma(Gamma::Algebra::SigmaXY),
Gamma(Gamma::Algebra::SigmaXZ),
Gamma(Gamma::Algebra::SigmaYT),
Gamma(Gamma::Algebra::SigmaYZ),
Gamma(Gamma::Algebra::SigmaZT)}};
const std::array<const char *, Gamma::nGamma> Gamma::name = {{
"-Gamma5 ",
"Gamma5 ",

View File

@ -48,7 +48,6 @@ class Gamma {
static const std::array<std::array<Algebra, nGamma>, nGamma> mul;
static const std::array<Algebra, nGamma> adj;
static const std::array<const Gamma, 4> gmu;
static const std::array<const Gamma, 16> gall;
Algebra g;
public:
Gamma(Algebra initg): g(initg) {}

View File

@ -10,10 +10,10 @@
NotebookFileLineBreakTest
NotebookFileLineBreakTest
NotebookDataPosition[ 158, 7]
NotebookDataLength[ 67118, 1714]
NotebookOptionsPosition[ 63485, 1652]
NotebookOutlinePosition[ 63842, 1668]
CellTagsIndexPosition[ 63799, 1665]
NotebookDataLength[ 75090, 1956]
NotebookOptionsPosition[ 69536, 1867]
NotebookOutlinePosition[ 69898, 1883]
CellTagsIndexPosition[ 69855, 1880]
WindowFrame->Normal*)
(* Beginning of Notebook Content *)
@ -76,6 +76,234 @@ Cell[BoxData["\<\"/Users/antonin/Development/Grid/lib/qcd/spin/gamma-gen\"\>"]\
Cell[CellGroupData[{
Cell[BoxData[
RowBox[{"FactorInteger", "[", "3152", "]"}]], "Input",
CellChangeTimes->{{3.7432347536316767`*^9, 3.7432347764739027`*^9}, {
3.743234833567358*^9,
3.743234862146022*^9}},ExpressionUUID->"d1a0fd03-85e1-43af-ba80-\
3ca4235675d8"],
Cell[BoxData[
RowBox[{"{",
RowBox[{
RowBox[{"{",
RowBox[{"2", ",", "4"}], "}"}], ",",
RowBox[{"{",
RowBox[{"197", ",", "1"}], "}"}]}], "}"}]], "Output",
CellChangeTimes->{{3.743234836792224*^9,
3.743234862493619*^9}},ExpressionUUID->"16d3f953-4b24-4ed2-ae62-\
306dcab66ca7"]
}, Open ]],
Cell[CellGroupData[{
Cell[BoxData[
RowBox[{"sol", "=",
RowBox[{"Solve", "[",
RowBox[{
RowBox[{
RowBox[{
SuperscriptBox["x", "2"], "+",
SuperscriptBox["y", "2"], "+",
SuperscriptBox["z", "2"]}], "\[Equal]", "2"}], ",",
RowBox[{"{",
RowBox[{"x", ",", "y", ",", "z"}], "}"}], ",", "Integers"}],
"]"}]}]], "Input",
CellChangeTimes->{{3.743235304127721*^9,
3.7432353087929983`*^9}},ExpressionUUID->"f0fa2a5c-3d81-4d75-a447-\
50c7ca3459ff"],
Cell[BoxData[
RowBox[{"{",
RowBox[{
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "0"}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]",
RowBox[{"-", "1"}]}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]",
RowBox[{"-", "1"}]}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]", "0"}], ",",
RowBox[{"z", "\[Rule]", "1"}]}], "}"}], ",",
RowBox[{"{",
RowBox[{
RowBox[{"x", "\[Rule]", "1"}], ",",
RowBox[{"y", "\[Rule]", "1"}], ",",
RowBox[{"z", "\[Rule]", "0"}]}], "}"}]}], "}"}]], "Output",
CellChangeTimes->{{3.743235305220907*^9,
3.743235309139554*^9}},ExpressionUUID->"d9825c95-24bb-442a-8734-\
4c0f47e99dfc"]
}, Open ]],
Cell[BoxData[
RowBox[{
RowBox[{"xmlElem", "[", "x_", "]"}], ":=",
RowBox[{"Print", "[",
RowBox[{"\"\<<elem>\>\"", "<>",
RowBox[{"ToString", "[",
RowBox[{"x", "[",
RowBox[{"[", "1", "]"}], "]"}], "]"}], "<>", "\"\< \>\"", "<>",
RowBox[{"ToString", "[",
RowBox[{"x", "[",
RowBox[{"[", "2", "]"}], "]"}], "]"}], "<>", "\"\< \>\"", "<>",
RowBox[{"ToString", "[",
RowBox[{"x", "[",
RowBox[{"[", "3", "]"}], "]"}], "]"}], "<>", "\"\<</elem>\>\""}],
"]"}]}]], "Input",
CellChangeTimes->{{3.74323534002862*^9, 3.743235351000985*^9}, {
3.743235403233039*^9, 3.743235413488028*^9}, {3.743235473169856*^9,
3.7432354747126904`*^9}},ExpressionUUID->"aea76313-c89e-45e8-b429-\
3f454091666d"],
Cell[CellGroupData[{
Cell[BoxData[
RowBox[{
RowBox[{
RowBox[{"xmlElem", "[",
RowBox[{
RowBox[{"{",
RowBox[{"x", ",", "y", ",", "z"}], "}"}], "/.", "#"}], "]"}], "&"}], "/@",
"sol"}]], "Input",
CellChangeTimes->{{3.743235415820318*^9,
3.743235467025091*^9}},ExpressionUUID->"07da3998-8eab-40ba-8c0b-\
ac6b130cb4fb"],
Cell[CellGroupData[{
Cell[BoxData["\<\"<elem>-1 -1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476581676*^9},ExpressionUUID->"c577ba06-b67a-405a-9ff5-\
2bf7dc898d03"],
Cell[BoxData["\<\"<elem>-1 0 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476588011*^9},ExpressionUUID->"d041aa36-0cea-457c-9d4b-\
1fe9be66e2ab"],
Cell[BoxData["\<\"<elem>-1 0 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476596887*^9},ExpressionUUID->"bf141b55-86b2-4430-a994-\
5c03d5a19441"],
Cell[BoxData["\<\"<elem>-1 1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476605785*^9},ExpressionUUID->"4968a660-4ecf-4b66-9071-\
8bd798c18d21"],
Cell[BoxData["\<\"<elem>0 -1 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476613523*^9},ExpressionUUID->"4e22d943-2680-416b-a1d7-\
a16ca20b781f"],
Cell[BoxData["\<\"<elem>0 -1 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.7432354766218576`*^9},ExpressionUUID->"6dd38385-08b3-4dd9-932f-\
98a00c6db1b2"],
Cell[BoxData["\<\"<elem>0 1 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476629427*^9},ExpressionUUID->"ef3baad3-91d1-4735-9a22-\
53495a624c15"],
Cell[BoxData["\<\"<elem>0 1 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476638257*^9},ExpressionUUID->"413fbb68-5017-4272-a62a-\
fa234e6daaea"],
Cell[BoxData["\<\"<elem>1 -1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476646203*^9},ExpressionUUID->"3a832a60-ae00-414b-a9ac-\
f5e86e67e917"],
Cell[BoxData["\<\"<elem>1 0 -1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476653907*^9},ExpressionUUID->"bfc79ef6-f6c7-4f1e-88e8-\
005ac314be9c"],
Cell[BoxData["\<\"<elem>1 0 1</elem>\"\>"], "Print",
CellChangeTimes->{
3.743235476662575*^9},ExpressionUUID->"0f892891-f885-489c-9925-\
ddef4d698410"],
Cell[BoxData["\<\"<elem>1 1 0</elem>\"\>"], "Print",
CellChangeTimes->{
3.7432354766702337`*^9},ExpressionUUID->"2906f190-e673-4f33-9c34-\
e8e56efe7a27"]
}, Open ]],
Cell[BoxData[
RowBox[{"{",
RowBox[{
"Null", ",", "Null", ",", "Null", ",", "Null", ",", "Null", ",", "Null",
",", "Null", ",", "Null", ",", "Null", ",", "Null", ",", "Null", ",",
"Null"}], "}"}]], "Output",
CellChangeTimes->{
3.7432354246225967`*^9, {3.7432354674878073`*^9,
3.743235476678007*^9}},ExpressionUUID->"500ca3c1-88d8-46e5-a1a1-\
86a7878e5638"]
}, Open ]],
Cell[CellGroupData[{
Cell["Clifford algebra generation", "Section",
CellChangeTimes->{{3.6942089434583883`*^9,
3.694208978559093*^9}},ExpressionUUID->"a5b064b3-3011-4922-8559-\
@ -820,10 +1048,9 @@ generated by the Mathematica notebook gamma-gen/gamma-gen.nb\n\n#include \
"\"\< static const std::array<const char *, nGamma> \
name;\n static const std::array<std::array<Algebra, nGamma>, nGamma> mul;\n\
static const std::array<Algebra, nGamma> adj;\n \
static const std::array<const Gamma, 4> gmu;\n static \
const std::array<const Gamma, 16> gall;\n Algebra \
g;\n public:\n \
Gamma(Algebra initg): g(initg) {} \n};\n\n\>\""}]}], ";",
static const std::array<const Gamma, 4> gmu;\n \
Algebra g;\n public:\n \
Gamma(Algebra initg): g(initg) {} \n};\n\n\>\""}]}], ";",
"\[IndentingNewLine]",
RowBox[{"out", " ", "=",
RowBox[{"out", "<>", "funcCode"}]}], ";", "\[IndentingNewLine]",
@ -849,8 +1076,7 @@ Gamma(Algebra initg): g(initg) {} \n};\n\n\>\""}]}], ";",
3.694963343265525*^9}, {3.694964367519239*^9, 3.69496439461199*^9}, {
3.694964462130747*^9, 3.6949644669959793`*^9}, 3.694964509762739*^9, {
3.694964705045744*^9, 3.694964723148797*^9}, {3.694964992988984*^9,
3.6949649968504257`*^9}, {3.758291687176977*^9,
3.758291694181189*^9}},ExpressionUUID->"c7103bd6-b539-4495-b98c-\
3.6949649968504257`*^9}},ExpressionUUID->"c7103bd6-b539-4495-b98c-\
d4d12ac6cad8"],
Cell["Gamma enum generation:", "Text",
@ -1519,17 +1745,8 @@ namespace QCD {\>\""}]}], ";", "\[IndentingNewLine]",
"\"\<\n\nconst std::array<const Gamma, 4> Gamma::gmu = {{\n \
Gamma(Gamma::Algebra::GammaX),\n Gamma(Gamma::Algebra::GammaY),\n \
Gamma(Gamma::Algebra::GammaZ),\n Gamma(Gamma::Algebra::GammaT)}};\n\nconst \
std::array<const Gamma, 16> Gamma::gall = {{\n \
Gamma(Gamma::Algebra::Identity),\n Gamma(Gamma::Algebra::Gamma5),\n \
Gamma(Gamma::Algebra::GammaX),\n Gamma(Gamma::Algebra::GammaY),\n \
Gamma(Gamma::Algebra::GammaZ),\n Gamma(Gamma::Algebra::GammaT),\n \
Gamma(Gamma::Algebra::GammaXGamma5),\n Gamma(Gamma::Algebra::GammaYGamma5),\n\
Gamma(Gamma::Algebra::GammaZGamma5),\n \
Gamma(Gamma::Algebra::GammaTGamma5),\n Gamma(Gamma::Algebra::SigmaXT), \
\n Gamma(Gamma::Algebra::SigmaXY), \n Gamma(Gamma::Algebra::SigmaXZ), \
\n Gamma(Gamma::Algebra::SigmaYT),\n Gamma(Gamma::Algebra::SigmaYZ),\n \
Gamma(Gamma::Algebra::SigmaZT)}};\n\nconst std::array<const char *, \
Gamma::nGamma> Gamma::name = {{\n\>\""}]}], ";", "\[IndentingNewLine]",
std::array<const char *, Gamma::nGamma> Gamma::name = {{\n\>\""}]}], ";",
"\[IndentingNewLine]",
RowBox[{"Do", "[", "\[IndentingNewLine]",
RowBox[{
RowBox[{"out", " ", "=", " ",
@ -1630,9 +1847,7 @@ Gamma::nGamma> Gamma::mul = {{\\n\>\""}]}], ";", "\[IndentingNewLine]",
3.694963031525289*^9}, {3.694963065828494*^9, 3.694963098327538*^9}, {
3.6949632020836153`*^9, 3.6949632715940027`*^9}, {3.694963440035037*^9,
3.6949634418966017`*^9}, {3.6949651447067547`*^9, 3.694965161228381*^9}, {
3.694967957845581*^9, 3.694967958364184*^9}, {3.758291673792514*^9,
3.758291676983432*^9}},ExpressionUUID->"b1b309f8-a3a7-4081-a781-\
c3845e3cd372"],
3.694967957845581*^9, 3.694967958364184*^9}}],
Cell[BoxData[
RowBox[{
@ -1652,8 +1867,8 @@ Cell[BoxData[""], "Input",
},
WindowSize->{1246, 1005},
WindowMargins->{{282, Automatic}, {Automatic, 14}},
FrontEndVersion->"11.3 for Mac OS X x86 (32-bit, 64-bit Kernel) (March 5, \
2018)",
FrontEndVersion->"11.2 for Mac OS X x86 (32-bit, 64-bit Kernel) (September \
10, 2017)",
StyleDefinitions->"Default.nb"
]
(* End of Notebook Content *)
@ -1673,48 +1888,75 @@ Cell[1948, 43, 570, 11, 73, "Input",ExpressionUUID->"5c937a3e-adfd-4d7e-8fde-afb
Cell[2521, 56, 1172, 17, 34, "Output",ExpressionUUID->"72817ba6-2f6a-4a4d-8212-6f0970f49e7c"]
}, Open ]],
Cell[CellGroupData[{
Cell[3730, 78, 174, 3, 67, "Section",ExpressionUUID->"a5b064b3-3011-4922-8559-ead857cad102"],
Cell[3907, 83, 535, 16, 52, "Input",ExpressionUUID->"aa28f02b-31e1-4df2-9b5d-482177464b59"],
Cell[4445, 101, 250, 4, 35, "Text",ExpressionUUID->"c8896b88-f1db-4ce4-b7a6-0c9838bdb8f1"],
Cell[4698, 107, 5511, 169, 425, "Input",ExpressionUUID->"52a96ff6-047e-4043-86d0-e303866e5f8e"],
Cell[CellGroupData[{
Cell[10234, 280, 2183, 58, 135, "Input",ExpressionUUID->"8b0f4955-2c3f-418c-9226-9be8f87621e8"],
Cell[12420, 340, 1027, 27, 56, "Output",ExpressionUUID->"edd0619f-6f12-4070-a1d2-6b547877fadc"]
Cell[3730, 78, 248, 5, 30, "Input",ExpressionUUID->"d1a0fd03-85e1-43af-ba80-3ca4235675d8"],
Cell[3981, 85, 299, 9, 34, "Output",ExpressionUUID->"16d3f953-4b24-4ed2-ae62-306dcab66ca7"]
}, Open ]],
Cell[CellGroupData[{
Cell[13484, 372, 1543, 46, 114, "Input",ExpressionUUID->"fb45123c-c610-4075-99b0-7cd71c728ae7"],
Cell[15030, 420, 1311, 32, 87, "Output",ExpressionUUID->"2ae14565-b412-4dc0-9dce-bd6c1ba5ef27"]
Cell[4317, 99, 469, 14, 33, "Input",ExpressionUUID->"f0fa2a5c-3d81-4d75-a447-50c7ca3459ff"],
Cell[4789, 115, 2423, 77, 56, "Output",ExpressionUUID->"d9825c95-24bb-442a-8734-4c0f47e99dfc"]
}, Open ]],
Cell[16356, 455, 179, 3, 35, "Text",ExpressionUUID->"af247231-a58d-417b-987a-26908dafffdb"],
Cell[16538, 460, 2175, 65, 94, "Input",ExpressionUUID->"7c44cadd-e488-4f51-87d8-c64eef11f40c"],
Cell[18716, 527, 193, 3, 35, "Text",ExpressionUUID->"856f1746-1107-4509-a5ce-ac9c7f56cdb1"],
Cell[7227, 195, 751, 18, 30, "Input",ExpressionUUID->"aea76313-c89e-45e8-b429-3f454091666d"],
Cell[CellGroupData[{
Cell[18934, 534, 536, 16, 30, "Input",ExpressionUUID->"8674484a-8543-434f-b177-3b27f9353212"],
Cell[19473, 552, 1705, 35, 87, "Output",ExpressionUUID->"c3b3f84d-91f6-41af-af6b-a394ca020511"]
Cell[8003, 217, 323, 10, 30, "Input",ExpressionUUID->"07da3998-8eab-40ba-8c0b-ac6b130cb4fb"],
Cell[CellGroupData[{
Cell[8351, 231, 156, 3, 24, "Print",ExpressionUUID->"c577ba06-b67a-405a-9ff5-2bf7dc898d03"],
Cell[8510, 236, 156, 3, 24, "Print",ExpressionUUID->"d041aa36-0cea-457c-9d4b-1fe9be66e2ab"],
Cell[8669, 241, 155, 3, 24, "Print",ExpressionUUID->"bf141b55-86b2-4430-a994-5c03d5a19441"],
Cell[8827, 246, 155, 3, 24, "Print",ExpressionUUID->"4968a660-4ecf-4b66-9071-8bd798c18d21"],
Cell[8985, 251, 156, 3, 24, "Print",ExpressionUUID->"4e22d943-2680-416b-a1d7-a16ca20b781f"],
Cell[9144, 256, 157, 3, 24, "Print",ExpressionUUID->"6dd38385-08b3-4dd9-932f-98a00c6db1b2"],
Cell[9304, 261, 155, 3, 24, "Print",ExpressionUUID->"ef3baad3-91d1-4735-9a22-53495a624c15"],
Cell[9462, 266, 154, 3, 24, "Print",ExpressionUUID->"413fbb68-5017-4272-a62a-fa234e6daaea"],
Cell[9619, 271, 155, 3, 24, "Print",ExpressionUUID->"3a832a60-ae00-414b-a9ac-f5e86e67e917"],
Cell[9777, 276, 155, 3, 24, "Print",ExpressionUUID->"bfc79ef6-f6c7-4f1e-88e8-005ac314be9c"],
Cell[9935, 281, 154, 3, 24, "Print",ExpressionUUID->"0f892891-f885-489c-9925-ddef4d698410"],
Cell[10092, 286, 156, 3, 24, "Print",ExpressionUUID->"2906f190-e673-4f33-9c34-e8e56efe7a27"]
}, Open ]],
Cell[10263, 292, 376, 9, 34, "Output",ExpressionUUID->"500ca3c1-88d8-46e5-a1a1-86a7878e5638"]
}, Open ]],
Cell[21193, 590, 170, 3, 35, "Text",ExpressionUUID->"518a3040-54b1-4d43-8947-5c7d12efa94d"],
Cell[CellGroupData[{
Cell[21388, 597, 536, 14, 30, "Input",ExpressionUUID->"61a2e974-2b39-4a07-8043-2dfd39a70569"],
Cell[21927, 613, 6754, 167, 303, "Output",ExpressionUUID->"73480ac0-3043-4077-80cc-b952a94c822a"]
Cell[10676, 306, 174, 3, 67, "Section",ExpressionUUID->"a5b064b3-3011-4922-8559-ead857cad102"],
Cell[10853, 311, 535, 16, 52, "Input",ExpressionUUID->"aa28f02b-31e1-4df2-9b5d-482177464b59"],
Cell[11391, 329, 250, 4, 35, "Text",ExpressionUUID->"c8896b88-f1db-4ce4-b7a6-0c9838bdb8f1"],
Cell[11644, 335, 5511, 169, 425, "Input",ExpressionUUID->"52a96ff6-047e-4043-86d0-e303866e5f8e"],
Cell[CellGroupData[{
Cell[17180, 508, 2183, 58, 135, "Input",ExpressionUUID->"8b0f4955-2c3f-418c-9226-9be8f87621e8"],
Cell[19366, 568, 1027, 27, 67, "Output",ExpressionUUID->"edd0619f-6f12-4070-a1d2-6b547877fadc"]
}, Open ]],
Cell[CellGroupData[{
Cell[20430, 600, 1543, 46, 114, "Input",ExpressionUUID->"fb45123c-c610-4075-99b0-7cd71c728ae7"],
Cell[21976, 648, 1311, 32, 98, "Output",ExpressionUUID->"2ae14565-b412-4dc0-9dce-bd6c1ba5ef27"]
}, Open ]],
Cell[23302, 683, 179, 3, 35, "Text",ExpressionUUID->"af247231-a58d-417b-987a-26908dafffdb"],
Cell[23484, 688, 2175, 65, 94, "Input",ExpressionUUID->"7c44cadd-e488-4f51-87d8-c64eef11f40c"],
Cell[25662, 755, 193, 3, 35, "Text",ExpressionUUID->"856f1746-1107-4509-a5ce-ac9c7f56cdb1"],
Cell[CellGroupData[{
Cell[25880, 762, 536, 16, 30, "Input",ExpressionUUID->"8674484a-8543-434f-b177-3b27f9353212"],
Cell[26419, 780, 1705, 35, 87, "Output",ExpressionUUID->"c3b3f84d-91f6-41af-af6b-a394ca020511"]
}, Open ]],
Cell[28139, 818, 170, 3, 35, "Text",ExpressionUUID->"518a3040-54b1-4d43-8947-5c7d12efa94d"],
Cell[CellGroupData[{
Cell[28334, 825, 536, 14, 30, "Input",ExpressionUUID->"61a2e974-2b39-4a07-8043-2dfd39a70569"],
Cell[28873, 841, 6754, 167, 303, "Output",ExpressionUUID->"73480ac0-3043-4077-80cc-b952a94c822a"]
}, Open ]]
}, Open ]],
Cell[CellGroupData[{
Cell[28730, 786, 226, 4, 67, "Section",ExpressionUUID->"4e833cd6-9f0e-4aa3-a873-3d579e874720"],
Cell[28959, 792, 188, 4, 44, "Text",ExpressionUUID->"6d27fc04-3a60-4e03-8df7-3dd3aeee35b4"],
Cell[29150, 798, 3104, 55, 724, "Input",ExpressionUUID->"c7103bd6-b539-4495-b98c-d4d12ac6cad8"],
Cell[32257, 855, 221, 4, 44, "Text",ExpressionUUID->"0625593d-290f-4a39-9d80-8e2c6fdbc94e"],
Cell[32481, 861, 4936, 150, 682, "Input",ExpressionUUID->"1ad4904c-352f-4b1d-a7c7-91e1b0549409"],
Cell[37420, 1013, 2645, 56, 199, "Input",ExpressionUUID->"0221674f-9b63-4662-91bc-ccc8c6ae9589"],
Cell[40068, 1071, 209, 4, 44, "Text",ExpressionUUID->"d2d2257a-487b-416f-bc40-abd4482225f7"],
Cell[40280, 1077, 15306, 397, 2131, "Input",ExpressionUUID->"daea68a9-c9e8-46ab-9bc8-5186e2cf477c"],
Cell[55589, 1476, 137, 2, 44, "Text",ExpressionUUID->"76ba9d5a-7ee3-4888-be7e-6377003275e8"],
Cell[55729, 1480, 521, 12, 30, "Input",ExpressionUUID->"4ec61f4c-3fd3-49ea-b5ef-6f7f04a16b34"]
Cell[35676, 1014, 226, 4, 67, "Section",ExpressionUUID->"4e833cd6-9f0e-4aa3-a873-3d579e874720"],
Cell[35905, 1020, 188, 4, 44, "Text",ExpressionUUID->"6d27fc04-3a60-4e03-8df7-3dd3aeee35b4"],
Cell[36096, 1026, 2980, 53, 703, "Input",ExpressionUUID->"c7103bd6-b539-4495-b98c-d4d12ac6cad8"],
Cell[39079, 1081, 221, 4, 44, "Text",ExpressionUUID->"0625593d-290f-4a39-9d80-8e2c6fdbc94e"],
Cell[39303, 1087, 4936, 150, 682, "Input",ExpressionUUID->"1ad4904c-352f-4b1d-a7c7-91e1b0549409"],
Cell[44242, 1239, 2645, 56, 199, "Input",ExpressionUUID->"0221674f-9b63-4662-91bc-ccc8c6ae9589"],
Cell[46890, 1297, 209, 4, 44, "Text",ExpressionUUID->"d2d2257a-487b-416f-bc40-abd4482225f7"],
Cell[47102, 1303, 15306, 397, 2131, "Input",ExpressionUUID->"daea68a9-c9e8-46ab-9bc8-5186e2cf477c"],
Cell[62411, 1702, 137, 2, 44, "Text",ExpressionUUID->"76ba9d5a-7ee3-4888-be7e-6377003275e8"],
Cell[62551, 1706, 521, 12, 30, "Input",ExpressionUUID->"4ec61f4c-3fd3-49ea-b5ef-6f7f04a16b34"]
}, Open ]],
Cell[CellGroupData[{
Cell[56287, 1497, 167, 2, 67, "Section",ExpressionUUID->"a4458b3a-09b5-4e36-a1fc-781d6702b2dc"],
Cell[56457, 1501, 6464, 133, 1207, "Input",ExpressionUUID->"b1b309f8-a3a7-4081-a781-c3845e3cd372"],
Cell[62924, 1636, 448, 10, 30, "Input",ExpressionUUID->"cba42949-b0f2-42ce-aebd-ffadfd83ef88"],
Cell[63375, 1648, 94, 1, 30, "Input",ExpressionUUID->"6175b72c-af9f-43c2-b4ca-bd84c48a456d"]
Cell[63109, 1723, 167, 2, 67, "Section",ExpressionUUID->"a4458b3a-09b5-4e36-a1fc-781d6702b2dc"],
Cell[63279, 1727, 5693, 122, 829, "Input",ExpressionUUID->"b1b309f8-a3a7-4081-a781-c3845e3cd372"],
Cell[68975, 1851, 448, 10, 30, "Input",ExpressionUUID->"cba42949-b0f2-42ce-aebd-ffadfd83ef88"],
Cell[69426, 1863, 94, 1, 30, "Input",ExpressionUUID->"6175b72c-af9f-43c2-b4ca-bd84c48a456d"]
}, Open ]]
}
]

View File

@ -986,18 +986,17 @@ void A2Autils<FImpl>::ContractWWVV(std::vector<PropagatorField> &WWVV,
for(int t=0;t<N_t;t++){
for(int s=0;s<N_s;s++){
auto tmp1 = vs[s]._odata[ss];
vobj tmp2 = zero;
vobj tmp3 = zero;
vobj tmp2 = zero;
for(int d=d_o;d<MIN(d_o+d_unroll,N_d);d++){
Scalar_v coeff = WW_sd(t,s,d);
tmp3 = conjugate(vd[d]._odata[ss]);
mac(&tmp2, &coeff, &tmp3);
}
mac(&tmp2 ,& coeff, & vd[d]._odata[ss]);
}
//////////////////////////
// Fast outer product of tmp1 with a sum of terms suppressed by d_unroll
//////////////////////////
tmp2 = conjugate(tmp2);
for(int s1=0;s1<Ns;s1++){
for(int s2=0;s2<Ns;s2++){
WWVV[t]._odata[ss]()(s1,s2)(0,0) += tmp1()(s1)(0)*tmp2()(s2)(0);
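
Note: both sides of the hunk above share the same structure: for each (t, s) the sum over modes d is folded into a single accumulated spinor tmp2 (the two versions differ only in where the conjugation of vd is applied), and only then is the outer product with tmp1 formed, so the rank-1 update is paid once per (t, s) rather than once per mode. A stand-alone sketch of that restructuring, with plain std::vector in place of the Grid spinor types:

    #include <algorithm>
    #include <complex>
    #include <iostream>
    #include <vector>

    using C = std::complex<double>;

    int main(void) {
      const int n = 3, Nd = 4;
      std::vector<C> vs = {{1, 0}, {0, 1}, {2, -1}};          // "left" vector (tmp1 analogue)
      std::vector<std::vector<C>> vd(Nd, std::vector<C>(n));  // "right" vectors
      std::vector<C> coeff(Nd);                                // WW_sd(t,s,d) analogue
      for (int d = 0; d < Nd; ++d) {
        coeff[d] = C(0.1 * d, 0.2);
        for (int i = 0; i < n; ++i) vd[d][i] = C(d + i, d - i);
      }

      // Naive: one rank-1 update per mode, O(Nd * n * n) work.
      std::vector<C> slow(n * n, C(0, 0));
      for (int d = 0; d < Nd; ++d)
        for (int i = 0; i < n; ++i)
          for (int j = 0; j < n; ++j)
            slow[i * n + j] += coeff[d] * vs[i] * std::conj(vd[d][j]);

      // Restructured: accumulate w = sum_d coeff_d * conj(vd_d), then one outer product.
      std::vector<C> w(n, C(0, 0));
      for (int d = 0; d < Nd; ++d)
        for (int j = 0; j < n; ++j) w[j] += coeff[d] * std::conj(vd[d][j]);
      std::vector<C> fast(n * n, C(0, 0));
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j) fast[i * n + j] = vs[i] * w[j];

      double maxdev = 0.0;
      for (int k = 0; k < n * n; ++k)
        maxdev = std::max(maxdev, std::abs(fast[k] - slow[k]));
      std::cout << "max deviation " << maxdev << std::endl;   // ~1e-16
      return 0;
    }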

View File

@ -1,87 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/scalar/CovariantLaplacian.h
Copyright (C) 2016
Author: Azusa Yamaguchi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
#pragma once
namespace Grid {
namespace QCD {
template <class Gimpl> class CovariantSmearing : public Gimpl
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
typedef typename Gimpl::GaugeLinkField GaugeMat;
typedef typename Gimpl::GaugeField GaugeLorentz;
template<typename T>
static void GaussianSmear(const std::vector<LatticeColourMatrix>& U,
T& chi,
const Real& width, int Iterations, int orthog)
{
GridBase *grid = chi._grid;
T psi(grid);
////////////////////////////////////////////////////////////////////////////////////
// Follow Chroma conventions for width to keep compatibility with previous data
// Free field iterates
// chi = (1 - w^2/4N p^2)^N chi
//
// ~ (e^(-w^2/4N p^2)^N chi
// ~ (e^(-w^2/4 p^2) chi
// ~ (e^(-w'^2/2 p^2) chi [ w' = w/sqrt(2) ]
//
// Which in coordinate space is proportional to
//
// e^(-x^2/w^2) = e^(-x^2/2w'^2)
//
// The 4 is a bit unconventional from Gaussian width perspective, but... it's Chroma convention.
// 2nd derivative approx d^2/dx^2 = x+mu + x-mu - 2x
//
// d^2/dx^2 = - p^2
//
// chi = ( 1 + w^2/4N d^2/dx^2 )^N chi
//
////////////////////////////////////////////////////////////////////////////////////
Real coeff = (width*width) / Real(4*Iterations);
int dims = Nd;
if( orthog < Nd ) dims=Nd-1;
for(int n = 0; n < Iterations; ++n) {
psi = (-2.0*dims)*chi;
for(int mu=0;mu<Nd;mu++) {
if ( mu != orthog ) {
psi = psi + Gimpl::CovShiftForward(U[mu],mu,chi);
psi = psi + Gimpl::CovShiftBackward(U[mu],mu,chi);
}
}
chi = chi + coeff*psi;
}
}
};
}}
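
Note: for reference, the derivation sketched in the comment block of this header, written out in standard notation. With the gauge-covariant lattice Laplacian (the sum running over Nd directions, or Nd-1 when a direction orthog is held fixed),
\[
  \nabla^2\chi(x) \;=\; \sum_{\mu}\Big[\,U_\mu(x)\,\chi(x+\hat\mu)
    + U^\dagger_\mu(x-\hat\mu)\,\chi(x-\hat\mu) - 2\,\chi(x)\Big],
\]
the loop applies
\[
  \chi \;\longrightarrow\; \Big(1 + \tfrac{\omega^2}{4N}\,\nabla^2\Big)^{N}\chi
  \;\xrightarrow{\;N\to\infty\;}\; e^{\,\omega^2\nabla^2/4}\,\chi
  \;=\; e^{-\,\omega^2 p^2/4}\,\chi ,
\]
which in position space is the Gaussian profile proportional to \(e^{-x^2/\omega^2}\) quoted in the comments, the factor 4 being the Chroma convention noted there (coeff = width*width/(4*Iterations) in the code above).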

View File

@ -31,7 +31,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
namespace Grid {
namespace QCD {
template <class Gimpl>
class FourierAcceleratedGaugeFixer : public Gimpl {
public:
@ -46,22 +45,13 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
A[mu] = Ta(U[mu]) * cmi;
}
}
static void DmuAmu(const std::vector<GaugeMat> &A,GaugeMat &dmuAmu,int orthog) {
static void DmuAmu(const std::vector<GaugeMat> &A,GaugeMat &dmuAmu) {
dmuAmu=zero;
for(int mu=0;mu<Nd;mu++){
if ( mu != orthog ) {
dmuAmu = dmuAmu + A[mu] - Cshift(A[mu],mu,-1);
}
dmuAmu = dmuAmu + A[mu] - Cshift(A[mu],mu,-1);
}
}
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
GridBase *grid = Umu._grid;
GaugeMat xform(grid);
SteepestDescentGaugeFix(Umu,xform,alpha,maxiter,Omega_tol,Phi_tol,Fourier,orthog);
}
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,GaugeMat &xform,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false) {
GridBase *grid = Umu._grid;
Real org_plaq =WilsonLoops<Gimpl>::avgPlaquette(Umu);
@ -69,35 +59,16 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
Real old_trace = org_link_trace;
Real trG;
xform=1.0;
std::vector<GaugeMat> U(Nd,grid);
GaugeMat dmuAmu(grid);
GaugeMat dmuAmu(grid);
{
Real plaq =WilsonLoops<Gimpl>::avgPlaquette(Umu);
Real link_trace=WilsonLoops<Gimpl>::linkTrace(Umu);
if( (orthog>=0) && (orthog<Nd) ){
std::cout << GridLogMessage << " Gauge fixing to Coulomb gauge time="<<orthog<< " plaq= "<<plaq<<" link trace = "<<link_trace<< std::endl;
} else {
std::cout << GridLogMessage << " Gauge fixing to Landau gauge plaq= "<<plaq<<" link trace = "<<link_trace<< std::endl;
}
}
for(int i=0;i<maxiter;i++){
for(int mu=0;mu<Nd;mu++) U[mu]= PeekIndex<LorentzIndex>(Umu,mu);
if ( Fourier==false ) {
trG = SteepestDescentStep(U,xform,alpha,dmuAmu,orthog);
trG = SteepestDescentStep(U,alpha,dmuAmu);
} else {
trG = FourierAccelSteepestDescentStep(U,xform,alpha,dmuAmu,orthog);
trG = FourierAccelSteepestDescentStep(U,alpha,dmuAmu);
}
// std::cout << GridLogMessage << "trG "<< trG<< std::endl;
// std::cout << GridLogMessage << "xform "<< norm2(xform)<< std::endl;
// std::cout << GridLogMessage << "dmuAmu "<< norm2(dmuAmu)<< std::endl;
for(int mu=0;mu<Nd;mu++) PokeIndex<LorentzIndex>(Umu,U[mu],mu);
// Monitor progress and convergence test
// infrequently to minimise cost overhead
@ -113,6 +84,7 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
Real Phi = 1.0 - old_trace / link_trace ;
Real Omega= 1.0 - trG;
std::cout << GridLogMessage << " Iteration "<<i<< " Phi= "<<Phi<< " Omega= " << Omega<< " trG " << trG <<std::endl;
if ( (Omega < Omega_tol) && ( ::fabs(Phi) < Phi_tol) ) {
std::cout << GridLogMessage << "Converged ! "<<std::endl;
@ -124,26 +96,25 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
}
}
};
static Real SteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {
static Real SteepestDescentStep(std::vector<GaugeMat> &U,Real & alpha, GaugeMat & dmuAmu) {
GridBase *grid = U[0]._grid;
std::vector<GaugeMat> A(Nd,grid);
GaugeMat g(grid);
GaugeLinkToLieAlgebraField(U,A);
ExpiAlphaDmuAmu(A,g,alpha,dmuAmu,orthog);
ExpiAlphaDmuAmu(A,g,alpha,dmuAmu);
Real vol = grid->gSites();
Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc;
xform = g*xform ;
SU<Nc>::GaugeTransform(U,g);
return trG;
}
static Real FourierAccelSteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {
static Real FourierAccelSteepestDescentStep(std::vector<GaugeMat> &U,Real & alpha, GaugeMat & dmuAmu) {
GridBase *grid = U[0]._grid;
@ -162,41 +133,38 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
GaugeLinkToLieAlgebraField(U,A);
DmuAmu(A,dmuAmu,orthog);
DmuAmu(A,dmuAmu);
std::vector<int> mask(Nd,1);
for(int mu=0;mu<Nd;mu++) if (mu==orthog) mask[mu]=0;
theFFT.FFT_dim_mask(dmuAmu_p,dmuAmu,mask,FFT::forward);
theFFT.FFT_all_dim(dmuAmu_p,dmuAmu,FFT::forward);
//////////////////////////////////
// Work out Fp = psq_max/ psq...
// Avoid singularities in Fp
//////////////////////////////////
std::vector<int> latt_size = grid->GlobalDimensions();
std::vector<int> coor(grid->_ndimension,0);
for(int mu=0;mu<Nd;mu++) {
if ( mu != orthog ) {
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(pmu,mu);
pmu = TwoPiL * pmu ;
psq = psq + 4.0*sin(pmu*0.5)*sin(pmu*0.5);
}
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(pmu,mu);
pmu = TwoPiL * pmu ;
psq = psq + 4.0*sin(pmu*0.5)*sin(pmu*0.5);
}
Complex psqMax(16.0);
Fp = psqMax*one/psq;
pokeSite(TComplex(16.0),Fp,coor);
if( (orthog>=0) && (orthog<Nd) ){
for(int t=0;t<grid->GlobalDimensions()[orthog];t++){
coor[orthog]=t;
pokeSite(TComplex(16.0),Fp,coor);
}
}
/*
static int once;
if ( once == 0 ) {
std::cout << " Fp " << Fp <<std::endl;
once ++;
}*/
pokeSite(TComplex(1.0),Fp,coor);
dmuAmu_p = dmuAmu_p * Fp;
theFFT.FFT_dim_mask(dmuAmu,dmuAmu_p,mask,FFT::backward);
theFFT.FFT_all_dim(dmuAmu,dmuAmu_p,FFT::backward);
GaugeMat ciadmam(grid);
Complex cialpha(0.0,-alpha);
@ -205,17 +173,16 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
Real trG = TensorRemove(sum(trace(g))).real()/vol/Nc;
xform = g*xform ;
SU<Nc>::GaugeTransform(U,g);
return trG;
}
static void ExpiAlphaDmuAmu(const std::vector<GaugeMat> &A,GaugeMat &g,Real & alpha, GaugeMat &dmuAmu,int orthog) {
static void ExpiAlphaDmuAmu(const std::vector<GaugeMat> &A,GaugeMat &g,Real & alpha, GaugeMat &dmuAmu) {
GridBase *grid = g._grid;
Complex cialpha(0.0,-alpha);
GaugeMat ciadmam(grid);
DmuAmu(A,dmuAmu,orthog);
DmuAmu(A,dmuAmu);
ciadmam = dmuAmu*cialpha;
SU<Nc>::taExp(ciadmam,g);
}
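
Note: a hedged usage sketch of the extended interface above (the overload taking the accumulated transform xform and the orthogonal direction orthog, present on one side of this comparison). Passing orthog = Tdir removes the time direction from DmuAmu and from the momentum-space preconditioner Fp, i.e. Coulomb rather than Landau gauge; the starting configuration, tolerances and step size below are illustrative only:

    #include <Grid/Grid.h>
    using namespace Grid;
    using namespace Grid::QCD;

    int main(int argc, char **argv) {
      Grid_init(&argc, &argv);
      GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(
          GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
      GridParallelRNG RNG(UGrid);
      RNG.SeedFixedIntegers({1, 2, 3, 4});

      LatticeGaugeField Umu(UGrid);
      SU<Nc>::HotConfiguration(RNG, Umu);     // random starting configuration
      LatticeColourMatrix xform(UGrid);       // accumulated transform g(x), set inside

      Real alpha = 0.1;
      // orthog = Tdir -> Coulomb gauge; orthog = -1 (default) -> Landau gauge.
      FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(
          Umu, xform, alpha, 10000, 1.0e-12, 1.0e-12, true /*Fourier*/, Tdir /*orthog*/);

      Grid_finalize();
      return 0;
    }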

View File

@ -676,18 +676,10 @@ class SU {
}
}
/*
* Fundamental rep gauge xform
*/
template<typename Fundamental,typename GaugeMat>
static void GaugeTransformFundamental( Fundamental &ferm, GaugeMat &g){
GridBase *grid = ferm._grid;
conformable(grid,g._grid);
ferm = g*ferm;
}
/*
* Adjoint rep gauge xform
*/
template<typename GaugeField,typename GaugeMat>
add GaugeTrans
*/
template<typename GaugeField,typename GaugeMat>
static void GaugeTransform( GaugeField &Umu, GaugeMat &g){
GridBase *grid = Umu._grid;
conformable(grid,g._grid);

View File

@ -33,72 +33,8 @@ Author: Guido Cossu <guido.cossu@ed.ac.uk>
#include <type_traits>
#include <Grid/tensors/Tensors.h>
#include <Grid/serialisation/VectorUtils.h>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
namespace Grid {
namespace EigenIO {
// EigenIO works for scalars that are not just Grid supported scalars
template<typename T, typename V = void> struct is_complex : public std::false_type {};
// Support all complex types (not just Grid complex types) - even if the definitions overlap (!)
template<typename T> struct is_complex< T , typename
std::enable_if< ::Grid::is_complex< T >::value>::type> : public std::true_type {};
template<typename T> struct is_complex<std::complex<T>, typename
std::enable_if<!::Grid::is_complex<std::complex<T>>::value>::type> : public std::true_type {};
// Helpers to support I/O for Eigen tensors of arithmetic scalars, complex types, or Grid tensors
template<typename T, typename V = void> struct is_scalar : public std::false_type {};
template<typename T> struct is_scalar<T, typename std::enable_if<std::is_arithmetic<T>::value || is_complex<T>::value>::type> : public std::true_type {};
// Is this an Eigen tensor
template<typename T> struct is_tensor : std::integral_constant<bool,
std::is_base_of<Eigen::TensorBase<T, Eigen::ReadOnlyAccessors>, T>::value> {};
// Is this an Eigen tensor of a supported scalar
template<typename T, typename V = void> struct is_tensor_of_scalar : public std::false_type {};
template<typename T> struct is_tensor_of_scalar<T, typename std::enable_if<is_tensor<T>::value && is_scalar<typename T::Scalar>::value>::type> : public std::true_type {};
// Is this an Eigen tensor of a supported container
template<typename T, typename V = void> struct is_tensor_of_container : public std::false_type {};
template<typename T> struct is_tensor_of_container<T, typename std::enable_if<is_tensor<T>::value && isGridTensor<typename T::Scalar>::value>::type> : public std::true_type {};
// These traits describe the scalars inside Eigen tensors
// I wish I could define these in reference to the scalar type (so there would be fewer traits defined)
// but I'm unable to find a syntax to make this work
template<typename T, typename V = void> struct Traits {};
// Traits are the default for scalars, or come from GridTypeMapper for GridTensors
template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_scalar<T>::value>::type>
: public GridTypeMapper_Base {
using scalar_type = typename T::Scalar; // ultimate base scalar
static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
};
// Traits are the default for scalars, or come from GridTypeMapper for GridTensors
template<typename T> struct Traits<T, typename std::enable_if<is_tensor_of_container<T>::value>::type> {
using BaseTraits = GridTypeMapper<typename T::Scalar>;
using scalar_type = typename BaseTraits::scalar_type; // ultimate base scalar
static constexpr bool is_complex = ::Grid::EigenIO::is_complex<scalar_type>::value;
static constexpr int TensorLevel = BaseTraits::TensorLevel;
static constexpr int Rank = BaseTraits::Rank;
static constexpr std::size_t count = BaseTraits::count;
static constexpr int Dimension(int dim) { return BaseTraits::Dimension(dim); }
};
// Is this a fixed-size Eigen tensor
template<typename T> struct is_tensor_fixed : public std::false_type {};
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
struct is_tensor_fixed<Eigen::TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType>>
: public std::true_type {};
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType,
int MapOptions_, template <class> class MapPointer_>
struct is_tensor_fixed<Eigen::TensorMap<Eigen::TensorFixedSize<Scalar_, Dimensions_,
Options_, IndexType>, MapOptions_, MapPointer_>>
: public std::true_type {};
// Is this a variable-size Eigen tensor
template<typename T, typename V = void> struct is_tensor_variable : public std::false_type {};
template<typename T> struct is_tensor_variable<T, typename std::enable_if<is_tensor<T>::value
&& !is_tensor_fixed<T>::value>::type> : public std::true_type {};
}
// Abstract writer/reader classes ////////////////////////////////////////////
// static polymorphism implemented using CRTP idiom
class Serializable;
@ -113,10 +49,10 @@ namespace Grid {
void push(const std::string &s);
void pop(void);
template <typename U>
typename std::enable_if<std::is_base_of<Serializable, U>::value>::type
typename std::enable_if<std::is_base_of<Serializable, U>::value, void>::type
write(const std::string& s, const U &output);
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value && !EigenIO::is_tensor<U>::value>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
write(const std::string& s, const U &output);
template <typename U>
void write(const std::string &s, const iScalar<U> &output);
@ -124,42 +60,6 @@ namespace Grid {
void write(const std::string &s, const iVector<U, N> &output);
template <typename U, int N>
void write(const std::string &s, const iMatrix<U, N> &output);
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value>::type
write(const std::string &s, const ETensor &output);
// Helper functions for Scalar vs Container specialisations
template <typename ETensor>
inline typename std::enable_if<EigenIO::is_tensor_of_scalar<ETensor>::value,
const typename ETensor::Scalar *>::type
getFirstScalar(const ETensor &output)
{
return output.data();
}
template <typename ETensor>
inline typename std::enable_if<EigenIO::is_tensor_of_container<ETensor>::value,
const typename EigenIO::Traits<ETensor>::scalar_type *>::type
getFirstScalar(const ETensor &output)
{
return output.data()->begin();
}
template <typename S>
inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
copyScalars(S * &pCopy, const S &Source)
{
* pCopy ++ = Source;
}
template <typename S>
inline typename std::enable_if<isGridTensor<S>::value, void>::type
copyScalars(typename GridTypeMapper<S>::scalar_type * &pCopy, const S &Source)
{
for( const typename GridTypeMapper<S>::scalar_type &item : Source )
* pCopy ++ = item;
}
void scientificFormat(const bool set);
bool isScientific(void);
void setPrecision(const unsigned int prec);
@ -183,8 +83,7 @@ namespace Grid {
typename std::enable_if<std::is_base_of<Serializable, U>::value, void>::type
read(const std::string& s, U &output);
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value
&& !EigenIO::is_tensor<U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
read(const std::string& s, U &output);
template <typename U>
void read(const std::string &s, iScalar<U> &output);
@ -192,32 +91,6 @@ namespace Grid {
void read(const std::string &s, iVector<U, N> &output);
template <typename U, int N>
void read(const std::string &s, iMatrix<U, N> &output);
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
read(const std::string &s, ETensor &output);
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, void>::type
Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims );
// Helper functions for Scalar vs Container specialisations
template <typename S>
inline typename std::enable_if<EigenIO::is_scalar<S>::value, void>::type
copyScalars(S &Dest, const S * &pSource)
{
Dest = * pSource ++;
}
template <typename S>
inline typename std::enable_if<isGridTensor<S>::value, void>::type
copyScalars(S &Dest, const typename GridTypeMapper<S>::scalar_type * &pSource)
{
for( typename GridTypeMapper<S>::scalar_type &item : Dest )
item = * pSource ++;
}
protected:
template <typename U>
void fromString(U &output, const std::string &s);
@ -262,14 +135,12 @@ namespace Grid {
template <typename T>
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value
&& !EigenIO::is_tensor<U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
Writer<T>::write(const std::string &s, const U &output)
{
upcast->writeDefault(s, output);
}
template <typename T>
template <typename U>
void Writer<T>::write(const std::string &s, const iScalar<U> &output)
@ -291,57 +162,6 @@ namespace Grid {
upcast->writeDefault(s, tensorToVec(output));
}
// Eigen::Tensors of Grid tensors (iScalar, iVector, iMatrix)
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
Writer<T>::write(const std::string &s, const ETensor &output)
{
using Index = typename ETensor::Index;
using Container = typename ETensor::Scalar; // NB: could be same as scalar
using Traits = EigenIO::Traits<ETensor>;
using Scalar = typename Traits::scalar_type; // type of the underlying scalar
constexpr unsigned int TensorRank{ETensor::NumIndices};
constexpr unsigned int ContainerRank{Traits::Rank}; // Only non-zero for containers
constexpr unsigned int TotalRank{TensorRank + ContainerRank};
const Index NumElements{output.size()};
assert( NumElements > 0 );
// Get the dimensionality of the tensor
std::vector<std::size_t> TotalDims(TotalRank);
for(auto i = 0; i < TensorRank; i++ ) {
auto dim = output.dimension(i);
TotalDims[i] = static_cast<size_t>(dim);
assert( TotalDims[i] == dim ); // check we didn't lose anything in the conversion
}
for(auto i = 0; i < ContainerRank; i++ )
TotalDims[TensorRank + i] = Traits::Dimension(i);
// If the Tensor isn't in Row-Major order, then we'll need to copy it's data
const bool CopyData{NumElements > 1 && ETensor::Layout != Eigen::StorageOptions::RowMajor};
const Scalar * pWriteBuffer;
std::vector<Scalar> CopyBuffer;
const Index TotalNumElements = NumElements * Traits::count;
if( !CopyData ) {
pWriteBuffer = getFirstScalar( output );
} else {
// Regardless of the Eigen::Tensor storage order, the copy will be Row Major
CopyBuffer.resize( TotalNumElements );
Scalar * pCopy = &CopyBuffer[0];
pWriteBuffer = pCopy;
std::array<Index, TensorRank> MyIndex;
for( auto &idx : MyIndex ) idx = 0;
for( auto n = 0; n < NumElements; n++ ) {
const Container & c = output( MyIndex );
copyScalars( pCopy, c );
// Now increment the index
for( int i = output.NumDimensions - 1; i >= 0 && ++MyIndex[i] == output.dimension(i); i-- )
MyIndex[i] = 0;
}
}
upcast->template writeMultiDim<Scalar>(s, TotalDims, pWriteBuffer, TotalNumElements);
}
template <typename T>
void Writer<T>::scientificFormat(const bool set)
{
@ -395,8 +215,7 @@ namespace Grid {
template <typename T>
template <typename U>
typename std::enable_if<!std::is_base_of<Serializable, U>::value
&& !EigenIO::is_tensor<U>::value, void>::type
typename std::enable_if<!std::is_base_of<Serializable, U>::value, void>::type
Reader<T>::read(const std::string &s, U &output)
{
upcast->readDefault(s, output);
@ -432,79 +251,6 @@ namespace Grid {
vecToTensor(output, v);
}
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor<ETensor>::value, void>::type
Reader<T>::read(const std::string &s, ETensor &output)
{
using Index = typename ETensor::Index;
using Container = typename ETensor::Scalar; // NB: could be same as scalar
using Traits = EigenIO::Traits<ETensor>;
using Scalar = typename Traits::scalar_type; // type of the underlying scalar
constexpr unsigned int TensorRank{ETensor::NumIndices};
constexpr unsigned int ContainerRank{Traits::Rank}; // Only non-zero for containers
constexpr unsigned int TotalRank{TensorRank + ContainerRank};
using ETDims = std::array<Index, TensorRank>; // Dimensions of the tensor
// read the (flat) data and dimensionality
std::vector<std::size_t> dimData;
std::vector<Scalar> buf;
upcast->readMultiDim( s, buf, dimData );
assert(dimData.size() == TotalRank && "EigenIO: Tensor rank mismatch" );
// Make sure that the number of elements read matches dimensions read
std::size_t NumContainers = 1;
for( auto i = 0 ; i < TensorRank ; i++ )
NumContainers *= dimData[i];
// If our scalar object is a Container, make sure it's dimensions match what we read back
std::size_t ElementsPerContainer = 1;
for( auto i = 0 ; i < ContainerRank ; i++ ) {
assert( dimData[TensorRank+i] == Traits::Dimension(i) && "Tensor Container dimensions don't match data" );
ElementsPerContainer *= dimData[TensorRank+i];
}
assert( NumContainers * ElementsPerContainer == buf.size() && "EigenIO: Number of elements != product of dimensions" );
// Now see whether the tensor is the right shape, or can be made to be
const auto & dims = output.dimensions();
bool bShapeOK = (output.data() != nullptr);
for( auto i = 0; bShapeOK && i < TensorRank ; i++ )
if( dims[i] != dimData[i] )
bShapeOK = false;
// Make the tensor the same size as the data read
ETDims MyIndex;
if( !bShapeOK ) {
for( auto i = 0 ; i < TensorRank ; i++ )
MyIndex[i] = dimData[i];
Reshape(output, MyIndex);
}
// Copy the data into the tensor
for( auto &d : MyIndex ) d = 0;
const Scalar * pSource = &buf[0];
for( std::size_t n = 0 ; n < NumContainers ; n++ ) {
Container & c = output( MyIndex );
copyScalars( c, pSource );
// Now increment the index
for( int i = TensorRank - 1; i != -1 && ++MyIndex[i] == dims[i]; i-- )
MyIndex[i] = 0;
}
assert( pSource == &buf[NumContainers * ElementsPerContainer] );
}
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_fixed<ETensor>::value, void>::type
Reader<T>::Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims )
{
assert( 0 && "EigenIO: Fixed tensor dimensions can't be changed" );
}
template <typename T>
template <typename ETensor>
typename std::enable_if<EigenIO::is_tensor_variable<ETensor>::value, void>::type
Reader<T>::Reshape(ETensor &t, const std::array<typename ETensor::Index, ETensor::NumDimensions> &dims )
{
//t.reshape( dims );
t.resize( dims );
}
template <typename T>
template <typename U>
void Reader<T>::fromString(U &output, const std::string &s)
@ -543,68 +289,6 @@ namespace Grid {
{
return os;
}
template <typename T1, typename T2>
static inline typename std::enable_if<!EigenIO::is_tensor<T1>::value || !EigenIO::is_tensor<T2>::value, bool>::type
CompareMember(const T1 &lhs, const T2 &rhs) {
return lhs == rhs;
}
template <typename T1, typename T2>
static inline typename std::enable_if<EigenIO::is_tensor<T1>::value && EigenIO::is_tensor<T2>::value, bool>::type
CompareMember(const T1 &lhs, const T2 &rhs) {
// First check whether dimensions match (Eigen tensor library will assert if they don't match)
bool bReturnValue = (T1::NumIndices == T2::NumIndices);
for( auto i = 0 ; bReturnValue && i < T1::NumIndices ; i++ )
bReturnValue = ( lhs.dimension(i) == rhs.dimension(i) );
if( bReturnValue ) {
Eigen::Tensor<bool, 0, T1::Options> bResult = (lhs == rhs).all();
bReturnValue = bResult(0);
}
return bReturnValue;
}
template <typename T>
static inline typename std::enable_if<EigenIO::is_tensor<T>::value, bool>::type
CompareMember(const std::vector<T> &lhs, const std::vector<T> &rhs) {
const auto NumElements = lhs.size();
bool bResult = ( NumElements == rhs.size() );
for( auto i = 0 ; i < NumElements && bResult ; i++ )
bResult = CompareMember(lhs[i], rhs[i]);
return bResult;
}
template <typename T>
static inline typename std::enable_if<!EigenIO::is_tensor<T>::value, void>::type
WriteMember(std::ostream &os, const T &object) {
os << object;
}
template <typename T>
static inline typename std::enable_if<EigenIO::is_tensor<T>::value, void>::type
WriteMember(std::ostream &os, const T &object) {
using Index = typename T::Index;
const Index NumElements{object.size()};
assert( NumElements > 0 );
Index count = 1;
os << "T<";
for( int i = 0; i < T::NumIndices; i++ ) {
Index dim = object.dimension(i);
count *= dim;
if( i )
os << ",";
os << dim;
}
assert( count == NumElements && "Number of elements doesn't match tensor dimensions" );
os << ">{";
const typename T::Scalar * p = object.data();
for( Index i = 0; i < count; i++ ) {
if( i )
os << ",";
os << *p++;
}
os << "}";
}
};
// Generic writer interface //////////////////////////////////////////////////
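
Note: a hedged usage sketch of the Eigen-tensor support above (present on one side of this comparison): the write overload stores the tensor extents together with the flattened row-major data through the backend's writeMultiDim, and the read overload resizes a variable-size tensor to match what it finds. The backend, file name and tensor shape here are illustrative and not taken from the Grid test suite:

    #include <Grid/Grid.h>
    #include <Grid/Eigen/unsupported/CXX11/Tensor>
    using namespace Grid;

    int main(void) {
      Eigen::Tensor<ComplexD, 3> t(2, 3, 4);   // variable-size rank-3 tensor
      t.setRandom();
      {
        XmlWriter writer("tensor.xml");
        write(writer, "t", t);                 // extents + flat row-major data
      }
      Eigen::Tensor<ComplexD, 3> u;            // resized by the Reshape helper on read
      {
        XmlReader reader("tensor.xml");
        read(reader, "t", u);
      }
      return 0;
    }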

View File

@ -51,8 +51,6 @@ namespace Grid {
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
void writeDefault(const std::string &s, const char *x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
private:
std::ofstream file_;
};
@ -68,8 +66,6 @@ namespace Grid {
void readDefault(const std::string &s, U &output);
template <typename U>
void readDefault(const std::string &s, std::vector<U> &output);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
private:
std::ifstream file_;
};
@ -96,27 +92,6 @@ namespace Grid {
}
}
template <typename U>
void BinaryWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
uint64_t rank = static_cast<uint64_t>( Dimensions.size() );
uint64_t tmp = 1;
for( auto i = 0 ; i < rank ; i++ )
tmp *= Dimensions[i];
assert( tmp == NumElements && "Dimensions don't match size of data being written" );
// Total number of elements
write("", tmp);
// Number of dimensions
write("", rank);
// Followed by each dimension
for( auto i = 0 ; i < rank ; i++ ) {
tmp = Dimensions[i];
write("", tmp);
}
for( auto i = 0; i < NumElements; ++i)
write("", pDataRowMajor[i]);
}
// Reader template implementation ////////////////////////////////////////////
template <typename U>
void BinaryReader::readDefault(const std::string &s, U &output)
@ -139,30 +114,6 @@ namespace Grid {
read("", output[i]);
}
}
template <typename U>
void BinaryReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
// Number of elements
uint64_t NumElements;
read("", NumElements);
// Number of dimensions
uint64_t rank;
read("", rank);
// Followed by each dimension
uint64_t count = 1;
dim.resize(rank);
uint64_t tmp;
for( auto i = 0 ; i < rank ; i++ ) {
read("", tmp);
dim[i] = tmp;
count *= tmp;
}
assert( count == NumElements && "Dimensions don't match size of data being read" );
buf.resize(count);
for( auto i = 0; i < count; ++i)
read("", buf[i]);
}
}
#endif
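
Note: the binary record produced by writeMultiDim above is simply the total element count, the rank and the extents (all as uint64_t), followed by the elements in row-major order; readMultiDim reverses this and checks the count against the product of the extents. A hedged round-trip sketch, with file and tag names purely illustrative:

    #include <Grid/Grid.h>
    using namespace Grid;

    int main(void) {
      std::vector<size_t> dims = {2, 3};
      std::vector<double> data = {0, 1, 2, 10, 11, 12};   // row-major 2 x 3 block
      {
        BinaryWriter writer("block.bin");
        writer.writeMultiDim("block", dims, data.data(), data.size());
      }
      std::vector<double> buf;
      std::vector<size_t> dimsRead;
      {
        BinaryReader reader("block.bin");
        reader.readMultiDim("block", buf, dimsRead);      // restores {2, 3} and the 6 values
      }
      return 0;
    }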

View File

@ -3,7 +3,6 @@
#include <stack>
#include <string>
#include <list>
#include <vector>
#include <H5Cpp.h>
#include <Grid/tensors/Tensors.h>
@ -39,8 +38,6 @@ namespace Grid
template <typename U>
typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
H5NS::Group & getGroup(void);
private:
template <typename U>
@ -51,7 +48,7 @@ namespace Grid
std::vector<std::string> path_;
H5NS::H5File file_;
H5NS::Group group_;
const unsigned int dataSetThres_{HDF5_DEF_DATASET_THRES};
unsigned int dataSetThres_{HDF5_DEF_DATASET_THRES};
};
class Hdf5Reader: public Reader<Hdf5Reader>
@ -69,8 +66,6 @@ namespace Grid
template <typename U>
typename std::enable_if<!element<std::vector<U>>::is_number, void>::type
readDefault(const std::string &s, std::vector<U> &x);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
H5NS::Group & getGroup(void);
private:
template <typename U>
@ -106,75 +101,6 @@ namespace Grid
template <>
void Hdf5Writer::writeDefault(const std::string &s, const std::string &x);
template <typename U>
void Hdf5Writer::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
// Hdf5 needs the dimensions as hsize_t
const int rank = static_cast<int>(Dimensions.size());
std::vector<hsize_t> dim(rank);
for(int i = 0; i < rank; i++)
dim[i] = Dimensions[i];
// write the entire dataset to file
H5NS::DataSpace dataSpace(rank, dim.data());
if (NumElements > dataSetThres_)
{
// Make sure 1) each dimension; and 2) chunk size is < 4GB
const hsize_t MaxElements = ( sizeof( U ) == 1 ) ? 0xffffffff : 0x100000000 / sizeof( U );
hsize_t ElementsPerChunk = 1;
bool bTooBig = false;
for( int i = rank - 1 ; i != -1 ; i-- ) {
auto &d = dim[i];
if( bTooBig )
d = 1; // Chunk size is already as big as can be - remaining dimensions = 1
else {
// If individual dimension too big, reduce by prime factors if possible
while( d > MaxElements && ( d & 1 ) == 0 )
d >>= 1;
const char ErrorMsg[] = " dimension > 4GB and not divisible by 2^n. "
"Hdf5IO chunk size will be inefficient. NB Serialisation is not intended for large datasets - please consider alternatives.";
if( d > MaxElements ) {
std::cout << GridLogWarning << "Individual" << ErrorMsg << std::endl;
hsize_t quotient = d / MaxElements;
if( d % MaxElements )
quotient++;
d /= quotient;
}
// Now make sure overall size is not too big
hsize_t OverflowCheck = ElementsPerChunk;
ElementsPerChunk *= d;
assert( OverflowCheck == ElementsPerChunk / d && "Product of dimensions overflowed hsize_t" );
// If product of dimensions too big, reduce by prime factors
while( ElementsPerChunk > MaxElements && ( ElementsPerChunk & 1 ) == 0 ) {
bTooBig = true;
d >>= 1;
ElementsPerChunk >>= 1;
}
if( ElementsPerChunk > MaxElements ) {
std::cout << GridLogWarning << "Product of" << ErrorMsg << std::endl;
hsize_t quotient = ElementsPerChunk / MaxElements;
if( ElementsPerChunk % MaxElements )
quotient++;
d /= quotient;
ElementsPerChunk /= quotient;
}
}
}
H5NS::DataSet dataSet;
H5NS::DSetCreatPropList plist;
plist.setChunk(rank, dim.data());
plist.setFletcher32();
dataSet = group_.createDataSet(s, Hdf5Type<U>::type(), dataSpace, plist);
dataSet.write(pDataRowMajor, Hdf5Type<U>::type());
}
else
{
H5NS::Attribute attribute;
attribute = group_.createAttribute(s, Hdf5Type<U>::type(), dataSpace);
attribute.write(Hdf5Type<U>::type(), pDataRowMajor);
}
}
template <typename U>
typename std::enable_if<element<std::vector<U>>::is_number, void>::type
Hdf5Writer::writeDefault(const std::string &s, const std::vector<U> &x)
@ -184,11 +110,34 @@ namespace Grid
// flatten the vector and getting dimensions
Flatten<std::vector<U>> flat(x);
std::vector<size_t> dim;
std::vector<hsize_t> dim;
const auto &flatx = flat.getFlatVector();
for (auto &d: flat.getDim())
{
dim.push_back(d);
writeMultiDim<Element>(s, dim, &flatx[0], flatx.size());
}
// write to file
H5NS::DataSpace dataSpace(dim.size(), dim.data());
if (flatx.size() > dataSetThres_)
{
H5NS::DataSet dataSet;
H5NS::DSetCreatPropList plist;
plist.setChunk(dim.size(), dim.data());
plist.setFletcher32();
dataSet = group_.createDataSet(s, Hdf5Type<Element>::type(), dataSpace, plist);
dataSet.write(flatx.data(), Hdf5Type<Element>::type());
}
else
{
H5NS::Attribute attribute;
attribute = group_.createAttribute(s, Hdf5Type<Element>::type(), dataSpace);
attribute.write(Hdf5Type<Element>::type(), flatx.data());
}
}
template <typename U>
@ -226,7 +175,8 @@ namespace Grid
void Hdf5Reader::readDefault(const std::string &s, std::string &x);
template <typename U>
void Hdf5Reader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
typename std::enable_if<element<std::vector<U>>::is_number, void>::type
Hdf5Reader::readDefault(const std::string &s, std::vector<U> &x)
{
// alias to element type
typedef typename element<std::vector<U>>::type Element;
@ -234,6 +184,7 @@ namespace Grid
// read the dimensions
H5NS::DataSpace dataSpace;
std::vector<hsize_t> hdim;
std::vector<size_t> dim;
hsize_t size = 1;
if (group_.attrExists(s))
@ -253,7 +204,7 @@ namespace Grid
}
// read the flat vector
buf.resize(size);
std::vector<Element> buf(size);
if (size > dataSetThres_)
{
@ -269,18 +220,6 @@ namespace Grid
attribute = group_.openAttribute(s);
attribute.read(Hdf5Type<Element>::type(), buf.data());
}
}
template <typename U>
typename std::enable_if<element<std::vector<U>>::is_number, void>::type
Hdf5Reader::readDefault(const std::string &s, std::vector<U> &x)
{
// alias to element type
typedef typename element<std::vector<U>>::type Element;
std::vector<size_t> dim;
std::vector<Element> buf;
readMultiDim( s, buf, dim );
// reconstruct the multidimensional vector
Reconstruct<std::vector<U>> r(buf, dim);
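
Note: a worked example of the chunk-size reduction in writeMultiDim above, assuming sizeof(U) = 16 bytes (a double-precision complex), so MaxElements = 0x100000000 / 16 = 2^28. For a 65536 x 65536 dataset the loop starts from the fastest-varying extent: 65536 elements fit, giving ElementsPerChunk = 2^16; including the slower extent would give 2^32 > 2^28, so that extent is halved four times, leaving a 4096 x 65536 chunk of exactly 2^28 elements (4 GiB), and any remaining slower extents would then receive chunk extent 1. The GridLogWarning branches only fire when an extent, or the running product, exceeds the limit and cannot be brought under it by repeated halving.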

View File

@ -109,8 +109,8 @@ THE SOFTWARE.
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define GRID_MACRO_MEMBER(A,B) A B;
#define GRID_MACRO_COMP_MEMBER(A,B) result = (result and CompareMember(lhs. B, rhs. B));
#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" " #B << " = "; WriteMember( os, obj. B ); os << " ; " <<std::endl;
#define GRID_MACRO_COMP_MEMBER(A,B) result = (result and (lhs. B == rhs. B));
#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" " #B << " = " << obj. B << " ; " <<std::endl;
#define GRID_MACRO_READ_MEMBER(A,B) Grid::read(RD,#B,obj. B);
#define GRID_MACRO_WRITE_MEMBER(A,B) Grid::write(WR,#B,obj. B);
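
Note: for a hypothetical member declared as double mass; the two sides of the hunk above expand as follows; the change routes comparison and printing through the CompareMember / WriteMember helpers of BaseIO.h (which add the Eigen-tensor cases) instead of using == and << directly:

    // Helper-based definitions:
    result = (result and CompareMember(lhs.mass, rhs.mass));
    os << "double" << " " "mass" << " = "; WriteMember(os, obj.mass); os << " ; " << std::endl;
    // Direct definitions:
    result = (result and (lhs.mass == rhs.mass));
    os << "double" << " " "mass" << " = " << obj.mass << " ; " << std::endl;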

View File

@ -51,8 +51,6 @@ namespace Grid
void writeDefault(const std::string &s, const U &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
private:
void indent(void);
private:
@ -71,8 +69,6 @@ namespace Grid
void readDefault(const std::string &s, U &output);
template <typename U>
void readDefault(const std::string &s, std::vector<U> &output);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
private:
void checkIndent(void);
private:
@ -100,17 +96,6 @@ namespace Grid
}
}
template <typename U>
void TextWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
uint64_t Rank = Dimensions.size();
write(s, Rank);
for( uint64_t d : Dimensions )
write(s, d);
while( NumElements-- )
write(s, *pDataRowMajor++);
}
// Reader template implementation ////////////////////////////////////////////
template <typename U>
void TextReader::readDefault(const std::string &s, U &output)
@ -136,23 +121,6 @@ namespace Grid
read("", output[i]);
}
}
template <typename U>
void TextReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
const char sz[] = "";
uint64_t Rank;
read(sz, Rank);
dim.resize( Rank );
size_t NumElements = 1;
for( auto &d : dim ) {
read(sz, d);
NumElements *= d;
}
buf.resize( NumElements );
for( auto &x : buf )
read(s, x);
}
}
#endif
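
TextWriter::writeMultiDim / TextReader::readMultiDim store a tensor as its rank, then each dimension, then the row-major elements. A sketch of reading that layout back from a plain istream, mirroring the loop structure above but outside of Grid's TextReader:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <vector>

int main() {
  // rank, dims, then elements in row-major order
  std::istringstream is("2\n2\n3\n1 2 3 4 5 6\n");
  std::uint64_t rank;
  is >> rank;
  std::vector<std::size_t> dim(rank);
  std::size_t n = 1;
  for (auto &d : dim) { is >> d; n *= d; }
  std::vector<double> buf(n);
  for (auto &x : buf) is >> x;
  std::cout << "read " << buf.size() << " elements of a "
            << dim[0] << "x" << dim[1] << " tensor\n";
}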

View File

@ -1,32 +1,3 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./Grid/serialisation/VectorUtils.h
Copyright (C) 2015
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_SERIALISATION_VECTORUTILS_H
#define GRID_SERIALISATION_VECTORUTILS_H
@ -82,17 +53,6 @@ namespace Grid {
return os;
}
// std::vector<std:vector<...>> nested to specified Rank //////////////////////////////////
template<typename T, unsigned int Rank>
struct NestedStdVector {
typedef typename std::vector<typename NestedStdVector<T, Rank - 1>::type> type;
};
template<typename T>
struct NestedStdVector<T,0> {
typedef T type;
};
// Grid scalar tensors to nested std::vectors //////////////////////////////////
template <typename T>
struct TensorToVec
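
NestedStdVector<T, Rank> builds the nested std::vector type recursively, terminating at Rank 0 with T itself. A compile-time check of what it expands to, restating the two template definitions shown above:

#include <type_traits>
#include <vector>

template <typename T, unsigned int Rank>
struct NestedStdVector {
  typedef typename std::vector<typename NestedStdVector<T, Rank - 1>::type> type;
};
template <typename T>
struct NestedStdVector<T, 0> {
  typedef T type;
};

static_assert(std::is_same<NestedStdVector<double, 0>::type, double>::value, "");
static_assert(std::is_same<NestedStdVector<double, 2>::type,
                           std::vector<std::vector<double>>>::value, "");

int main() { return 0; }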

View File

@ -57,8 +57,6 @@ namespace Grid
void writeDefault(const std::string &s, const U &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
template <typename U>
void writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements);
std::string docString(void);
std::string string(void);
private:
@ -81,8 +79,6 @@ namespace Grid
void readDefault(const std::string &s, U &output);
template <typename U>
void readDefault(const std::string &s, std::vector<U> &output);
template <typename U>
void readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim);
void readCurrentSubtree(std::string &s);
private:
void checkParse(const pugi::xml_parse_result &result, const std::string name);
@ -126,41 +122,9 @@ namespace Grid
void XmlWriter::writeDefault(const std::string &s, const std::vector<U> &x)
{
push(s);
for( auto &u : x )
for (auto &x_i: x)
{
write("elem", u);
}
pop();
}
template <typename U>
void XmlWriter::writeMultiDim(const std::string &s, const std::vector<size_t> & Dimensions, const U * pDataRowMajor, size_t NumElements)
{
push(s);
size_t count = 1;
const int Rank = static_cast<int>( Dimensions.size() );
write("rank", Rank );
std::vector<size_t> MyIndex( Rank );
for( auto d : Dimensions ) {
write("dim", d);
count *= d;
}
assert( count == NumElements && "XmlIO : element count doesn't match dimensions" );
static const char sName[] = "tensor";
for( int i = 0 ; i < Rank ; i++ ) {
MyIndex[i] = 0;
push(sName);
}
while (NumElements--) {
write("elem", *pDataRowMajor++);
int i;
for( i = Rank - 1 ; i != -1 && ++MyIndex[i] == Dimensions[i] ; i-- )
MyIndex[i] = 0;
int Rollover = Rank - 1 - i;
for( i = 0 ; i < Rollover ; i++ )
pop();
for( i = 0 ; NumElements && i < Rollover ; i++ )
push(sName);
write("elem", x_i);
}
pop();
}
@ -181,66 +145,25 @@ namespace Grid
template <typename U>
void XmlReader::readDefault(const std::string &s, std::vector<U> &output)
{
std::string buf;
unsigned int i = 0;
if (!push(s))
{
std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
std::cout << std::endl;
} else {
for(unsigned int i = 0; node_.child("elem"); )
{
output.resize(i + 1);
read("elem", output[i++]);
node_.child("elem").set_name("elem-done");
}
pop();
}
}
template <typename U>
void XmlReader::readMultiDim(const std::string &s, std::vector<U> &buf, std::vector<size_t> &dim)
{
if (!push(s))
return;
}
while (node_.child("elem"))
{
std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
std::cout << std::endl;
} else {
static const char sName[] = "tensor";
static const char sNameDone[] = "tensor-done";
int Rank;
read("rank", Rank);
dim.resize( Rank );
size_t NumElements = 1;
for( auto &d : dim )
{
read("dim", d);
node_.child("dim").set_name("dim-done");
NumElements *= d;
}
buf.resize( NumElements );
std::vector<size_t> MyIndex( Rank );
for( int i = 0 ; i < Rank ; i++ ) {
MyIndex[i] = 0;
push(sName);
}
for( auto &x : buf )
{
NumElements--;
read("elem", x);
node_.child("elem").set_name("elem-done");
int i;
for( i = Rank - 1 ; i != -1 && ++MyIndex[i] == dim[i] ; i-- )
MyIndex[i] = 0;
int Rollover = Rank - 1 - i;
for( i = 0 ; i < Rollover ; i++ ) {
node_.set_name(sNameDone);
pop();
}
for( i = 0 ; NumElements && i < Rollover ; i++ )
push(sName);
}
pop();
output.resize(i + 1);
read("elem", output[i]);
node_.child("elem").set_name("elem-done");
i++;
}
pop();
}
}
#endif
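
XmlWriter::writeMultiDim walks the row-major buffer with an odometer-style index: after each element the innermost index is incremented, and every index that rolls over closes one nesting level (reopened if elements remain). The same carry logic in isolation, as a hedged sketch with a plain index vector:

#include <cstdio>
#include <vector>

// Advance a multi-dimensional index in row-major order; returns how many
// trailing indices rolled over (the "Rollover" count in the code above).
int advance(std::vector<size_t> &idx, const std::vector<size_t> &dims)
{
  int i;
  for (i = (int)dims.size() - 1; i != -1 && ++idx[i] == dims[i]; i--)
    idx[i] = 0;
  return (int)dims.size() - 1 - i;
}

int main() {
  std::vector<size_t> dims{2, 3}, idx{0, 0};
  for (size_t n = 0; n < 6; ++n) {
    std::printf("(%zu,%zu)\n", idx[0], idx[1]);  // (0,0) (0,1) (0,2) (1,0) ...
    advance(idx, dims);
  }
}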

View File

@ -485,6 +485,83 @@ namespace Optimization {
// Some Template specialization
// Hack for CLANG until mm512_reduce_add_ps etc... are implemented in GCC and Clang releases
#ifndef __INTEL_COMPILER
#warning "Slow reduction due to incomplete reduce intrinsics"
//Complex float Reduce
template<>
inline Grid::ComplexF Reduce<Grid::ComplexF, __m512>::operator()(__m512 in){
__m512 v1,v2;
v1=Optimization::Permute::Permute0(in); // avx 512; quad complex single
v1= _mm512_add_ps(v1,in);
v2=Optimization::Permute::Permute1(v1);
v1 = _mm512_add_ps(v1,v2);
v2=Optimization::Permute::Permute2(v1);
v1 = _mm512_add_ps(v1,v2);
u512f conv; conv.v = v1;
return Grid::ComplexF(conv.f[0],conv.f[1]);
}
//Real float Reduce
template<>
inline Grid::RealF Reduce<Grid::RealF, __m512>::operator()(__m512 in){
__m512 v1,v2;
v1 = Optimization::Permute::Permute0(in); // avx 512; octo-double
v1 = _mm512_add_ps(v1,in);
v2 = Optimization::Permute::Permute1(v1);
v1 = _mm512_add_ps(v1,v2);
v2 = Optimization::Permute::Permute2(v1);
v1 = _mm512_add_ps(v1,v2);
v2 = Optimization::Permute::Permute3(v1);
v1 = _mm512_add_ps(v1,v2);
u512f conv; conv.v=v1;
return conv.f[0];
}
//Complex double Reduce
template<>
inline Grid::ComplexD Reduce<Grid::ComplexD, __m512d>::operator()(__m512d in){
__m512d v1;
v1 = Optimization::Permute::Permute0(in); // sse 128; paired complex single
v1 = _mm512_add_pd(v1,in);
v1 = Optimization::Permute::Permute1(in); // sse 128; paired complex single
v1 = _mm512_add_pd(v1,in);
u512d conv; conv.v = v1;
return Grid::ComplexD(conv.f[0],conv.f[1]);
}
//Real double Reduce
template<>
inline Grid::RealD Reduce<Grid::RealD, __m512d>::operator()(__m512d in){
__m512d v1,v2;
v1 = Optimization::Permute::Permute0(in); // avx 512; quad double
v1 = _mm512_add_pd(v1,in);
v2 = Optimization::Permute::Permute1(v1);
v1 = _mm512_add_pd(v1,v2);
v2 = Optimization::Permute::Permute2(v1);
v1 = _mm512_add_pd(v1,v2);
u512d conv; conv.v = v1;
return conv.f[0];
}
//Integer Reduce
template<>
inline Integer Reduce<Integer, __m512i>::operator()(__m512i in){
// No full vector reduce, use AVX to add upper and lower halves of register
// and perform AVX reduction.
__m256i v1, v2, v3;
__m128i u1, u2, ret;
v1 = _mm512_castsi512_si256(in); // upper half
v2 = _mm512_extracti32x8_epi32(in, 1); // lower half
v3 = _mm256_add_epi32(v1, v2);
v1 = _mm256_hadd_epi32(v3, v3);
v2 = _mm256_hadd_epi32(v1, v1);
u1 = _mm256_castsi256_si128(v2); // upper half
u2 = _mm256_extracti128_si256(v2, 1); // lower half
ret = _mm_add_epi32(u1, u2);
return _mm_cvtsi128_si32(ret);
}
#else
//Complex float Reduce
template<>
inline Grid::ComplexF Reduce<Grid::ComplexF, __m512>::operator()(__m512 in){
@ -513,6 +590,8 @@ namespace Optimization {
inline Integer Reduce<Integer, __m512i>::operator()(__m512i in){
return _mm512_reduce_add_epi32(in);
}
#endif
}
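
The fallback reductions add each register to a permuted copy of itself, so after log2(lanes) permute-and-add steps every lane holds the full sum; the intrinsic branch does the same job with _mm512_reduce_add_*. A scalar sketch of that butterfly pattern, with a plain array standing in for the SIMD lanes:

#include <cstddef>
#include <iostream>
#include <vector>

// Butterfly reduction: after log2(N) rounds every lane contains the total,
// mimicking the Permute0/1/2/... + add sequence in the SIMD code above.
double reduce(std::vector<double> v)
{
  const std::size_t N = v.size();             // assumed a power of two
  for (std::size_t stride = N / 2; stride > 0; stride /= 2) {
    std::vector<double> perm(N);
    for (std::size_t i = 0; i < N; ++i)
      perm[i] = v[i ^ stride];                // swap lanes "stride" apart
    for (std::size_t i = 0; i < N; ++i)
      v[i] += perm[i];
  }
  return v[0];
}

int main() {
  std::cout << reduce({1, 2, 3, 4, 5, 6, 7, 8}) << std::endl;  // 36
}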

View File

@ -10,7 +10,6 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Guido Cossu <cossu@iroiro-pc.kek.jp>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: Michael Marshall <michael.marshall@ed.ac.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -90,25 +89,17 @@ template <typename Condition, typename ReturnType> using NotEnableIf = Invoke<st
////////////////////////////////////////////////////////
// Check for complexity with type traits
template <typename T> struct is_complex : public std::false_type {};
template <> struct is_complex<ComplexD> : public std::true_type {};
template <> struct is_complex<ComplexF> : public std::true_type {};
template <> struct is_complex<std::complex<double> > : public std::true_type {};
template <> struct is_complex<std::complex<float> > : public std::true_type {};
template<typename T, typename V=void> struct is_real : public std::false_type {};
template<typename T> struct is_real<T, typename std::enable_if<std::is_floating_point<T>::value,
void>::type> : public std::true_type {};
template<typename T, typename V=void> struct is_integer : public std::false_type {};
template<typename T> struct is_integer<T, typename std::enable_if<std::is_integral<T>::value,
void>::type> : public std::true_type {};
template <typename T> using IfReal = Invoke<std::enable_if<is_real<T>::value, int> >;
template <typename T> using IfReal = Invoke<std::enable_if<std::is_floating_point<T>::value, int> >;
template <typename T> using IfComplex = Invoke<std::enable_if<is_complex<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<is_integer<T>::value, int> >;
template <typename T> using IfInteger = Invoke<std::enable_if<std::is_integral<T>::value, int> >;
template <typename T1,typename T2> using IfSame = Invoke<std::enable_if<std::is_same<T1,T2>::value, int> >;
template <typename T> using IfNotReal = Invoke<std::enable_if<!is_real<T>::value, int> >;
template <typename T> using IfNotReal = Invoke<std::enable_if<!std::is_floating_point<T>::value, int> >;
template <typename T> using IfNotComplex = Invoke<std::enable_if<!is_complex<T>::value, int> >;
template <typename T> using IfNotInteger = Invoke<std::enable_if<!is_integer<T>::value, int> >;
template <typename T> using IfNotInteger = Invoke<std::enable_if<!std::is_integral<T>::value, int> >;
template <typename T1,typename T2> using IfNotSame = Invoke<std::enable_if<!std::is_same<T1,T2>::value, int> >;
////////////////////////////////////////////////////////
@ -866,10 +857,8 @@ template <typename T>
struct is_simd : public std::false_type {};
template <> struct is_simd<vRealF> : public std::true_type {};
template <> struct is_simd<vRealD> : public std::true_type {};
template <> struct is_simd<vRealH> : public std::true_type {};
template <> struct is_simd<vComplexF> : public std::true_type {};
template <> struct is_simd<vComplexD> : public std::true_type {};
template <> struct is_simd<vComplexH> : public std::true_type {};
template <> struct is_simd<vInteger> : public std::true_type {};
template <typename T> using IfSimd = Invoke<std::enable_if<is_simd<T>::value, int> >;
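
is_complex / is_real / is_integer and the IfComplex / IfReal / IfInteger aliases built from them exist so that overloads can be enabled per scalar category through SFINAE. A stripped-down illustration of the dispatch pattern, with stand-in traits rather than Grid's own:

#include <complex>
#include <iostream>
#include <type_traits>

template <typename T> struct is_complex : std::false_type {};
template <typename T> struct is_complex<std::complex<T>> : std::true_type {};

// Two overloads of the same name; only one is viable for a given type.
template <typename T, typename std::enable_if<is_complex<T>::value, int>::type = 0>
const char *category(const T &) { return "complex"; }

template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
const char *category(const T &) { return "real"; }

int main() {
  std::cout << category(std::complex<double>(1, 2)) << "\n";  // complex
  std::cout << category(3.14) << "\n";                        // real
}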

View File

@ -5,7 +5,6 @@ Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Michael Marshall <michael.marshall@ed.ac.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -43,26 +42,27 @@ namespace Grid {
//
class GridTensorBase {};
// Too late to remove these traits from Grid Tensors, so inherit from GridTypeMapper
#define GridVector_CopyTraits \
using element = vtype; \
using scalar_type = typename Traits::scalar_type; \
using vector_type = typename Traits::vector_type; \
using vector_typeD = typename Traits::vector_typeD; \
using tensor_reduced = typename Traits::tensor_reduced; \
using scalar_object = typename Traits::scalar_object; \
using Complexified = typename Traits::Complexified; \
using Realified = typename Traits::Realified; \
using DoublePrecision = typename Traits::DoublePrecision; \
static constexpr int TensorLevel = Traits::TensorLevel
template <class vtype>
class iScalar {
public:
vtype _internal;
using Traits = GridTypeMapper<iScalar<vtype> >;
GridVector_CopyTraits;
typedef vtype element;
typedef typename GridTypeMapper<vtype>::scalar_type scalar_type;
typedef typename GridTypeMapper<vtype>::vector_type vector_type;
typedef typename GridTypeMapper<vtype>::vector_typeD vector_typeD;
typedef typename GridTypeMapper<vtype>::tensor_reduced tensor_reduced_v;
typedef typename GridTypeMapper<vtype>::scalar_object recurse_scalar_object;
typedef iScalar<tensor_reduced_v> tensor_reduced;
typedef iScalar<recurse_scalar_object> scalar_object;
// substitutes a real or complex version with same tensor structure
typedef iScalar<typename GridTypeMapper<vtype>::Complexified> Complexified;
typedef iScalar<typename GridTypeMapper<vtype>::Realified> Realified;
// get double precision version
typedef iScalar<typename GridTypeMapper<vtype>::DoublePrecision> DoublePrecision;
enum { TensorLevel = GridTypeMapper<vtype>::TensorLevel + 1 };
// Scalar no action
// template<int Level> using tensor_reduce_level = typename
@ -173,10 +173,7 @@ class iScalar {
return stream;
};
strong_inline const scalar_type * begin() const { return reinterpret_cast<const scalar_type *>(&_internal); }
strong_inline scalar_type * begin() { return reinterpret_cast< scalar_type *>(&_internal); }
strong_inline const scalar_type * end() const { return begin() + Traits::count; }
strong_inline scalar_type * end() { return begin() + Traits::count; }
};
///////////////////////////////////////////////////////////
// Allows to turn scalar<scalar<scalar<double>>>> back to double.
@ -197,8 +194,21 @@ class iVector {
public:
vtype _internal[N];
using Traits = GridTypeMapper<iVector<vtype, N> >;
GridVector_CopyTraits;
typedef vtype element;
typedef typename GridTypeMapper<vtype>::scalar_type scalar_type;
typedef typename GridTypeMapper<vtype>::vector_type vector_type;
typedef typename GridTypeMapper<vtype>::vector_typeD vector_typeD;
typedef typename GridTypeMapper<vtype>::tensor_reduced tensor_reduced_v;
typedef typename GridTypeMapper<vtype>::scalar_object recurse_scalar_object;
typedef iScalar<tensor_reduced_v> tensor_reduced;
typedef iVector<recurse_scalar_object, N> scalar_object;
// substitutes a real or complex version with same tensor structure
typedef iVector<typename GridTypeMapper<vtype>::Complexified, N> Complexified;
typedef iVector<typename GridTypeMapper<vtype>::Realified, N> Realified;
// get double precision version
typedef iVector<typename GridTypeMapper<vtype>::DoublePrecision, N> DoublePrecision;
template <class T, typename std::enable_if<!isGridTensor<T>::value, T>::type
* = nullptr>
@ -208,6 +218,7 @@ class iVector {
return *this;
}
enum { TensorLevel = GridTypeMapper<vtype>::TensorLevel + 1 };
iVector(const Zero &z) { *this = zero; };
iVector() = default;
/*
@ -292,11 +303,6 @@ class iVector {
// strong_inline vtype && operator ()(int i) {
// return _internal[i];
// }
strong_inline const scalar_type * begin() const { return reinterpret_cast<const scalar_type *>(_internal); }
strong_inline scalar_type * begin() { return reinterpret_cast< scalar_type *>(_internal); }
strong_inline const scalar_type * end() const { return begin() + Traits::count; }
strong_inline scalar_type * end() { return begin() + Traits::count; }
};
template <class vtype, int N>
@ -304,8 +310,25 @@ class iMatrix {
public:
vtype _internal[N][N];
using Traits = GridTypeMapper<iMatrix<vtype, N> >;
GridVector_CopyTraits;
typedef vtype element;
typedef typename GridTypeMapper<vtype>::scalar_type scalar_type;
typedef typename GridTypeMapper<vtype>::vector_type vector_type;
typedef typename GridTypeMapper<vtype>::vector_typeD vector_typeD;
typedef typename GridTypeMapper<vtype>::tensor_reduced tensor_reduced_v;
typedef typename GridTypeMapper<vtype>::scalar_object recurse_scalar_object;
// substitutes a real or complex version with same tensor structure
typedef iMatrix<typename GridTypeMapper<vtype>::Complexified, N> Complexified;
typedef iMatrix<typename GridTypeMapper<vtype>::Realified, N> Realified;
// get double precision version
typedef iMatrix<typename GridTypeMapper<vtype>::DoublePrecision, N> DoublePrecision;
// Tensor removal
typedef iScalar<tensor_reduced_v> tensor_reduced;
typedef iMatrix<recurse_scalar_object, N> scalar_object;
enum { TensorLevel = GridTypeMapper<vtype>::TensorLevel + 1 };
iMatrix(const Zero &z) { *this = zero; };
iMatrix() = default;
@ -435,11 +458,6 @@ class iMatrix {
// strong_inline vtype && operator ()(int i,int j) {
// return _internal[i][j];
// }
strong_inline const scalar_type * begin() const { return reinterpret_cast<const scalar_type *>(_internal[0]); }
strong_inline scalar_type * begin() { return reinterpret_cast< scalar_type *>(_internal[0]); }
strong_inline const scalar_type * end() const { return begin() + Traits::count; }
strong_inline scalar_type * end() { return begin() + Traits::count; }
};
template <class v>
@ -462,3 +480,6 @@ void vprefetch(const iMatrix<v, N> &vv) {
}
}
#endif
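
The begin()/end() members give each tensor a flat view of its Traits::count scalar components, so generic code can iterate without caring whether it holds an iScalar, iVector or iMatrix. The idea in miniature, assuming a fixed-size aggregate and a count constant:

#include <iostream>

template <typename T, int N>
struct Vec {
  T data[N];
  static constexpr int count = N;                  // stands in for Traits::count
  const T *begin() const { return data; }
  T       *begin()       { return data; }
  const T *end()   const { return data + count; }
  T       *end()         { return data + count; }
};

int main() {
  Vec<double, 3> v{{1.0, 2.0, 3.0}};
  double sum = 0;
  for (double x : v) sum += x;                     // range-for via begin()/end()
  std::cout << sum << std::endl;                   // 6
}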

View File

@ -5,7 +5,6 @@
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Christopher Kelly <ckelly@phys.columbia.edu>
Author: Michael Marshall <michael.marshall@ed.ac.au>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@ -27,17 +26,6 @@ Author: Michael Marshall <michael.marshall@ed.ac.au>
namespace Grid {
// Forward declarations
template<class T> class iScalar;
template<class T, int N> class iVector;
template<class T, int N> class iMatrix;
// These are the Grid tensors
template<typename T> struct isGridTensor : public std::false_type { static constexpr bool notvalue = true; };
template<class T> struct isGridTensor<iScalar<T>> : public std::true_type { static constexpr bool notvalue = false; };
template<class T, int N> struct isGridTensor<iVector<T, N>> : public std::true_type { static constexpr bool notvalue = false; };
template<class T, int N> struct isGridTensor<iMatrix<T, N>> : public std::true_type { static constexpr bool notvalue = false; };
//////////////////////////////////////////////////////////////////////////////////
// Want to recurse: GridTypeMapper<Matrix<vComplexD> >::scalar_type == ComplexD.
// Use of a helper class like this allows us to template specialise and "dress"
@ -53,25 +41,24 @@ namespace Grid {
//
//////////////////////////////////////////////////////////////////////////////////
// This saves repeating common properties for supported Grid Scalar types
// TensorLevel How many nested grid tensors
// Rank Rank of the grid tensor
// count Total number of elements, i.e. product of dimensions
// Dimension(dim) Size of dimension dim
struct GridTypeMapper_Base {
static constexpr int TensorLevel = 0;
static constexpr int Rank = 0;
static constexpr std::size_t count = 1;
static constexpr int Dimension(int dim) { return 0; }
template <class T> class GridTypeMapper {
public:
typedef typename T::scalar_type scalar_type;
typedef typename T::vector_type vector_type;
typedef typename T::vector_typeD vector_typeD;
typedef typename T::tensor_reduced tensor_reduced;
typedef typename T::scalar_object scalar_object;
typedef typename T::Complexified Complexified;
typedef typename T::Realified Realified;
typedef typename T::DoublePrecision DoublePrecision;
enum { TensorLevel = T::TensorLevel };
};
//////////////////////////////////////////////////////////////////////////////////
// Recursion stops with these template specialisations
//////////////////////////////////////////////////////////////////////////////////
template<typename T> struct GridTypeMapper {};
template<> struct GridTypeMapper<RealF> : public GridTypeMapper_Base {
template<> class GridTypeMapper<RealF> {
public:
typedef RealF scalar_type;
typedef RealF vector_type;
typedef RealD vector_typeD;
@ -80,8 +67,10 @@ namespace Grid {
typedef ComplexF Complexified;
typedef RealF Realified;
typedef RealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<RealD> : public GridTypeMapper_Base {
template<> class GridTypeMapper<RealD> {
public:
typedef RealD scalar_type;
typedef RealD vector_type;
typedef RealD vector_typeD;
@ -90,8 +79,10 @@ namespace Grid {
typedef ComplexD Complexified;
typedef RealD Realified;
typedef RealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<ComplexF> : public GridTypeMapper_Base {
template<> class GridTypeMapper<ComplexF> {
public:
typedef ComplexF scalar_type;
typedef ComplexF vector_type;
typedef ComplexD vector_typeD;
@ -100,8 +91,10 @@ namespace Grid {
typedef ComplexF Complexified;
typedef RealF Realified;
typedef ComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<ComplexD> : public GridTypeMapper_Base {
template<> class GridTypeMapper<ComplexD> {
public:
typedef ComplexD scalar_type;
typedef ComplexD vector_type;
typedef ComplexD vector_typeD;
@ -110,8 +103,10 @@ namespace Grid {
typedef ComplexD Complexified;
typedef RealD Realified;
typedef ComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<Integer> : public GridTypeMapper_Base {
template<> class GridTypeMapper<Integer> {
public:
typedef Integer scalar_type;
typedef Integer vector_type;
typedef Integer vector_typeD;
@ -120,9 +115,11 @@ namespace Grid {
typedef void Complexified;
typedef void Realified;
typedef void DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<vRealF> : public GridTypeMapper_Base {
template<> class GridTypeMapper<vRealF> {
public:
typedef RealF scalar_type;
typedef vRealF vector_type;
typedef vRealD vector_typeD;
@ -131,8 +128,10 @@ namespace Grid {
typedef vComplexF Complexified;
typedef vRealF Realified;
typedef vRealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<vRealD> : public GridTypeMapper_Base {
template<> class GridTypeMapper<vRealD> {
public:
typedef RealD scalar_type;
typedef vRealD vector_type;
typedef vRealD vector_typeD;
@ -141,20 +140,10 @@ namespace Grid {
typedef vComplexD Complexified;
typedef vRealD Realified;
typedef vRealD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<vRealH> : public GridTypeMapper_Base {
// Fixme this is incomplete until Grid supports fp16 or bfp16 arithmetic types
typedef RealF scalar_type;
typedef vRealH vector_type;
typedef vRealD vector_typeD;
typedef vRealH tensor_reduced;
typedef RealF scalar_object;
typedef vComplexH Complexified;
typedef vRealH Realified;
typedef vRealD DoublePrecision;
};
template<> struct GridTypeMapper<vComplexH> : public GridTypeMapper_Base {
// Fixme this is incomplete until Grid supports fp16 or bfp16 arithmetic types
template<> class GridTypeMapper<vComplexH> {
public:
typedef ComplexF scalar_type;
typedef vComplexH vector_type;
typedef vComplexD vector_typeD;
@ -163,8 +152,10 @@ namespace Grid {
typedef vComplexH Complexified;
typedef vRealH Realified;
typedef vComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<vComplexF> : public GridTypeMapper_Base {
template<> class GridTypeMapper<vComplexF> {
public:
typedef ComplexF scalar_type;
typedef vComplexF vector_type;
typedef vComplexD vector_typeD;
@ -173,8 +164,10 @@ namespace Grid {
typedef vComplexF Complexified;
typedef vRealF Realified;
typedef vComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<vComplexD> : public GridTypeMapper_Base {
template<> class GridTypeMapper<vComplexD> {
public:
typedef ComplexD scalar_type;
typedef vComplexD vector_type;
typedef vComplexD vector_typeD;
@ -183,8 +176,10 @@ namespace Grid {
typedef vComplexD Complexified;
typedef vRealD Realified;
typedef vComplexD DoublePrecision;
enum { TensorLevel = 0 };
};
template<> struct GridTypeMapper<vInteger> : public GridTypeMapper_Base {
template<> class GridTypeMapper<vInteger> {
public:
typedef Integer scalar_type;
typedef vInteger vector_type;
typedef vInteger vector_typeD;
@ -193,52 +188,57 @@ namespace Grid {
typedef void Complexified;
typedef void Realified;
typedef void DoublePrecision;
enum { TensorLevel = 0 };
};
#define GridTypeMapper_RepeatedTypes \
using BaseTraits = GridTypeMapper<T>; \
using scalar_type = typename BaseTraits::scalar_type; \
using vector_type = typename BaseTraits::vector_type; \
using vector_typeD = typename BaseTraits::vector_typeD; \
static constexpr int TensorLevel = BaseTraits::TensorLevel + 1
template<typename T> struct GridTypeMapper<iScalar<T>> {
GridTypeMapper_RepeatedTypes;
using tensor_reduced = iScalar<typename BaseTraits::tensor_reduced>;
using scalar_object = iScalar<typename BaseTraits::scalar_object>;
using Complexified = iScalar<typename BaseTraits::Complexified>;
using Realified = iScalar<typename BaseTraits::Realified>;
using DoublePrecision = iScalar<typename BaseTraits::DoublePrecision>;
static constexpr int Rank = BaseTraits::Rank + 1;
static constexpr std::size_t count = BaseTraits::count;
static constexpr int Dimension(int dim) {
return ( dim == 0 ) ? 1 : BaseTraits::Dimension(dim - 1); }
// First some of my own traits
template<typename T> struct isGridTensor {
static const bool value = true;
static const bool notvalue = false;
};
template<typename T, int N> struct GridTypeMapper<iVector<T, N>> {
GridTypeMapper_RepeatedTypes;
using tensor_reduced = iScalar<typename BaseTraits::tensor_reduced>;
using scalar_object = iVector<typename BaseTraits::scalar_object, N>;
using Complexified = iVector<typename BaseTraits::Complexified, N>;
using Realified = iVector<typename BaseTraits::Realified, N>;
using DoublePrecision = iVector<typename BaseTraits::DoublePrecision, N>;
static constexpr int Rank = BaseTraits::Rank + 1;
static constexpr std::size_t count = BaseTraits::count * N;
static constexpr int Dimension(int dim) {
return ( dim == 0 ) ? N : BaseTraits::Dimension(dim - 1); }
template<> struct isGridTensor<int > {
static const bool value = false;
static const bool notvalue = true;
};
template<typename T, int N> struct GridTypeMapper<iMatrix<T, N>> {
GridTypeMapper_RepeatedTypes;
using tensor_reduced = iScalar<typename BaseTraits::tensor_reduced>;
using scalar_object = iMatrix<typename BaseTraits::scalar_object, N>;
using Complexified = iMatrix<typename BaseTraits::Complexified, N>;
using Realified = iMatrix<typename BaseTraits::Realified, N>;
using DoublePrecision = iMatrix<typename BaseTraits::DoublePrecision, N>;
static constexpr int Rank = BaseTraits::Rank + 2;
static constexpr std::size_t count = BaseTraits::count * N * N;
static constexpr int Dimension(int dim) {
return ( dim == 0 || dim == 1 ) ? N : BaseTraits::Dimension(dim - 2); }
template<> struct isGridTensor<RealD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<RealF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<ComplexD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<ComplexF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<Integer > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vRealD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vRealF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vComplexD > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vComplexF > {
static const bool value = false;
static const bool notvalue = true;
};
template<> struct isGridTensor<vInteger > {
static const bool value = false;
static const bool notvalue = true;
};
// Match the index
@ -263,12 +263,19 @@ namespace Grid {
typedef T type;
};
//Query whether a tensor or Lattice<Tensor> is SIMD vector or scalar
template<typename T, typename V=void> struct isSIMDvectorized : public std::false_type {};
template<typename U> struct isSIMDvectorized<U, typename std::enable_if< !std::is_same<
typename GridTypeMapper<typename getVectorType<U>::type>::scalar_type,
typename GridTypeMapper<typename getVectorType<U>::type>::vector_type>::value, void>::type>
: public std::true_type {};
//Query if a tensor or Lattice<Tensor> is SIMD vector or scalar
template<typename T>
class isSIMDvectorized{
template<typename U>
static typename std::enable_if< !std::is_same< typename GridTypeMapper<typename getVectorType<U>::type>::scalar_type,
typename GridTypeMapper<typename getVectorType<U>::type>::vector_type>::value, char>::type test(void *);
template<typename U>
static double test(...);
public:
enum {value = sizeof(test<T>(0)) == sizeof(char) };
};
//Get the precision of a Lattice, tensor or scalar type in units of sizeof(float)
template<typename T>
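
In the traits on the left-hand side of this hunk, Rank, count and Dimension(dim) accumulate as the iScalar/iVector/iMatrix specialisations recurse, e.g. a vector of matrices gains one rank from the vector and two from the matrix. A compile-time sketch of that accumulation with dummy tensor types (not Grid's):

#include <cstddef>

template <typename T> struct Traits {                  // scalars terminate the recursion
  static constexpr int Rank = 0;
  static constexpr std::size_t count = 1;
};

template <typename T, int N> struct Vec { T e[N]; };
template <typename T, int N> struct Mat { T e[N][N]; };

template <typename T, int N> struct Traits<Vec<T, N>> {
  static constexpr int Rank = Traits<T>::Rank + 1;
  static constexpr std::size_t count = Traits<T>::count * N;
};
template <typename T, int N> struct Traits<Mat<T, N>> {
  static constexpr int Rank = Traits<T>::Rank + 2;
  static constexpr std::size_t count = Traits<T>::count * N * N;
};

// A spin-colour-like object: vector of 4 matrices of size 3.
using SpinColour = Vec<Mat<double, 3>, 4>;
static_assert(Traits<SpinColour>::Rank == 3, "");
static_assert(Traits<SpinColour>::count == 4 * 3 * 3, "");

int main() { return 0; }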

View File

@ -289,11 +289,6 @@ void Grid_init(int *argc,char ***argv)
std::cout << "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"<<std::endl;
std::cout << "GNU General Public License for more details."<<std::endl;
printHash();
#ifdef GRID_BUILD_REF
#define _GRID_BUILD_STR(x) #x
#define GRID_BUILD_STR(x) _GRID_BUILD_STR(x)
std::cout << "Build " << GRID_BUILD_STR(GRID_BUILD_REF) << std::endl;
#endif
std::cout << std::endl;
}
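
GRID_BUILD_STR / _GRID_BUILD_STR is the usual two-level stringification trick: the inner macro applies #, the outer one forces its argument (here the -DGRID_BUILD_REF definition) to be macro-expanded first. The same idea in isolation, with an invented macro name:

#include <iostream>

#define REVISION a1b2c3d                 // stands in for -DGRID_BUILD_REF=<git ref>

#define _STR(x) #x                       // stringise literally
#define STR(x)  _STR(x)                  // expand first, then stringise

int main() {
  std::cout << _STR(REVISION) << "\n";   // prints: REVISION
  std::cout << STR(REVISION)  << "\n";   // prints: a1b2c3d
}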

View File

@ -1,6 +0,0 @@
SUBDIRS = .
include Make.inc

View File

@ -1,198 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_EODWFRatio.cc
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
int main(int argc, char **argv) {
using namespace Grid;
using namespace Grid::QCD;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 20;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 0;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 20;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
HMCparams.StartingType =std::string("ColdStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0; // Scale factor two
RealD c = 0.0;
OneFlavourRationalParams OFRp;
OFRp.lo = 1.0e-2;
OFRp.hi = 64;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-10;
OFRp.degree = 14;
OFRp.precision= 40;
std::vector<Real> hasenbusch({ 0.1 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
// These lines are unecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(4);
////////////////////////////////////
// Strange action
////////////////////////////////////
// FermionAction StrangeOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_mass,M5,b,c, Params);
// DomainWallEOFAFermionR Strange_Op_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
// DomainWallEOFAFermionR Strange_Op_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
// ExactOneFlavourRatioPseudoFermionAction EOFA(Strange_Op_L,Strange_Op_R,CG,ofp, false);
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
// OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
OneFlavourRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
// TwoFlavourRationalTesterPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion1F(StrangeOp,OFRp);
// TwoFlavourPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion2F(StrangeOp,CG,CG);
// Level1.push_back(&StrangePseudoFermion2F);
// Level1.push_back(&StrangePseudoFermion);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main

View File

@ -1,452 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file:
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu
Author: David Murphy
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
#define MIXED_PRECISION
#endif
namespace Grid{
namespace QCD{
/*
* Need a plan for gauge field update for mixed precision in HMC (2x speed up)
* -- Store the single prec action operator.
* -- Clone the gauge field from the operator function argument.
* -- Build the mixed precision operator dynamically from the passed operator and single prec clone.
*/
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid4; //Grid for single-precision fields
GridBase* SinglePrecGrid5; //Grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid4,
GridBase* _sp_grid5,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD):
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tol),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid4(_sp_grid4),
SinglePrecGrid5(_sp_grid5),
OuterLoopNormMult(100.)
{
/* Debugging instances of objects; references are stored
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpF " <<std::hex<< &LinOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper LinOpD " <<std::hex<< &LinOpD<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpF " <<std::hex<< &FermOpF<<std::dec <<std::endl;
std::cout << GridLogMessage << " Mixed precision CG wrapper FermOpD " <<std::hex<< &FermOpD<<std::dec <<std::endl;
*/
};
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpU " <<std::hex<< &(SchurOpU->_Mat)<<std::dec <<std::endl;
// std::cout << GridLogMessage << " Mixed precision CG wrapper operator() FermOpD " <<std::hex<< &(LinOpD._Mat) <<std::dec <<std::endl;
// Assumption made in code to extract gauge field
// We could avoid storing LinopD reference alltogether ?
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
////////////////////////////////////////////////////////////////////////////////////
// Must snarf a single precision copy of the gauge field in Linop_d argument
////////////////////////////////////////////////////////////////////////////////////
typedef typename FermionOperatorF::GaugeField GaugeFieldF;
typedef typename FermionOperatorF::GaugeLinkField GaugeLinkFieldF;
typedef typename FermionOperatorD::GaugeField GaugeFieldD;
typedef typename FermionOperatorD::GaugeLinkField GaugeLinkFieldD;
GridBase * GridPtrF = SinglePrecGrid4;
GridBase * GridPtrD = FermOpD.Umu._grid;
GaugeFieldF U_f (GridPtrF);
GaugeLinkFieldF Umu_f(GridPtrF);
// std::cout << " Dim gauge field "<<GridPtrF->Nd()<<std::endl; // 4d
// std::cout << " Dim gauge field "<<GridPtrD->Nd()<<std::endl; // 4d
////////////////////////////////////////////////////////////////////////////////////
// Moving this to a Clone method of fermion operator would allow to duplicate the
// physics parameters and decrease gauge field copies
////////////////////////////////////////////////////////////////////////////////////
GaugeLinkFieldD Umu_d(GridPtrD);
for(int mu=0;mu<Nd*2;mu++){
Umu_d = PeekIndex<LorentzIndex>(FermOpD.Umu, mu);
precisionChange(Umu_f,Umu_d);
PokeIndex<LorentzIndex>(FermOpF.Umu, Umu_f, mu);
}
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
////////////////////////////////////////////////////////////////////////////////////
// Could test to make sure that LinOpF and LinOpD agree to single prec?
////////////////////////////////////////////////////////////////////////////////////
/*
GridBase *Fgrid = psi._grid;
FieldD tmp2(Fgrid);
FieldD tmp1(Fgrid);
LinOpU.Op(src,tmp1);
LinOpD.Op(src,tmp2);
std::cout << " Double gauge field "<< norm2(FermOpD.Umu)<<std::endl;
std::cout << " Single gauge field "<< norm2(FermOpF.Umu)<<std::endl;
std::cout << " Test of operators "<<norm2(tmp1)<<std::endl;
std::cout << " Test of operators "<<norm2(tmp2)<<std::endl;
tmp1=tmp1-tmp2;
std::cout << " Test of operators diff "<<norm2(tmp1)<<std::endl;
*/
////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
////////////////////////////////////////////////////////////////////////////////////
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
MPCG(src,psi);
}
};
}};
int main(int argc, char **argv) {
using namespace Grid;
using namespace Grid::QCD;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionF FermionActionF;
typedef MobiusEOFAFermionR FermionEOFAAction;
typedef MobiusEOFAFermionF FermionEOFAActionF;
typedef typename FermionAction::FermionField FermionField;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
typedef GenericHMCRunner<ForceGradient> HMCWrapper;
MD.name = std::string("Force Gradient");
// typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
// MD.name = std::string("MinimumNorm2");
MD.MDsteps = 6;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 590;
HMCparams.Trajectories = 1000;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
std::vector<Real> hasenbusch({ 0.1, 0.3, 0.6 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
std::vector<int> latt = GridDefaultLatt();
std::vector<int> mpi = GridDefaultMpi();
std::vector<int> simdF = GridDefaultSimd(Nd,vComplexF::Nsimd());
std::vector<int> simdD = GridDefaultSimd(Nd,vComplexD::Nsimd());
auto GridPtrF = SpaceTimeGrid::makeFourDimGrid(latt,simdF,mpi);
auto GridRBPtrF = SpaceTimeGrid::makeFourDimRedBlackGrid(GridPtrF);
auto FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtrF);
auto FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtrF);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
LatticeGaugeFieldF UF(GridPtrF);
// These lines are unecessary if BC are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
FermionActionF::ImplParams ParamsF(boundary);
double ActionStoppingCondition = 1e-10;
double DerivativeStoppingCondition = 1e-6;
double MaxCGIterations = 30000;
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(8);
////////////////////////////////////
// Strange action
////////////////////////////////////
typedef SchurDiagMooeeOperator<FermionActionF,FermionFieldF> LinearOperatorF;
typedef SchurDiagMooeeOperator<FermionAction ,FermionField > LinearOperatorD;
typedef SchurDiagMooeeOperator<FermionEOFAActionF,FermionFieldF> LinearOperatorEOFAF;
typedef SchurDiagMooeeOperator<FermionEOFAAction ,FermionField > LinearOperatorEOFAD;
typedef MixedPrecisionConjugateGradientOperatorFunction<MobiusFermionD,MobiusFermionF,LinearOperatorD,LinearOperatorF> MxPCG;
typedef MixedPrecisionConjugateGradientOperatorFunction<MobiusEOFAFermionD,MobiusEOFAFermionF,LinearOperatorEOFAD,LinearOperatorEOFAF> MxPCG_EOFA;
// DJM: setup for EOFA ratio (Mobius)
OneFlavourRationalParams OFRp;
OFRp.lo = 0.1;
OFRp.hi = 25.0;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-9;
OFRp.degree = 14;
OFRp.precision= 50;
MobiusEOFAFermionR Strange_Op_L (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionF Strange_Op_LF(UF, *FGridF, *FrbGridF, *GridPtrF, *GridRBPtrF, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Strange_Op_R (U , *FGrid , *FrbGrid , *GridPtr , *GridRBPtr , pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
MobiusEOFAFermionF Strange_Op_RF(UF, *FGridF, *FrbGridF, *GridPtrF, *GridRBPtrF, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
ConjugateGradient<FermionField> ActionCG(ActionStoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField> DerivativeCG(DerivativeStoppingCondition,MaxCGIterations);
#ifdef MIXED_PRECISION
const int MX_inner = 1000;
// Mixed precision EOFA
LinearOperatorEOFAD Strange_LinOp_L (Strange_Op_L);
LinearOperatorEOFAD Strange_LinOp_R (Strange_Op_R);
LinearOperatorEOFAF Strange_LinOp_LF(Strange_Op_LF);
LinearOperatorEOFAF Strange_LinOp_RF(Strange_Op_RF);
MxPCG_EOFA ActionCGL(ActionStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA DerivativeCGL(DerivativeStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_LF,Strange_Op_L,
Strange_LinOp_LF,Strange_LinOp_L);
MxPCG_EOFA ActionCGR(ActionStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
MxPCG_EOFA DerivativeCGR(DerivativeStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
Strange_Op_RF,Strange_Op_R,
Strange_LinOp_RF,Strange_LinOp_R);
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
EOFA(Strange_Op_L, Strange_Op_R,
ActionCG,
ActionCGL, ActionCGR,
DerivativeCGL, DerivativeCGR,
OFRp, true);
#else
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy>
EOFA(Strange_Op_L, Strange_Op_R,
ActionCG,
ActionCG, ActionCG,
DerivativeCG, DerivativeCG,
OFRp, true);
#endif
Level1.push_back(&EOFA);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
//////////////////////////////////////////////////////////////
// Forced to replicate the MxPCG and DenominatorsF etc.. because
// there is no convenient way to "Clone" physics params from double op
// into single op for any operator pair.
// Same issue prevents using MxPCG in the Heatbath step
//////////////////////////////////////////////////////////////
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
std::vector<MxPCG *> ActionMPCG;
std::vector<MxPCG *> MPCG;
std::vector<FermionActionF *> DenominatorsF;
std::vector<LinearOperatorD *> LinOpD;
std::vector<LinearOperatorF *> LinOpF;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
#ifdef MIXED_PRECISION
////////////////////////////////////////////////////////////////////////////
// Mixed precision CG for 2f force
////////////////////////////////////////////////////////////////////////////
DenominatorsF.push_back(new FermionActionF(UF,*FGridF,*FrbGridF,*GridPtrF,*GridRBPtrF,light_den[h],M5,b,c, ParamsF));
LinOpD.push_back(new LinearOperatorD(*Denominators[h]));
LinOpF.push_back(new LinearOperatorF(*DenominatorsF[h]));
MPCG.push_back(new MxPCG(DerivativeStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
ActionMPCG.push_back(new MxPCG(ActionStoppingCondition,
MX_inner,
MaxCGIterations,
GridPtrF,
FrbGridF,
*DenominatorsF[h],*Denominators[h],
*LinOpF[h], *LinOpD[h]) );
// Heatbath not mixed yet. As inverts numerators not so important as raised mass.
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],*MPCG[h],*ActionMPCG[h],ActionCG));
#else
////////////////////////////////////////////////////////////////////////////
// Standard CG for 2f force
////////////////////////////////////////////////////////////////////////////
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],DerivativeCG,ActionCG));
#endif
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main
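
Stripped of the Grid types, the wrapper above is a defect-correction scheme: the inner solve runs in single precision while residuals and updates are accumulated in double. A self-contained toy version on a 2x2 system (not Grid's MixedPrecisionConjugateGradient, which uses CG on the Schur operator as the inner solve):

#include <cmath>
#include <cstdio>

int main() {
  const double A[2][2] = {{4, 1}, {1, 3}};
  const double b[2] = {1, 2};
  double x[2] = {0, 0};

  for (int outer = 0; outer < 10; ++outer) {
    // double-precision residual r = b - A x
    double r[2] = { b[0] - (A[0][0]*x[0] + A[0][1]*x[1]),
                    b[1] - (A[1][0]*x[0] + A[1][1]*x[1]) };
    // "inner solve" A d = r done in single precision (exact 2x2 solve in float)
    float det = (float)(A[0][0]*A[1][1] - A[0][1]*A[1][0]);
    float d0  = ((float)A[1][1]*(float)r[0] - (float)A[0][1]*(float)r[1]) / det;
    float d1  = ((float)A[0][0]*(float)r[1] - (float)A[1][0]*(float)r[0]) / det;
    // double-precision update of the outer solution
    x[0] += d0; x[1] += d1;
    if (std::sqrt(r[0]*r[0] + r[1]*r[1]) < 1e-14) break;
  }
  std::printf("x = (%.15f, %.15f)\n", x[0], x[1]);  // ~ (1/11, 7/11)
}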

View File

@ -1,198 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_hmc_EODWFRatio.cc
Copyright (C) 2015-2016
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
int main(int argc, char **argv) {
using namespace Grid;
using namespace Grid::QCD;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
// here make a routine to print all the relevant information on the run
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
IntegratorParameters MD;
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
// MD.name = std::string("Leap Frog");
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
// MD.name = std::string("Force Gradient");
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
MD.name = std::string("MinimumNorm2");
MD.MDsteps = 20;
MD.trajL = 1.0;
HMCparameters HMCparams;
HMCparams.StartTrajectory = 30;
HMCparams.Trajectories = 200;
HMCparams.NoMetropolisUntil= 0;
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
// HMCparams.StartingType =std::string("ColdStart");
HMCparams.StartingType =std::string("CheckpointStart");
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
// Grid from the command line arguments --grid and --mpi
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 10;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
RNGModuleParameters RNGpar;
RNGpar.serial_seeds = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
// Construct observables
// here there is too much indirection
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
TheHMC.Resources.AddObservable<PlaqObs>();
//////////////////////////////////////////////
const int Ls = 16;
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.04;
Real pv_mass = 1.0;
RealD M5 = 1.8;
RealD b = 1.0;
RealD c = 0.0;
// FIXME:
// Same in MC and MD
// Need to mix precision too
OneFlavourRationalParams OFRp;
OFRp.lo = 4.0e-3;
OFRp.hi = 30.0;
OFRp.MaxIter = 10000;
OFRp.tolerance= 1.0e-10;
OFRp.degree = 16;
OFRp.precision= 50;
std::vector<Real> hasenbusch({ 0.1 });
auto GridPtr = TheHMC.Resources.GetCartesian();
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
IwasakiGaugeActionR GaugeAction(beta);
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
// These lines are unnecessary if the boundary conditions are all periodic
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
////////////////////////////////////
// Collect actions
////////////////////////////////////
ActionLevel<HMCWrapper::Field> Level1(1);
ActionLevel<HMCWrapper::Field> Level2(4);
////////////////////////////////////
// Strange action
////////////////////////////////////
// FermionAction StrangeOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_mass,M5,b,c, Params);
// DomainWallEOFAFermionR Strange_Op_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
// DomainWallEOFAFermionR Strange_Op_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
// ExactOneFlavourRatioPseudoFermionAction EOFA(Strange_Op_L,Strange_Op_R,CG,ofp, false);
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
Level1.push_back(&StrangePseudoFermion);
////////////////////////////////////
// up down action
////////////////////////////////////
std::vector<Real> light_den;
std::vector<Real> light_num;
int n_hasenbusch = hasenbusch.size();
light_den.push_back(light_mass);
for(int h=0;h<n_hasenbusch;h++){
light_den.push_back(hasenbusch[h]);
light_num.push_back(hasenbusch[h]);
}
light_num.push_back(pv_mass);
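// Worked example (these values follow from the parameters set above): with
// light_mass = 0.01, hasenbusch = {0.1} and pv_mass = 1.0 this builds
//   light_den = {0.01, 0.1} and light_num = {0.1, 1.0},
// i.e. two quotient actions pairing the masses (0.1, 0.01) and (1.0, 0.1), whose
// product telescopes to the light-quark / Pauli-Villars determinant ratio.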
std::vector<FermionAction *> Numerators;
std::vector<FermionAction *> Denominators;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
for(int h=0;h<n_hasenbusch+1;h++){
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h] << " / " << light_den[h]<< std::endl;
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
}
for(int h=0;h<n_hasenbusch+1;h++){
Level1.push_back(Quotients[h]);
}
/////////////////////////////////////////////////////////////
// Gauge action
/////////////////////////////////////////////////////////////
Level2.push_back(&GaugeAction);
TheHMC.TheAction.push_back(Level1);
TheHMC.TheAction.push_back(Level2);
std::cout << GridLogMessage << " Action complete "<< std::endl;
/////////////////////////////////////////////////////////////
// HMC parameters are serialisable
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.Run(); // no smearing
Grid_finalize();
} // main

View File

@ -1,109 +0,0 @@
********************************************************************
TODO:
********************************************************************
i) Mixed precision is in place for the 2f and EOFA force and action solves,
but is still needed in the heatbath solve. Ideally the fermion operator would have a "clone" method, to
reduce the number of solver and action objects. Needed in particular for the EOFA heatbath.
Perhaps a 15% gain.
Combine with 2x trajectory length?
ii) Rational approximation on the EOFA heatbath -- relax the order
-- Test the approximation as per David's email
Resume / roll.sh
----------------------------------------------------------------
- 16^3: currently 10 trajectories per hour
- EOFA: use a different derivative solver from the action solver
- EOFA: fix David's hack to the SchurRedBlack guessing
*** Reduce precision/tolerance in EOFA with second CG param. (10% speed up)
*** Force gradient - reduced precision solve for the gradient (4/3x speedup)
*** Need a plan for gauge field update for mixed precision in HMC (2x speed up)
-- Store the single prec action operator.
-- Clone the gauge field from the operator function argument.
-- Build the mixed precision operator dynamically from the passed operator and single prec clone.
*** Mixed precision CG into EOFA portion
*** Further reduce precision in forces to 10^-6 ?
*** Overall: a 3x or so is still possible => 500s -> 160s and 20 traj per hour on 16^3.
- Use mixed precision CG in HMC
- SchurRedBlack.h: stop use of operator function; use LinearOperator or similar instead.
- Or make an OperatorFunction for mixed precision as a wrapper
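A minimal sketch of that last point, i.e. an OperatorFunction wrapper hiding a single-precision inner solve behind the double-precision interface already used by the pseudofermion actions. This is not code from the repository; the class name, the precisionChange-based defect-correction loop and the fixed inner tolerance / outer iteration count are illustrative assumptions.

template <class FieldD, class FieldF>
class MixedPrecisionWrapper : public OperatorFunction<FieldD> {
public:
  LinearOperatorBase<FieldF> &LinOpF;   // single-precision clone of the operator
  GridBase                   *SinglePrecGrid;
  RealD                       Tolerance;
  MixedPrecisionWrapper(RealD tol, GridBase *sp_grid, LinearOperatorBase<FieldF> &lof)
    : LinOpF(lof), SinglePrecGrid(sp_grid), Tolerance(tol) {};
  void operator()(LinearOperatorBase<FieldD> &LinOpD, const FieldD &src, FieldD &psi) {
    // psi is used as the initial guess and refined by defect correction
    FieldD r(src._grid), tmp(src._grid);
    FieldF rF(SinglePrecGrid), eF(SinglePrecGrid);
    ConjugateGradient<FieldF> InnerCG(1.0e-5, 10000, false); // relaxed inner tolerance
    for (int outer = 0; outer < 10; ++outer) {
      LinOpD.HermOp(psi, tmp);                               // double-precision residual
      r = src - tmp;
      if (norm2(r) <= Tolerance*Tolerance*norm2(src)) break; // converged at outer tolerance
      precisionChange(rF, r);                                // residual down to single
      eF = zero;
      InnerCG(LinOpF, rF, eF);                               // correction solve in single
      precisionChange(tmp, eF);                              // correction back to double
      psi = psi + tmp;                                       // defect-correction update
    }
  }
};

A wrapper like this could then be passed wherever a plain ConjugateGradient OperatorFunction is handed to the pseudofermion actions, without changing their interface.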
********************************************************************
* Signed off 2+1f HMC with Hasenbusch and strange RHMC 16^3 x 32 DWF Ls=16 Plaquette 0.5883 ish
* Signed off 2+1f HMC with Hasenbusch and strange EOFA 16^3 x 32 DWF Ls=16 Plaquette 0.5883 ish
* Wilson plaquette cross-checked against CPS and literature GwilsonFnone
********************************************************************
********************************************************************
* RHMC: Timesteps & eigenranges matched from previous CPS 16^3 x 32 runs:
********************************************************************
****
Strange (m=0.04) has eigenspan
****
16^3 done as 1+1+1 with separate Pauli-Villars fields.
/dirac1/archive/QCDOC/host/QCDDWF/DWF/2+1f/16nt32/IWASAKI/b2.13/ls16/M1_8/ms0.04/mu0.01/rhmc_multitimescale/evol5/work
****
2+1f 16^3 - [ 4e^-4, 2.42 ] for strange
****
24^3 done as 1+1+1 at strange, and single quotient https://arxiv.org/pdf/0804.0473.pdf Eq 83,
****
double lambda_low = 4.0000000000000002e-04 <- strange
double lambda_low = 1.0000000000000000e-02 <- pauli villars
And high = 2.5
Array bsn_mass[3] = {
double bsn_mass[0] = 1.0000000000000000e+00
double bsn_mass[1] = 1.0000000000000000e+00
double bsn_mass[2] = 1.0000000000000000e+00
}
Array frm_mass[3] = {
double frm_mass[0] = 4.0000000000000001e-02
double frm_mass[1] = 4.0000000000000001e-02
double frm_mass[2] = 4.0000000000000001e-02
}
***
32^3
/dirac1/archive/QCDOC/host/QCDDWF/DWF/2+1f/32nt64/IWASAKI/b2.25/ls16/M1_8/ms0.03/mu0.004/evol6/work
***
Similar det scheme
double lambda_low = 4.0000000000000002e-04
double lambda_low = 1.0000000000000000e-02
Array bsn_mass[3] = {
double bsn_mass[0] = 1.0000000000000000e+00
double bsn_mass[1] = 1.0000000000000000e+00
double bsn_mass[2] = 1.0000000000000000e+00
}
Array frm_mass[3] = {
double frm_mass[0] = 3.0000000000000002e-02
double frm_mass[1] = 3.0000000000000002e-02
double frm_mass[2] = 3.0000000000000002e-02
}
********************************************************************
* Grid: Power method bounds check
********************************************************************
- Finding largest eigenvalue approx 25 not 2.5
- Conventions:
Grid MpcDagMpc based on:
(Moo-Moe Mee^-1 Meo)^dag(Moo-Moe Mee^-1 Meo)
- with Moo = 5-M5 = 3.2
- CPS use(d) Moo = 1
- Eigenrange in Grid is rescaled by Moo^2 = 3.2^2 ~ 10, so the factor of 10 is accounted for
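A quick worked check of that factor, using the numbers quoted above (2.5 being the CPS eigenrange upper bound listed earlier):
  Moo^2 = (5 - M5)^2 = 3.2^2 = 10.24
  25 / 10.24 ~ 2.4, consistent with the CPS value of 2.5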

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/A2AMatrix.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
@ -62,6 +62,9 @@ using A2AMatrixSet = Eigen::TensorMap<Eigen::Tensor<T, 5, Eigen::RowMajor>>;
template <typename T>
using A2AMatrix = Eigen::Matrix<T, -1, -1, Eigen::RowMajor>;
template <typename T>
using A2AMatrixMap = Eigen::Map<A2AMatrix<T>>;
template <typename T>
using A2AMatrixTr = Eigen::Matrix<T, -1, -1, Eigen::ColMajor>;
@ -108,7 +111,7 @@ public:
void saveBlock(const A2AMatrixSet<T> &m, const unsigned int ext, const unsigned int str,
const unsigned int i, const unsigned int j);
template <template <class> class Vec, typename VecT>
void load(Vec<VecT> &v, double *tRead = nullptr);
void load(Vec<VecT> &v, double *tRead = nullptr, const bool useCache = true);
private:
std::string filename_{""}, dataname_{""};
unsigned int nt_{0}, ni_{0}, nj_{0};
@ -495,7 +498,7 @@ void A2AMatrixIo<T>::saveBlock(const A2AMatrixSet<T> &m,
template <typename T>
template <template <class> class Vec, typename VecT>
void A2AMatrixIo<T>::load(Vec<VecT> &v, double *tRead)
void A2AMatrixIo<T>::load(Vec<VecT> &v, double *tRead, const bool useCache)
{
#ifdef HAVE_HDF5
Hdf5Reader reader(filename_);
@ -532,36 +535,55 @@ void A2AMatrixIo<T>::load(Vec<VecT> &v, double *tRead)
nj_ = hdim[2];
}
A2AMatrix<T> buf(ni_, nj_);
std::vector<hsize_t> count = {1, static_cast<hsize_t>(ni_),
static_cast<hsize_t>(nj_)},
stride = {1, 1, 1},
block = {1, 1, 1},
memCount = {static_cast<hsize_t>(ni_),
static_cast<hsize_t>(nj_)};
H5NS::DataSpace memspace(memCount.size(), memCount.data());
std::cout << "Loading timeslice";
std::cout.flush();
*tRead = 0.;
for (unsigned int tp1 = nt_; tp1 > 0; --tp1)
if (useCache)
{
unsigned int t = tp1 - 1;
std::vector<hsize_t> offset = {static_cast<hsize_t>(t), 0, 0};
std::vector<T> buf(nt_*ni_*nj_);
T *pt;
if (t % 10 == 0)
dataset.read(buf.data(), datatype);
pt = buf.data();
for (unsigned int t = 0; t < nt_; ++t)
{
std::cout << " " << t;
std::cout.flush();
A2AMatrixMap<T> bufMap(pt, ni_, nj_);
v[t] = bufMap.template cast<VecT>();
pt += ni_*nj_;
}
dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data(),
stride.data(), block.data());
if (tRead) *tRead -= usecond();
dataset.read(buf.data(), datatype, memspace, dataspace);
if (tRead) *tRead += usecond();
v[t] = buf.template cast<VecT>();
}
std::cout << std::endl;
// if useCache == false, do the I/O timeslice by timeslice (much slower)
else
{
A2AMatrix<T> buf(ni_, nj_);
std::vector<hsize_t> count = {1, static_cast<hsize_t>(ni_),
static_cast<hsize_t>(nj_)},
stride = {1, 1, 1},
block = {1, 1, 1},
memCount = {static_cast<hsize_t>(ni_),
static_cast<hsize_t>(nj_)};
H5NS::DataSpace memspace(memCount.size(), memCount.data());
std::cout << "Loading timeslice";
std::cout.flush();
*tRead = 0.;
for (unsigned int tp1 = nt_; tp1 > 0; --tp1)
{
unsigned int t = tp1 - 1;
std::vector<hsize_t> offset = {static_cast<hsize_t>(t), 0, 0};
if (t % 10 == 0)
{
std::cout << " " << t;
std::cout.flush();
}
dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data(),
stride.data(), block.data());
if (tRead) *tRead -= usecond();
dataset.read(buf.data(), datatype, memspace, dataspace);
if (tRead) *tRead += usecond();
v[t] = buf.template cast<VecT>();
}
std::cout << std::endl;
}
#else
HADRONS_ERROR(Implementation, "all-to-all matrix I/O needs HDF5 library");
#endif
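For reference, the caching pattern above can be illustrated in isolation with plain Eigen and std::vector. This is a self-contained sketch, not the Hadrons API; the function name and types are assumptions. The idea is to read the whole nt*ni*nj buffer once and then alias each timeslice with an Eigen::Map, instead of issuing one hyperslab read per timeslice.

#include <complex>
#include <vector>
#include <Eigen/Core>

using ComplexF = std::complex<float>;
using ComplexD = std::complex<double>;
template <typename T>
using Mat = Eigen::Matrix<T, -1, -1, Eigen::RowMajor>;

// Unpack a single contiguous buffer (e.g. the result of one bulk HDF5 read)
// into per-timeslice matrices without any per-timeslice file access.
std::vector<Mat<ComplexD>> unpackTimeslices(const std::vector<ComplexF> &buf,
                                             const unsigned int nt,
                                             const unsigned int ni,
                                             const unsigned int nj)
{
    std::vector<Mat<ComplexD>> v(nt);
    const ComplexF *pt = buf.data();
    for (unsigned int t = 0; t < nt; ++t)
    {
        Eigen::Map<const Mat<ComplexF>> slice(pt, ni, nj); // aliasing view, no copy
        v[t] = slice.cast<ComplexD>();                     // one cast/copy per timeslice
        pt  += ni*nj;
    }
    return v;
}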

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/A2AVectors.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: fionnoh <fionnoh@gmail.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Application.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -48,32 +48,28 @@ Application::Application(void)
{
initLogger();
auto dim = GridDefaultLatt(), mpi = GridDefaultMpi(), loc(dim);
if (dim.size())
locVol_ = 1;
for (unsigned int d = 0; d < dim.size(); ++d)
{
locVol_ = 1;
for (unsigned int d = 0; d < dim.size(); ++d)
{
loc[d] /= mpi[d];
locVol_ *= loc[d];
}
LOG(Message) << "====== HADRONS APPLICATION INITIALISATION ======" << std::endl;
LOG(Message) << "** Dimensions" << std::endl;
LOG(Message) << "Global lattice: " << dim << std::endl;
LOG(Message) << "MPI partition : " << mpi << std::endl;
LOG(Message) << "Local lattice : " << loc << std::endl;
LOG(Message) << std::endl;
LOG(Message) << "** Default parameters (and associated C macros)" << std::endl;
LOG(Message) << "ASCII output precision : " << MACOUT(DEFAULT_ASCII_PREC) << std::endl;
LOG(Message) << "Fermion implementation : " << MACOUTS(FIMPLBASE) << std::endl;
LOG(Message) << "z-Fermion implementation: " << MACOUTS(ZFIMPLBASE) << std::endl;
LOG(Message) << "Scalar implementation : " << MACOUTS(SIMPLBASE) << std::endl;
LOG(Message) << "Gauge implementation : " << MACOUTS(GIMPLBASE) << std::endl;
LOG(Message) << "Eigenvector base size : "
<< MACOUT(HADRONS_DEFAULT_LANCZOS_NBASIS) << std::endl;
LOG(Message) << "Schur decomposition : " << MACOUTS(HADRONS_DEFAULT_SCHUR) << std::endl;
LOG(Message) << std::endl;
loc[d] /= mpi[d];
locVol_ *= loc[d];
}
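// Worked example (illustrative numbers, not from this file): a 16.16.16.32 global
// lattice on an MPI decomposition 2.2.2.2 gives a local lattice 8.8.8.16 and
// locVol_ = 8*8*8*16 = 8192.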
LOG(Message) << "====== HADRONS APPLICATION INITIALISATION ======" << std::endl;
LOG(Message) << "** Dimensions" << std::endl;
LOG(Message) << "Global lattice: " << dim << std::endl;
LOG(Message) << "MPI partition : " << mpi << std::endl;
LOG(Message) << "Local lattice : " << loc << std::endl;
LOG(Message) << std::endl;
LOG(Message) << "** Default parameters (and associated C macros)" << std::endl;
LOG(Message) << "ASCII output precision : " << MACOUT(DEFAULT_ASCII_PREC) << std::endl;
LOG(Message) << "Fermion implementation : " << MACOUTS(FIMPLBASE) << std::endl;
LOG(Message) << "z-Fermion implementation: " << MACOUTS(ZFIMPLBASE) << std::endl;
LOG(Message) << "Scalar implementation : " << MACOUTS(SIMPLBASE) << std::endl;
LOG(Message) << "Gauge implementation : " << MACOUTS(GIMPLBASE) << std::endl;
LOG(Message) << "Eigenvector base size : "
<< MACOUT(HADRONS_DEFAULT_LANCZOS_NBASIS) << std::endl;
LOG(Message) << "Schur decomposition : " << MACOUTS(HADRONS_DEFAULT_SCHUR) << std::endl;
LOG(Message) << std::endl;
}
Application::Application(const Application::GlobalPar &par)
@ -118,22 +114,7 @@ void Application::run(void)
vm().setRunId(getPar().runId);
vm().printContent();
env().printContent();
if (getPar().saveSchedule or getPar().scheduleFile.empty())
{
schedule();
if (getPar().saveSchedule)
{
std::string filename;
filename = (getPar().scheduleFile.empty()) ?
"hadrons.sched" : getPar().scheduleFile;
saveSchedule(filename);
}
}
else
{
loadSchedule(getPar().scheduleFile);
}
schedule();
printSchedule();
if (!getPar().graphFile.empty())
{
@ -180,13 +161,12 @@ void Application::parseParameterFile(const std::string parameterFileName)
pop(reader);
}
void Application::saveParameterFile(const std::string parameterFileName, unsigned int prec)
void Application::saveParameterFile(const std::string parameterFileName)
{
LOG(Message) << "Saving application to '" << parameterFileName << "'..." << std::endl;
if (env().getGrid()->IsBoss())
{
XmlWriter writer(parameterFileName);
writer.setPrecision(prec);
ObjectId id;
const unsigned int nMod = vm().getNModule();

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Application.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -41,14 +41,6 @@ BEGIN_HADRONS_NAMESPACE
class Application
{
public:
class TrajRange: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(TrajRange,
unsigned int, start,
unsigned int, end,
unsigned int, step);
};
class GlobalPar: Serializable
{
public:
@ -57,8 +49,6 @@ public:
VirtualMachine::GeneticPar, genetic,
std::string, runId,
std::string, graphFile,
std::string, scheduleFile,
bool, saveSchedule,
int, parallelWriteMaxRetry);
GlobalPar(void): parallelWriteMaxRetry{-1} {}
};
@ -81,7 +71,7 @@ public:
void run(void);
// XML parameter file I/O
void parseParameterFile(const std::string parameterFileName);
void saveParameterFile(const std::string parameterFileName, unsigned int prec=15);
void saveParameterFile(const std::string parameterFileName);
// schedule computation
void schedule(void);
void saveSchedule(const std::string filename);

View File

@ -4,11 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/DilutedNoise.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Vera Guelpers <Vera.Guelpers@ed.ac.uk>
Author: Vera Guelpers <vmg1n14@soton.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/DiskVector.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -395,26 +395,12 @@ void DiskVectorBase<T>::cacheInsert(const unsigned int i, const T &obj) const
auto &freeInd = *freePtr_;
auto &loads = *loadsPtr_;
// cache miss, evict and store
if (index.find(i) == index.end())
{
evict();
index[i] = freeInd.top();
freeInd.pop();
cache[index.at(i)] = obj;
loads.push_back(i);
modified[index.at(i)] = false;
}
// cache hit, modify current value
else
{
auto pos = std::find(loads.begin(), loads.end(), i);
cache[index.at(i)] = obj;
modified[index.at(i)] = true;
loads.erase(pos);
loads.push_back(i);
}
evict();
index[i] = freeInd.top();
freeInd.pop();
cache[index.at(i)] = obj;
loads.push_back(i);
modified[index.at(i)] = false;
#ifdef DV_DEBUG
std::string msg;

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/EigenPack.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -308,9 +308,7 @@ template <typename FineF, typename CoarseF,
class CoarseEigenPack: public EigenPack<FineF, FineFIo>
{
public:
typedef CoarseF CoarseField;
typedef CoarseFIo CoarseFieldIo;
public:
typedef CoarseF CoarseField;
std::vector<CoarseF> evecCoarse;
std::vector<RealD> evalCoarse;
public:

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Environment.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -43,13 +43,15 @@ HADRONS_ERROR_REF(ObjectDefinition, "no object with address " + std::to_string(a
// constructor /////////////////////////////////////////////////////////////////
Environment::Environment(void)
{
dim_ = GridDefaultLatt();
nd_ = dim_.size();
dim_ = GridDefaultLatt();
nd_ = dim_.size();
createGrid<vComplex>(1);
vol_ = 1.;
for (auto d: dim_)
{
vol_ *= d;
}
rng4d_.reset(new GridParallelRNG(getGrid()));
}
// grids ///////////////////////////////////////////////////////////////////////
@ -74,13 +76,8 @@ double Environment::getVolume(void) const
}
// random number generator /////////////////////////////////////////////////////
GridParallelRNG * Environment::get4dRng(void)
GridParallelRNG * Environment::get4dRng(void) const
{
if (rng4d_ == nullptr)
{
rng4d_.reset(new GridParallelRNG(getGrid()));
}
return rng4d_.get();
}

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Environment.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -113,7 +113,7 @@ public:
unsigned int getNd(void) const;
double getVolume(void) const;
// random number generator
GridParallelRNG * get4dRng(void);
GridParallelRNG * get4dRng(void) const;
// general memory management
void addObject(const std::string name,
const int moduleAddress = -1);
@ -182,7 +182,7 @@ private:
std::map<CoarseGridKey, GridPt> gridCoarse5d_;
unsigned int nd_;
// random number generator
RngPt rng4d_{nullptr};
RngPt rng4d_;
// object store
std::vector<ObjInfo> object_;
std::map<std::string, unsigned int> objectAddress_;

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Exceptions.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Exceptions.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Factory.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/GeneticScheduler.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Global.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Global.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>
@ -44,8 +44,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#define DEFAULT_ASCII_PREC 16
#endif
#define ARG(...) __VA_ARGS__
/* the 'using Grid::operator<<;' statement prevents a very nasty compilation
* error with GCC 5 (clang & GCC 6 compile fine without it).
*/
@ -103,17 +101,15 @@ BEGIN_HADRONS_NAMESPACE
typedef typename Impl::Field ScalarField##suffix;\
typedef typename Impl::PropagatorField PropagatorField##suffix;\
typedef typename Impl::SitePropagator::scalar_object SitePropagator##suffix;\
typedef typename Impl::ComplexField ComplexField##suffix;\
typedef std::vector<SitePropagator##suffix> SlicedPropagator##suffix;\
typedef std::vector<typename ComplexField##suffix::vector_object::scalar_object> SlicedComplex##suffix;
typedef std::vector<SitePropagator##suffix> SlicedPropagator##suffix;
#define FERM_TYPE_ALIASES(FImpl, suffix)\
BASIC_TYPE_ALIASES(FImpl, suffix);\
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;\
typedef Lattice<iSpinMatrix<typename FImpl::Simd>> SpinMatrixField##suffix;
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;\
typedef typename FImpl::ComplexField ComplexField##suffix;
#define GAUGE_TYPE_ALIASES(GImpl, suffix)\
typedef typename GImpl::GaugeField GaugeField##suffix;
@ -267,13 +263,31 @@ void tokenReplace(std::string &str, const std::string token,
}
}
// generic correlator class
template <typename Metadata, typename Scalar = Complex>
struct Correlator: Serializable
// trajectory range
class TrajRange: Serializable
{
GRID_SERIALIZABLE_CLASS_MEMBERS(ARG(Correlator<Metadata, Scalar>),
Metadata, info,
std::vector<Complex>, corr);
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(TrajRange,
unsigned int, start,
unsigned int, end,
unsigned int, step,
std::string, exclude);
inline std::vector<unsigned int> getTrajectoryList(void)
{
std::vector<unsigned int> excVec = strToVec<unsigned int>(exclude);
std::vector<unsigned int> list;
for (unsigned int t = start; t < end; t += step)
{
if (std::find(excVec.begin(), excVec.end(), t) == excVec.end())
{
list.push_back(t);
}
}
return list;
}
};
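A brief usage sketch of the trajectory range above (the numeric values are illustrative only, not taken from any run):

TrajRange r;
r.start   = 1000;
r.end     = 1100;   // exclusive upper bound, as in getTrajectoryList()
r.step    = 20;
r.exclude = "1040"; // space-separated list, parsed with strToVec
for (unsigned int t: r.getTrajectoryList())
{
    std::cout << t << std::endl;  // 1000 1020 1060 1080
}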
END_HADRONS_NAMESPACE

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Graph.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -34,4 +34,5 @@ nobase_libHadrons_a_HEADERS = \
Solver.hpp \
TimerArray.hpp \
VirtualMachine.hpp \
Utilities/Contractor.hpp \
$(modules_hpp)

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Module.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Module.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -65,6 +65,7 @@ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance;
extern template class base;\
MODULE_REGISTER(mod, ARG(base), ns);
#define ARG(...) __VA_ARGS__
#define HADRONS_MACRO_REDIRECT_12(arg1, arg2, macro, ...) macro
#define HADRONS_MACRO_REDIRECT_23(arg1, arg2, arg3, macro, ...) macro
@ -77,15 +78,6 @@ env().template getGrid<typename latticeType::vector_type>(Ls)
#define envGetGrid(...)\
HADRONS_MACRO_REDIRECT_12(__VA_ARGS__, envGetGrid5, envGetGrid4)(__VA_ARGS__)
#define envGetCoarseGrid4(latticeType, blockSize)\
env().template getCoarseGrid<typename latticeType::vector_type>(blockSize)
#define envGetCoarseGrid5(latticeType, blockSize, Ls)\
env().template getCoarseGrid<typename latticeType::vector_type>(blockSize, Ls)
#define envGetCoarseGrid(...)\
HADRONS_MACRO_REDIRECT_23(__VA_ARGS__, envGetCoarseGrid5, envGetCoarseGrid4)(__VA_ARGS__)
#define envGetRbGrid4(latticeType)\
env().template getRbGrid<typename latticeType::vector_type>()

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/ModuleFactory.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -1,71 +1,74 @@
#include <Hadrons/Modules/MSource/Gauss.hpp>
#include <Hadrons/Modules/MSource/Momentum.hpp>
#include <Hadrons/Modules/MSource/SeqAslash.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
#include <Hadrons/Modules/MSource/Point.hpp>
#include <Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Hadrons/Modules/MSource/Convolution.hpp>
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Hadrons/Modules/MScalarSUN/TrPhi.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
#include <Hadrons/Modules/MScalarSUN/Grad.hpp>
#include <Hadrons/Modules/MScalarSUN/Utils.hpp>
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/TransProj.hpp>
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MScalar/FreeProp.hpp>
#include <Hadrons/Modules/MScalar/Scalar.hpp>
#include <Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Hadrons/Modules/MAction/Wilson.hpp>
#include <Hadrons/Modules/MAction/ScaledDWF.hpp>
#include <Hadrons/Modules/MAction/MobiusDWF.hpp>
#include <Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Hadrons/Modules/MAction/ZMobiusDWF.hpp>
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Hadrons/Modules/MGauge/Electrify.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
#include <Hadrons/Modules/MIO/LoadA2AVectors.hpp>
#include <Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Hadrons/Modules/MIO/LoadBinary.hpp>
#include <Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Hadrons/Modules/MContraction/WeakEye3pt.hpp>
#include <Hadrons/Modules/MContraction/WeakMesonDecayKl2.hpp>
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MContraction/A2ALoop.hpp>
#include <Hadrons/Modules/MContraction/WeakNonEye3pt.hpp>
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MContraction/Baryon.hpp>
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Hadrons/Modules/MNPR/FourQuark.hpp>
#include <Hadrons/Modules/MNPR/Bilinear.hpp>
#include <Hadrons/Modules/MNPR/Amputate.hpp>
#include <Hadrons/Modules/MSolver/A2AAslashVectors.hpp>
#include <Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
#include <Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp>
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp>
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Hadrons/Modules/MContraction/WardIdentity.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp>
#include <Hadrons/Modules/MFermion/FreeProp.hpp>
#include <Hadrons/Modules/MFermion/GaugeProp.hpp>
#include <Hadrons/Modules/MFermion/EMLepton.hpp>
#include <Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Hadrons/Modules/MSource/Point.hpp>
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
#include <Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Hadrons/Modules/MSource/Momentum.hpp>
#include <Hadrons/Modules/MSink/Smear.hpp>
#include <Hadrons/Modules/MSink/Point.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
#include <Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MSolver/A2AAslashVectors.hpp>
#include <Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/Electrify.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqGamma.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqConserved.hpp>
#include <Hadrons/Modules/MLoop/NoiseLoop.hpp>
#include <Hadrons/Modules/MScalar/FreeProp.hpp>
#include <Hadrons/Modules/MScalar/VPCounterTerms.hpp>
#include <Hadrons/Modules/MScalar/ScalarVP.hpp>
#include <Hadrons/Modules/MScalar/Scalar.hpp>
#include <Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Hadrons/Modules/MNPR/Bilinear.hpp>
#include <Hadrons/Modules/MNPR/Amputate.hpp>
#include <Hadrons/Modules/MNPR/FourQuark.hpp>
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Hadrons/Modules/MAction/MobiusDWF.hpp>
#include <Hadrons/Modules/MAction/Wilson.hpp>
#include <Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Hadrons/Modules/MAction/ZMobiusDWF.hpp>
#include <Hadrons/Modules/MAction/ScaledDWF.hpp>
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/ShiftProbe.hpp>
#include <Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
#include <Hadrons/Modules/MScalarSUN/TrPhi.hpp>
#include <Hadrons/Modules/MScalarSUN/Utils.hpp>
#include <Hadrons/Modules/MScalarSUN/TransProj.hpp>
#include <Hadrons/Modules/MScalarSUN/Grad.hpp>
#include <Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Hadrons/Modules/MIO/LoadA2AVectors.hpp>
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
#include <Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadBinary.hpp>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/DWF.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/DWF.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>
@ -112,6 +112,8 @@ void TDWF<FImpl>::setup(void)
<< par().mass << ", M5= " << par().M5 << " and Ls= "
<< par().Ls << " using gauge field '" << par().gauge << "'"
<< std::endl;
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
@ -119,26 +121,8 @@ void TDWF<FImpl>::setup(void)
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
typename DomainWallFermion<FImpl>::ImplParams implParams;
if (!par().boundary.empty())
{
implParams.boundary_phases = strToVec<Complex>(par().boundary);
}
if (!par().twist.empty())
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");
}
if (implParams.twist_n_2pi_L.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of twist");
}
implParams.boundary_phases = strToVec<Complex>(par().boundary);
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
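// For example (illustrative values, not defaults of this module): a boundary
// string of "1 1 1 -1" gives antiperiodic fermions in the time direction, and a
// twist string of "0 0 0 0" applies no twisted boundary phases; both are
// space-separated lists parsed by strToVec.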
envCreateDerived(FMat, DomainWallFermion<FImpl>, getName(), par().Ls, U, g5,
grb5, g4, grb4, par().mass, par().M5, implParams);
}

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/MobiusDWF.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/MobiusDWF.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -112,6 +112,8 @@ void TMobiusDWF<FImpl>::setup(void)
<< ", b= " << par().b << ", c= " << par().c
<< " using gauge field '" << par().gauge << "'"
<< std::endl;
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
@ -119,26 +121,8 @@ void TMobiusDWF<FImpl>::setup(void)
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
typename MobiusFermion<FImpl>::ImplParams implParams;
if (!par().boundary.empty())
{
implParams.boundary_phases = strToVec<Complex>(par().boundary);
}
if (!par().twist.empty())
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");
}
if (implParams.twist_n_2pi_L.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of twist");
}
implParams.boundary_phases = strToVec<Complex>(par().boundary);
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
envCreateDerived(FMat, MobiusFermion<FImpl>, getName(), par().Ls, U, g5,
grb5, g4, grb4, par().mass, par().M5, par().b, par().c,
implParams);

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ScaledDWF.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ScaledDWF.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
@ -111,6 +111,8 @@ void TScaledDWF<FImpl>::setup(void)
<< ", scale= " << par().scale
<< " using gauge field '" << par().gauge << "'"
<< std::endl;
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
@ -118,26 +120,8 @@ void TScaledDWF<FImpl>::setup(void)
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
typename ScaledShamirFermion<FImpl>::ImplParams implParams;
if (!par().boundary.empty())
{
implParams.boundary_phases = strToVec<Complex>(par().boundary);
}
if (!par().twist.empty())
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");
}
if (implParams.twist_n_2pi_L.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of twist");
}
implParams.boundary_phases = strToVec<Complex>(par().boundary);
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
envCreateDerived(FMat, ScaledShamirFermion<FImpl>, getName(), par().Ls, U, g5,
grb5, g4, grb4, par().mass, par().M5, par().scale,
implParams);

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/Wilson.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/Wilson.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>
@ -109,31 +109,15 @@ void TWilson<FImpl>::setup(void)
{
LOG(Message) << "Setting up Wilson fermion matrix with m= " << par().mass
<< " using gauge field '" << par().gauge << "'" << std::endl;
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &grid = *envGetGrid(FermionField);
auto &gridRb = *envGetRbGrid(FermionField);
typename WilsonFermion<FImpl>::ImplParams implParams;
if (!par().boundary.empty())
{
implParams.boundary_phases = strToVec<Complex>(par().boundary);
}
if (!par().twist.empty())
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");
}
if (implParams.twist_n_2pi_L.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of twist");
}
implParams.boundary_phases = strToVec<Complex>(par().boundary);
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
envCreateDerived(FMat, WilsonFermion<FImpl>, getName(), 1, U, grid, gridRb,
par().mass, implParams);
}

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/WilsonClover.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/WilsonClover.hpp
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
@ -112,34 +112,17 @@ void TWilsonClover<FImpl>::setup(void)
{
LOG(Message) << "Setting up Wilson clover fermion matrix with m= " << par().mass
<< " using gauge field '" << par().gauge << "'" << std::endl;
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
LOG(Message) << "Clover term csw_r: " << par().csw_r
<< " csw_t: " << par().csw_t
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &grid = *envGetGrid(FermionField);
auto &gridRb = *envGetRbGrid(FermionField);
typename WilsonCloverFermion<FImpl>::ImplParams implParams;
if (!par().boundary.empty())
{
implParams.boundary_phases = strToVec<Complex>(par().boundary);
}
if (!par().twist.empty())
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");
}
if (implParams.twist_n_2pi_L.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of twist");
}
implParams.boundary_phases = strToVec<Complex>(par().boundary);
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
envCreateDerived(FMat, WilsonCloverFermion<FImpl>, getName(), 1, U, grid,
gridRb, par().mass, par().csw_r, par().csw_t,
par().clover_anisotropy, implParams);

View File

@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ZMobiusDWF.cc
Copyright (C) 2015-2019
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>

Some files were not shown because too many files have changed in this diff.