Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-10 07:55:35 +00:00)

Commit 3f9ae6e7e7: Merge branch 'develop' into feature/a64fx-3
@@ -37,7 +37,9 @@ directory
 #endif
 
 //disables and intel compiler specific warning (in json.hpp)
+#ifdef __ICC
 #pragma warning disable 488
+#endif
 
 #ifdef __NVCC__
 //disables nvcc specific warning in json.hpp
@@ -28,4 +28,7 @@
 ///////////////////
 #include "Config.h"
 
+#ifdef TOFU
+#undef GRID_COMMS_THREADS
+#endif
 #endif /* GRID_STD_H */
@@ -21,6 +21,7 @@ if BUILD_HDF5
 extra_headers+=serialisation/Hdf5Type.h
 endif
 
+
 all: version-cache Version.h
 
 version-cache:
@@ -53,6 +54,17 @@ Version.h: version-cache
 include Make.inc
 include Eigen.inc
 
+extra_sources+=$(ZWILS_FERMION_FILES)
+extra_sources+=$(WILS_FERMION_FILES)
+extra_sources+=$(STAG_FERMION_FILES)
+if BUILD_GPARITY
+extra_sources+=$(GP_FERMION_FILES)
+endif
+if BUILD_FERMION_REPS
+extra_sources+=$(ADJ_FERMION_FILES)
+extra_sources+=$(TWOIND_FERMION_FILES)
+endif
+
 lib_LIBRARIES = libGrid.a
 
 CCFILES += $(extra_sources)
@@ -31,6 +31,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_ALGORITHM_COARSENED_MATRIX_H
 #define GRID_ALGORITHM_COARSENED_MATRIX_H
 
+#include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No)
 
 NAMESPACE_BEGIN(Grid);
 
@@ -59,12 +60,14 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner,
 class Geometry {
 public:
   int npoint;
+  int base;
   std::vector<int> directions   ;
   std::vector<int> displacements;
+  std::vector<int> points_dagger;
 
   Geometry(int _d) {
 
-    int base = (_d==5) ? 1:0;
+    base = (_d==5) ? 1:0;
 
     // make coarse grid stencil for 4d , not 5d
     if ( _d==5 ) _d=4;
@@ -72,16 +75,51 @@ public:
     npoint = 2*_d+1;
     directions.resize(npoint);
     displacements.resize(npoint);
+    points_dagger.resize(npoint);
     for(int d=0;d<_d;d++){
       directions[d   ] = d+base;
       directions[d+_d] = d+base;
       displacements[d  ] = +1;
       displacements[d+_d]= -1;
+      points_dagger[d   ] = d+_d;
+      points_dagger[d+_d] = d;
     }
     directions   [2*_d]=0;
     displacements[2*_d]=0;
+    points_dagger[2*_d]=2*_d;
   }
 
+  int point(int dir, int disp) {
+    assert(disp == -1 || disp == 0 || disp == 1);
+    assert(base+0 <= dir && dir < base+4);
+
+    // directions faster index = new indexing
+    // 4d (base = 0):
+    // point 0  1  2  3  4  5  6  7  8
+    // dir   0  1  2  3  0  1  2  3  0
+    // disp +1 +1 +1 +1 -1 -1 -1 -1  0
+    // 5d (base = 1):
+    // point 0  1  2  3  4  5  6  7  8
+    // dir   1  2  3  4  1  2  3  4  0
+    // disp +1 +1 +1 +1 -1 -1 -1 -1  0
+
+    // displacements faster index = old indexing
+    // 4d (base = 0):
+    // point 0  1  2  3  4  5  6  7  8
+    // dir   0  0  1  1  2  2  3  3  0
+    // disp +1 -1 +1 -1 +1 -1 +1 -1  0
+    // 5d (base = 1):
+    // point 0  1  2  3  4  5  6  7  8
+    // dir   1  1  2  2  3  3  4  4  0
+    // disp +1 -1 +1 -1 +1 -1 +1 -1  0
+
+    if(dir == 0 and disp == 0)
+      return 8;
+    else // New indexing
+      return (1 - disp) / 2 * 4 + dir - base;
+    // else // Old indexing
+    //   return (4 * (dir - base) + 1 - disp) / 2;
+  }
 };
 
 template<class Fobj,class CComplex,int nbasis>
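Aside: the new point(dir,disp) indexing inverts the tables built in the constructor, so point(directions[p], displacements[p]) recovers p for all nine stencil points. A minimal standalone check of just that arithmetic (plain C++, a sketch written for this note, not code from the patch):

```cpp
#include <cassert>
#include <initializer_list>
#include <vector>

int main(void) {
  for (int nd : {4, 5}) {           // 4d action, and 5d (DWF-like) action
    int base = (nd == 5) ? 1 : 0;
    int _d = 4;                     // coarse stencil is 4d in both cases
    std::vector<int> directions(2*_d+1), displacements(2*_d+1);
    for (int d = 0; d < _d; d++) {
      directions[d]    = d + base;  displacements[d]    = +1;
      directions[d+_d] = d + base;  displacements[d+_d] = -1;
    }
    directions[2*_d] = 0; displacements[2*_d] = 0;
    for (int p = 0; p < 2*_d+1; p++) {
      int dir = directions[p], disp = displacements[p];
      int point = (dir == 0 && disp == 0) ? 8            // self point
                : (1 - disp) / 2 * 4 + dir - base;       // new indexing
      assert(point == p);           // round-trips for all nine points
    }
  }
  return 0;
}
```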
@@ -258,7 +296,7 @@ public:
 // Fine Object == (per site) type of fine field
 //   nbasis == number of deflation vectors
 template<class Fobj,class CComplex,int nbasis>
-class CoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
+class CoarsenedMatrix : public CheckerBoardedSparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
 public:
 
   typedef iVector<CComplex,nbasis > siteVector;
@@ -268,33 +306,59 @@ public:
   typedef iMatrix<CComplex,nbasis > Cobj;
   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field
   typedef Lattice<Fobj >        FineField;
+  typedef CoarseVector FermionField;
+
+  // enrich interface, use default implementation as in FermionOperator ///////
+  void Dminus(CoarseVector const& in, CoarseVector& out) { out = in; }
+  void DminusDag(CoarseVector const& in, CoarseVector& out) { out = in; }
+  void ImportPhysicalFermionSource(CoarseVector const& input, CoarseVector& imported) { imported = input; }
+  void ImportUnphysicalFermion(CoarseVector const& input, CoarseVector& imported) { imported = input; }
+  void ExportPhysicalFermionSolution(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };
+  void ExportPhysicalFermionSource(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };
 
   ////////////////////
   // Data members
   ////////////////////
   Geometry  geom;
   GridBase *_grid;
+  GridBase* _cbgrid;
   int hermitian;
 
   CartesianStencil<siteVector,siteVector,int> Stencil;
+  CartesianStencil<siteVector,siteVector,int> StencilEven;
+  CartesianStencil<siteVector,siteVector,int> StencilOdd;
 
   std::vector<CoarseMatrix> A;
+  std::vector<CoarseMatrix> Aeven;
+  std::vector<CoarseMatrix> Aodd;
+
+  CoarseMatrix AselfInv;
+  CoarseMatrix AselfInvEven;
+  CoarseMatrix AselfInvOdd;
+
+  Vector<RealD> dag_factor;
 
   ///////////////////////
   // Interface
   ///////////////////////
   GridBase * Grid(void)     { return _grid; };   // this is all the linalg routines need to know
+  GridBase * RedBlackGrid() { return _cbgrid; };
+
+  int ConstEE() { return 0; }
 
   void M (const CoarseVector &in, CoarseVector &out)
   {
     conformable(_grid,in.Grid());
     conformable(in.Grid(),out.Grid());
+    out.Checkerboard() = in.Checkerboard();
 
     SimpleCompressor<siteVector> compressor;
 
     Stencil.HaloExchange(in,compressor);
     autoView( in_v , in, AcceleratorRead);
     autoView( out_v , out, AcceleratorWrite);
+    autoView( Stencil_v , Stencil, AcceleratorRead);
+    auto& geom_v = geom;
     typedef LatticeView<Cobj> Aview;
 
     Vector<Aview> AcceleratorViewContainer;
@@ -316,14 +380,14 @@ public:
       int ptype;
       StencilEntry *SE;
 
-      for(int point=0;point<geom.npoint;point++){
+      for(int point=0;point<geom_v.npoint;point++){
 
-        SE=Stencil.GetEntry(ptype,point,ss);
+        SE=Stencil_v.GetEntry(ptype,point,ss);
 
         if(SE->_is_local) {
           nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
         } else {
-          nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
+          nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
         }
         acceleratorSynchronise();
 
@@ -344,12 +408,72 @@ public:
       return M(in,out);
     } else {
       // corresponds to Galerkin coarsening
-      CoarseVector tmp(Grid());
-      G5C(tmp, in);
-      M(tmp, out);
-      G5C(out, out);
+      return MdagNonHermitian(in, out);
     }
   };
 
+  void MdagNonHermitian(const CoarseVector &in, CoarseVector &out)
+  {
+    conformable(_grid,in.Grid());
+    conformable(in.Grid(),out.Grid());
+    out.Checkerboard() = in.Checkerboard();
+
+    SimpleCompressor<siteVector> compressor;
+
+    Stencil.HaloExchange(in,compressor);
+    autoView( in_v , in, AcceleratorRead);
+    autoView( out_v , out, AcceleratorWrite);
+    autoView( Stencil_v , Stencil, AcceleratorRead);
+    auto& geom_v = geom;
+    typedef LatticeView<Cobj> Aview;
+
+    Vector<Aview> AcceleratorViewContainer;
+
+    for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer.push_back(A[p].View(AcceleratorRead));
+    Aview *Aview_p = & AcceleratorViewContainer[0];
+
+    const int Nsimd = CComplex::Nsimd();
+    typedef decltype(coalescedRead(in_v[0])) calcVector;
+    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
+
+    int osites=Grid()->oSites();
+
+    Vector<int> points(geom.npoint, 0);
+    for(int p=0; p<geom.npoint; p++)
+      points[p] = geom.points_dagger[p];
+
+    RealD* dag_factor_p = &dag_factor[0];
+
+    accelerator_for(sss, Grid()->oSites()*nbasis, Nsimd, {
+      int ss = sss/nbasis;
+      int b  = sss%nbasis;
+      calcComplex res = Zero();
+      calcVector nbr;
+      int ptype;
+      StencilEntry *SE;
+
+      for(int p=0;p<geom_v.npoint;p++){
+        int point = points[p];
+
+        SE=Stencil_v.GetEntry(ptype,point,ss);
+
+        if(SE->_is_local) {
+          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
+        } else {
+          nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
+        }
+        acceleratorSynchronise();
+
+        for(int bb=0;bb<nbasis;bb++) {
+          res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
+        }
+      }
+      coalescedWrite(out_v[ss](b),res);
+    });
+
+    for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer[p].ViewClose();
+  }
+
   void MdirComms(const CoarseVector &in)
   {
     SimpleCompressor<siteVector> compressor;
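Aside: the dagger of the non-hermitian coarse operator is taken without materialising a transposed stencil: points are visited in points_dagger order (swapping the +mu and -mu hops) and each (b,bb) link entry is weighted by dag_factor[b*nbasis+bb]. With the chirality-blocked coarse basis this weight is a pure sign, built by fillFactor() further down in this patch. A standalone sketch of that sign pattern for nbasis = 4 (illustrative only):

```cpp
#include <cstdio>

int main(void) {
  const int nbasis = 4, nb = nbasis / 2;
  // +1 within a chirality block, -1 across blocks, matching
  // dag_factor_eigen.block(0,nb,nb,nb) *= -1.0; etc. in fillFactor().
  double dag_factor[nbasis][nbasis];
  for (int j = 0; j < nbasis; j++)
    for (int k = 0; k < nbasis; k++)
      dag_factor[j][k] = ((j < nb) == (k < nb)) ? +1.0 : -1.0;

  for (int j = 0; j < nbasis; j++) {
    for (int k = 0; k < nbasis; k++) printf(" %+.0f", dag_factor[j][k]);
    printf("\n");                   // prints the 2x2 block sign structure
  }
  return 0;
}
```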
@@ -359,6 +483,7 @@ public:
   {
     conformable(_grid,in.Grid());
     conformable(_grid,out.Grid());
+    out.Checkerboard() = in.Checkerboard();
 
     typedef LatticeView<Cobj> Aview;
     Vector<Aview> AcceleratorViewContainer;
@@ -367,6 +492,7 @@ public:
 
     autoView( out_v , out, AcceleratorWrite);
     autoView( in_v  , in, AcceleratorRead);
+    autoView( Stencil_v , Stencil, AcceleratorRead);
 
     const int Nsimd = CComplex::Nsimd();
     typedef decltype(coalescedRead(in_v[0])) calcVector;
@@ -380,12 +506,12 @@ public:
       int ptype;
       StencilEntry *SE;
 
-      SE=Stencil.GetEntry(ptype,point,ss);
+      SE=Stencil_v.GetEntry(ptype,point,ss);
 
       if(SE->_is_local) {
         nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
       } else {
-        nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
+        nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
       }
       acceleratorSynchronise();
 
@@ -413,34 +539,7 @@ public:
 
     this->MdirComms(in);
 
-    int ndim = in.Grid()->Nd();
-
-    //////////////
-    // 4D action like wilson
-    // 0+ => 0
-    // 0- => 1
-    // 1+ => 2
-    // 1- => 3
-    // etc..
-    //////////////
-    // 5D action like DWF
-    // 1+ => 0
-    // 1- => 1
-    // 2+ => 2
-    // 2- => 3
-    // etc..
-    auto point = [dir, disp, ndim](){
-      if(dir == 0 and disp == 0)
-        return 8;
-      else if ( ndim==4 ) {
-        return (4 * dir + 1 - disp) / 2;
-      } else {
-        return (4 * (dir-1) + 1 - disp) / 2;
-      }
-    }();
-
-    MdirCalc(in,out,point);
-
+    MdirCalc(in,out,geom.point(dir,disp));
   };
 
   void Mdiag(const CoarseVector &in, CoarseVector &out)
@@ -449,17 +548,269 @@ public:
     MdirCalc(in, out, point); // No comms
   };
 
+  void Mooee(const CoarseVector &in, CoarseVector &out) {
+    MooeeInternal(in, out, DaggerNo, InverseNo);
+  }
+
+  void MooeeInv(const CoarseVector &in, CoarseVector &out) {
+    MooeeInternal(in, out, DaggerNo, InverseYes);
+  }
+
+  void MooeeDag(const CoarseVector &in, CoarseVector &out) {
+    MooeeInternal(in, out, DaggerYes, InverseNo);
+  }
+
+  void MooeeInvDag(const CoarseVector &in, CoarseVector &out) {
+    MooeeInternal(in, out, DaggerYes, InverseYes);
+  }
+
+  void Meooe(const CoarseVector &in, CoarseVector &out) {
+    if(in.Checkerboard() == Odd) {
+      DhopEO(in, out, DaggerNo);
+    } else {
+      DhopOE(in, out, DaggerNo);
+    }
+  }
+
+  void MeooeDag(const CoarseVector &in, CoarseVector &out) {
+    if(in.Checkerboard() == Odd) {
+      DhopEO(in, out, DaggerYes);
+    } else {
+      DhopOE(in, out, DaggerYes);
+    }
+  }
+
+  void Dhop(const CoarseVector &in, CoarseVector &out, int dag) {
+    conformable(in.Grid(), _grid); // verifies full grid
+    conformable(in.Grid(), out.Grid());
+
+    out.Checkerboard() = in.Checkerboard();
+
+    DhopInternal(Stencil, A, in, out, dag);
+  }
+
+  void DhopOE(const CoarseVector &in, CoarseVector &out, int dag) {
+    conformable(in.Grid(), _cbgrid);    // verifies half grid
+    conformable(in.Grid(), out.Grid()); // drops the cb check
+
+    assert(in.Checkerboard() == Even);
+    out.Checkerboard() = Odd;
+
+    DhopInternal(StencilEven, Aodd, in, out, dag);
+  }
+
+  void DhopEO(const CoarseVector &in, CoarseVector &out, int dag) {
+    conformable(in.Grid(), _cbgrid);    // verifies half grid
+    conformable(in.Grid(), out.Grid()); // drops the cb check
+
+    assert(in.Checkerboard() == Odd);
+    out.Checkerboard() = Even;
+
+    DhopInternal(StencilOdd, Aeven, in, out, dag);
+  }
+
+  void MooeeInternal(const CoarseVector &in, CoarseVector &out, int dag, int inv) {
+    out.Checkerboard() = in.Checkerboard();
+    assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);
+
+    CoarseMatrix *Aself = nullptr;
+    if(in.Grid()->_isCheckerBoarded) {
+      if(in.Checkerboard() == Odd) {
+        Aself = (inv) ? &AselfInvOdd : &Aodd[geom.npoint-1];
+        DselfInternal(StencilOdd, *Aself, in, out, dag);
+      } else {
+        Aself = (inv) ? &AselfInvEven : &Aeven[geom.npoint-1];
+        DselfInternal(StencilEven, *Aself, in, out, dag);
+      }
+    } else {
+      Aself = (inv) ? &AselfInv : &A[geom.npoint-1];
+      DselfInternal(Stencil, *Aself, in, out, dag);
+    }
+    assert(Aself != nullptr);
+  }
+
+  void DselfInternal(CartesianStencil<siteVector,siteVector,int> &st, CoarseMatrix &a,
+                     const CoarseVector &in, CoarseVector &out, int dag) {
+    int point = geom.npoint-1;
+    autoView( out_v, out, AcceleratorWrite);
+    autoView( in_v,  in,  AcceleratorRead);
+    autoView( st_v,  st,  AcceleratorRead);
+    autoView( a_v,   a,   AcceleratorRead);
+
+    const int Nsimd = CComplex::Nsimd();
+    typedef decltype(coalescedRead(in_v[0])) calcVector;
+    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
+
+    RealD* dag_factor_p = &dag_factor[0];
+
+    if(dag) {
+      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
+        int ss = sss/nbasis;
+        int b  = sss%nbasis;
+        calcComplex res = Zero();
+        calcVector nbr;
+        int ptype;
+        StencilEntry *SE;
+
+        SE=st_v.GetEntry(ptype,point,ss);
+
+        if(SE->_is_local) {
+          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
+        } else {
+          nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
+        }
+        acceleratorSynchronise();
+
+        for(int bb=0;bb<nbasis;bb++) {
+          res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(a_v[ss](b,bb))*nbr(bb);
+        }
+        coalescedWrite(out_v[ss](b),res);
+      });
+    } else {
+      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
+        int ss = sss/nbasis;
+        int b  = sss%nbasis;
+        calcComplex res = Zero();
+        calcVector nbr;
+        int ptype;
+        StencilEntry *SE;
+
+        SE=st_v.GetEntry(ptype,point,ss);
+
+        if(SE->_is_local) {
+          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
+        } else {
+          nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
+        }
+        acceleratorSynchronise();
+
+        for(int bb=0;bb<nbasis;bb++) {
+          res = res + coalescedRead(a_v[ss](b,bb))*nbr(bb);
+        }
+        coalescedWrite(out_v[ss](b),res);
+      });
+    }
+  }
+
+  void DhopInternal(CartesianStencil<siteVector,siteVector,int> &st, std::vector<CoarseMatrix> &a,
+                    const CoarseVector &in, CoarseVector &out, int dag) {
+    SimpleCompressor<siteVector> compressor;
+
+    st.HaloExchange(in,compressor);
+    autoView( in_v,  in,  AcceleratorRead);
+    autoView( out_v, out, AcceleratorWrite);
+    autoView( st_v , st,  AcceleratorRead);
+    typedef LatticeView<Cobj> Aview;
+
+    // determine in what order we need the points
+    int npoint = geom.npoint-1;
+    Vector<int> points(npoint, 0);
+    for(int p=0; p<npoint; p++)
+      points[p] = (dag && !hermitian) ? geom.points_dagger[p] : p;
+
+    Vector<Aview> AcceleratorViewContainer;
+    for(int p=0;p<npoint;p++) AcceleratorViewContainer.push_back(a[p].View(AcceleratorRead));
+    Aview *Aview_p = & AcceleratorViewContainer[0];
+
+    const int Nsimd = CComplex::Nsimd();
+    typedef decltype(coalescedRead(in_v[0])) calcVector;
+    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
+
+    RealD* dag_factor_p = &dag_factor[0];
+
+    if(dag) {
+      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
+        int ss = sss/nbasis;
+        int b  = sss%nbasis;
+        calcComplex res = Zero();
+        calcVector nbr;
+        int ptype;
+        StencilEntry *SE;
+
+        for(int p=0;p<npoint;p++){
+          int point = points[p];
+          SE=st_v.GetEntry(ptype,point,ss);
+
+          if(SE->_is_local) {
+            nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
+          } else {
+            nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
+          }
+          acceleratorSynchronise();
+
+          for(int bb=0;bb<nbasis;bb++) {
+            res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
+          }
+        }
+        coalescedWrite(out_v[ss](b),res);
+      });
+    } else {
+      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
+        int ss = sss/nbasis;
+        int b  = sss%nbasis;
+        calcComplex res = Zero();
+        calcVector nbr;
+        int ptype;
+        StencilEntry *SE;
+
+        for(int p=0;p<npoint;p++){
+          int point = points[p];
+          SE=st_v.GetEntry(ptype,point,ss);
+
+          if(SE->_is_local) {
+            nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
+          } else {
+            nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
+          }
+          acceleratorSynchronise();
+
+          for(int bb=0;bb<nbasis;bb++) {
+            res = res + coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
+          }
+        }
+        coalescedWrite(out_v[ss](b),res);
+      });
+    }
+
+    for(int p=0;p<npoint;p++) AcceleratorViewContainer[p].ViewClose();
+  }
+
-  CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) :
+  CoarsenedMatrix(GridCartesian &CoarseGrid, GridRedBlackCartesian &CoarseRBGrid, int hermitian_=0) :
 
     _grid(&CoarseGrid),
+    _cbgrid(&CoarseRBGrid),
     geom(CoarseGrid._ndimension),
     hermitian(hermitian_),
     Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
-    A(geom.npoint,&CoarseGrid)
+    StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
+    StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
+    A(geom.npoint,&CoarseGrid),
+    Aeven(geom.npoint,&CoarseRBGrid),
+    Aodd(geom.npoint,&CoarseRBGrid),
+    AselfInv(&CoarseGrid),
+    AselfInvEven(&CoarseRBGrid),
+    AselfInvOdd(&CoarseRBGrid),
+    dag_factor(nbasis*nbasis)
   {
+    fillFactor();
   };
 
+  void fillFactor() {
+    Eigen::MatrixXd dag_factor_eigen = Eigen::MatrixXd::Ones(nbasis, nbasis);
+    if(!hermitian) {
+      const int nb = nbasis/2;
+      dag_factor_eigen.block(0,nb,nb,nb) *= -1.0;
+      dag_factor_eigen.block(nb,0,nb,nb) *= -1.0;
+    }
+
+    // GPU readable prefactor
+    thread_for(i, nbasis*nbasis, {
+      int j = i/nbasis;
+      int k = i%nbasis;
+      dag_factor[i] = dag_factor_eigen(j, k);
+    });
+  }
+
   void CoarsenOperator(GridBase *FineGrid,LinearOperatorBase<Lattice<Fobj> > &linop,
                        Aggregation<Fobj,CComplex,nbasis> & Subspace)
   {
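Aside: every call site gains an argument, since the coarse red-black grid must now exist alongside the full coarse grid. A hedged construction sketch (grid and basis names are illustrative, not taken from this patch):

```cpp
#include <Grid/Grid.h>
using namespace Grid;

void buildCoarseOperator(Coordinate clatt) {
  const int nbasis = 40;            // illustrative basis size
  GridCartesian *Coarse4d =
    SpaceTimeGrid::makeFourDimGrid(clatt,
                                   GridDefaultSimd(Nd, vComplex::Nsimd()),
                                   GridDefaultMpi());
  GridRedBlackCartesian *CoarseRB4d =
    SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);

  // New signature: full grid plus red-black grid
  // (previously: CoarsenedMatrix<...> A(*Coarse4d, 0);)
  CoarsenedMatrix<vSpinColourVector, vTComplex, nbasis>
    A(*Coarse4d, *CoarseRB4d, 0);
}
```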
@@ -608,6 +959,9 @@ public:
       std::cout << GridLogMessage << " ForceHermitian, new code "<<std::endl;
       ForceHermitian();
     }
+
+    InvertSelfStencilLink(); std::cout << GridLogMessage << "Coarse self link inverted" << std::endl;
+    FillHalfCbs(); std::cout << GridLogMessage << "Coarse half checkerboards filled" << std::endl;
   }
 
   void ForceHermitian(void) {
@@ -629,6 +983,50 @@ public:
       }
     }
   }
+
+  void InvertSelfStencilLink() {
+    std::cout << GridLogDebug << "CoarsenedMatrix::InvertSelfStencilLink" << std::endl;
+    int localVolume = Grid()->lSites();
+
+    typedef typename Cobj::scalar_object scalar_object;
+
+    autoView(Aself_v,    A[geom.npoint-1], CpuRead);
+    autoView(AselfInv_v, AselfInv,         CpuWrite);
+    thread_for(site, localVolume, { // NOTE: Not able to bring this to GPU because of Eigen + peek/poke
+      Eigen::MatrixXcd selfLinkEigen    = Eigen::MatrixXcd::Zero(nbasis, nbasis);
+      Eigen::MatrixXcd selfLinkInvEigen = Eigen::MatrixXcd::Zero(nbasis, nbasis);
+
+      scalar_object selfLink    = Zero();
+      scalar_object selfLinkInv = Zero();
+
+      Coordinate lcoor;
+
+      Grid()->LocalIndexToLocalCoor(site, lcoor);
+      peekLocalSite(selfLink, Aself_v, lcoor);
+
+      for (int i = 0; i < nbasis; ++i)
+        for (int j = 0; j < nbasis; ++j)
+          selfLinkEigen(i, j) = static_cast<ComplexD>(TensorRemove(selfLink(i, j)));
+
+      selfLinkInvEigen = selfLinkEigen.inverse();
+
+      for(int i = 0; i < nbasis; ++i)
+        for(int j = 0; j < nbasis; ++j)
+          selfLinkInv(i, j) = selfLinkInvEigen(i, j);
+
+      pokeLocalSite(selfLinkInv, AselfInv_v, lcoor);
+    });
+  }
+
+  void FillHalfCbs() {
+    std::cout << GridLogDebug << "CoarsenedMatrix::FillHalfCbs" << std::endl;
+    for(int p = 0; p < geom.npoint; ++p) {
+      pickCheckerboard(Even, Aeven[p], A[p]);
+      pickCheckerboard(Odd,  Aodd[p],  A[p]);
+    }
+    pickCheckerboard(Even, AselfInvEven, AselfInv);
+    pickCheckerboard(Odd,  AselfInvOdd,  AselfInv);
+  }
 };
 
 NAMESPACE_END(Grid);
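Aside: with Meooe/Mooee/MooeeInv and RedBlackGrid() in place, the coarse system can go through the standard Schur red-black machinery. A hedged usage sketch (solver tolerances and field types are illustrative):

```cpp
#include <Grid/Grid.h>
using namespace Grid;

// Sketch: red-black preconditioned solve of the coarse system A x = b,
// using the checkerboarded interface added above. Parameters illustrative.
template<int nbasis>
void coarseRBSolve(CoarsenedMatrix<vSpinColourVector, vTComplex, nbasis> &A,
                   Lattice<iVector<vTComplex, nbasis> > &src,
                   Lattice<iVector<vTComplex, nbasis> > &sol) {
  typedef Lattice<iVector<vTComplex, nbasis> > CoarseVector;

  ConjugateGradient<CoarseVector> CG(1.0e-8, 10000);   // tol, max iterations
  SchurRedBlackDiagMooeeSolve<CoarseVector> rbSolver(CG);
  rbSolver(A, src, sol); // picks checkerboards internally via RedBlackGrid()
}
```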
@@ -1,67 +0,0 @@
-#include <Grid/GridCore.h>
-#include <fcntl.h>
-
-NAMESPACE_BEGIN(Grid);
-
-MemoryStats *MemoryProfiler::stats = nullptr;
-bool         MemoryProfiler::debug = false;
-
-void check_huge_pages(void *Buf,uint64_t BYTES)
-{
-#ifdef __linux__
-  int fd = open("/proc/self/pagemap", O_RDONLY);
-  assert(fd >= 0);
-  const int page_size = 4096;
-  uint64_t virt_pfn = (uint64_t)Buf / page_size;
-  off_t offset = sizeof(uint64_t) * virt_pfn;
-  uint64_t npages = (BYTES + page_size-1) / page_size;
-  uint64_t pagedata[npages];
-  uint64_t ret = lseek(fd, offset, SEEK_SET);
-  assert(ret == offset);
-  ret = ::read(fd, pagedata, sizeof(uint64_t)*npages);
-  assert(ret == sizeof(uint64_t) * npages);
-  int nhugepages = npages / 512;
-  int n4ktotal, nnothuge;
-  n4ktotal = 0;
-  nnothuge = 0;
-  for (int i = 0; i < nhugepages; ++i) {
-    uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
-    for (int j = 0; j < 512; ++j) {
-      uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
-      ++n4ktotal;
-      if (pageaddr != baseaddr + j * page_size)
-        ++nnothuge;
-    }
-  }
-  int rank = CartesianCommunicator::RankWorld();
-  printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
-#endif
-}
-
-std::string sizeString(const size_t bytes)
-{
-  constexpr unsigned int bufSize = 256;
-  const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
-  char buf[256];
-  size_t s = 0;
-  double count = bytes;
-
-  while (count >= 1024 && s < 7)
-    {
-      s++;
-      count /= 1024;
-    }
-  if (count - floor(count) == 0.0)
-    {
-      snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
-    }
-  else
-    {
-      snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
-    }
-
-  return std::string(buf);
-}
-
-NAMESPACE_END(Grid);
-
@@ -165,9 +165,18 @@ template<typename _Tp> inline bool operator!=(const devAllocator<_Tp>&, const d
 ////////////////////////////////////////////////////////////////////////////////
 // Template typedefs
 ////////////////////////////////////////////////////////////////////////////////
-//template<class T> using commAllocator = devAllocator<T>;
-template<class T> using Vector = std::vector<T,uvmAllocator<T> >;
+#ifdef ACCELERATOR_CSHIFT
+// Cshift on device
+template<class T> using cshiftAllocator = devAllocator<T>;
+#else
+// Cshift on host
+template<class T> using cshiftAllocator = std::allocator<T>;
+#endif
+
+template<class T> using Vector        = std::vector<T,uvmAllocator<T> >;
+template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
 template<class T> using commVector = std::vector<T,devAllocator<T> >;
+template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;
 
 NAMESPACE_END(Grid);
 
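Aside: cshiftVector is the buffer type the Cshift kernels further down switch over; its allocator, and hence where halo faces live, is fixed at configure time. A standalone illustration of the selection (devAllocator is stubbed out here, so this is a sketch only):

```cpp
#include <memory>
#include <vector>

// Stand-in for Grid's device allocator; real code uses devAllocator<T>.
template<class T> using devAllocatorStub = std::allocator<T>;

// #define ACCELERATOR_CSHIFT   // normally set by configure, not by hand
#ifdef ACCELERATOR_CSHIFT
template<class T> using cshiftAllocator = devAllocatorStub<T>; // device-resident faces
#else
template<class T> using cshiftAllocator = std::allocator<T>;   // host-resident faces
#endif

template<class T> using cshiftVector = std::vector<T, cshiftAllocator<T> >;

int main(void) {
  cshiftVector<double> face(1024); // residency follows the build-time switch
  return face.size() == 1024 ? 0 : 1;
}
```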
@@ -34,8 +34,6 @@ NAMESPACE_BEGIN(Grid);
 
 // Move control to configure.ac and Config.h?
 
-#define ALLOCATION_CACHE
-#define GRID_ALLOC_ALIGN (2*1024*1024)
 #define GRID_ALLOC_SMALL_LIMIT (4096)
 
 /*Pinning pages is costly*/
@@ -1,11 +1,12 @@
 #include <Grid/GridCore.h>
 
 #ifndef GRID_UVM
 
 #warning "Using explicit device memory copies"
 NAMESPACE_BEGIN(Grid);
+//define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
 #define dprintf(...)
 
 
 ////////////////////////////////////////////////////////////
 // For caching copies of data on device
 ////////////////////////////////////////////////////////////
@@ -103,7 +104,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
   ///////////////////////////////////////////////////////////
   assert(AccCache.state!=Empty);
 
-  // dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
+  dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
   assert(AccCache.accLock==0);
   assert(AccCache.cpuLock==0);
   assert(AccCache.CpuPtr!=(uint64_t)NULL);
@@ -111,7 +112,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
     DeviceBytes   -=AccCache.bytes;
     LRUremove(AccCache);
-    // dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
+    dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
   }
   uint64_t CpuPtr = AccCache.CpuPtr;
   EntryErase(CpuPtr);
@@ -125,7 +126,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
   ///////////////////////////////////////////////////////////////////////////
   assert(AccCache.state!=Empty);
 
-  // dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
+  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
   assert(AccCache.accLock==0);
   assert(AccCache.cpuLock==0);
   if(AccCache.state==AccDirty) {
@@ -136,7 +137,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
     DeviceBytes   -=AccCache.bytes;
     LRUremove(AccCache);
-    // dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
+    dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
   }
   uint64_t CpuPtr = AccCache.CpuPtr;
   EntryErase(CpuPtr);
@@ -149,7 +150,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
   assert(AccCache.AccPtr!=(uint64_t)NULL);
   assert(AccCache.CpuPtr!=(uint64_t)NULL);
   acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
-  // dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
+  dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
   DeviceToHostBytes+=AccCache.bytes;
   DeviceToHostXfer++;
   AccCache.state=Consistent;
@@ -164,7 +165,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
     AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
     DeviceBytes+=AccCache.bytes;
   }
-  // dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
+  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
   acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
   HostToDeviceBytes+=AccCache.bytes;
   HostToDeviceXfer++;
@@ -227,18 +228,24 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
   // Find if present, otherwise get or force an empty
   ////////////////////////////////////////////////////////////////////////////
   if ( EntryPresent(CpuPtr)==0 ){
-    EvictVictims(bytes);
     EntryCreate(CpuPtr,bytes,mode,hint);
   }
 
   auto AccCacheIterator = EntryLookup(CpuPtr);
   auto & AccCache = AccCacheIterator->second;
+  if (!AccCache.AccPtr) {
+    EvictVictims(bytes);
+  }
 
   assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard));
 
   assert(AccCache.cpuLock==0);  // Programming error
 
   if(AccCache.state!=Empty) {
+    dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
+            (uint64_t)AccCache.CpuPtr,
+            (uint64_t)CpuPtr,
+            (uint64_t)AccCache.bytes,
+            (uint64_t)bytes);
     assert(AccCache.CpuPtr == CpuPtr);
     assert(AccCache.bytes  ==bytes);
   }
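Aside: EvictVictims used to run on every first touch of a CpuPtr, before the entry even existed; it now runs after the lookup, and only when the entry still has no device allocation. A schematic of the reordered flow (AccCacheT and the map are stand-ins for Grid's internals, not its real types):

```cpp
#include <cstddef>
#include <map>

// Stand-ins for Grid's AcceleratorViewEntry and entry table.
struct AccCacheT { void *AccPtr = nullptr; };

void EvictVictims(size_t /*bytes*/) { /* free LRU device blocks here */ }

AccCacheT &viewOpen(std::map<void*, AccCacheT> &table, void *CpuPtr, size_t bytes) {
  if (table.count(CpuPtr) == 0) {
    table[CpuPtr] = AccCacheT{};   // EntryCreate: no eviction at this point
  }
  AccCacheT &AccCache = table[CpuPtr];
  if (!AccCache.AccPtr) {          // evict only when device storage is still needed
    EvictVictims(bytes);
  }
  return AccCache;
}
```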
@@ -285,21 +292,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
       AccCache.state  = Consistent; // CpuDirty + AccRead => Consistent
     }
     AccCache.accLock++;
-    // printf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
+    dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
   } else if(AccCache.state==Consistent) {
     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
       AccCache.state  = AccDirty;   // Consistent + AcceleratorWrite=> AccDirty
     else
       AccCache.state  = Consistent; // Consistent + AccRead => Consistent
     AccCache.accLock++;
-    // printf("Consistent entry into device accLock %d\n",AccCache.accLock);
+    dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
   } else if(AccCache.state==AccDirty) {
     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
       AccCache.state  = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
     else
       AccCache.state  = AccDirty; // AccDirty + AccRead => AccDirty
     AccCache.accLock++;
-    // printf("AccDirty entry into device accLock %d\n",AccCache.accLock);
+    dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
   } else {
     assert(0);
   }
|
|||||||
// Find if present, otherwise get or force an empty
|
// Find if present, otherwise get or force an empty
|
||||||
////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////
|
||||||
if ( EntryPresent(CpuPtr)==0 ){
|
if ( EntryPresent(CpuPtr)==0 ){
|
||||||
EvictVictims(bytes);
|
|
||||||
EntryCreate(CpuPtr,bytes,mode,transient);
|
EntryCreate(CpuPtr,bytes,mode,transient);
|
||||||
}
|
}
|
||||||
|
|
||||||
auto AccCacheIterator = EntryLookup(CpuPtr);
|
auto AccCacheIterator = EntryLookup(CpuPtr);
|
||||||
auto & AccCache = AccCacheIterator->second;
|
auto & AccCache = AccCacheIterator->second;
|
||||||
|
|
||||||
|
if (!AccCache.AccPtr) {
|
||||||
|
EvictVictims(bytes);
|
||||||
|
}
|
||||||
|
|
||||||
assert((mode==CpuRead)||(mode==CpuWrite));
|
assert((mode==CpuRead)||(mode==CpuWrite));
|
||||||
assert(AccCache.accLock==0); // Programming error
|
assert(AccCache.accLock==0); // Programming error
|
||||||
|
|
||||||
|
@@ -1,7 +1,6 @@
 #include <Grid/GridCore.h>
 #ifdef GRID_UVM
 
-#warning "Grid is assuming unified virtual memory address space"
 NAMESPACE_BEGIN(Grid);
 /////////////////////////////////////////////////////////////////////////////////
 // View management is 1:1 address space mapping
@@ -44,7 +44,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv)
   MPI_Initialized(&flag); // needed to coexist with other libs apparently
   if ( !flag ) {
 
-#if defined (TOFU) // FUGAKU, credits go to Issaku Kanamori
+#ifndef GRID_COMMS_THREADS
     nCommThreads=1;
     // wrong results here too
     // For now: comms-overlap leads to wrong results in Benchmark_wilson even on single node MPI runs
@@ -358,16 +358,19 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
   assert(from != _processor);
   assert(gme  == ShmRank);
   double off_node_bytes=0.0;
+  int tag;
 
   if ( gfrom ==MPI_UNDEFINED) {
-    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[commdir],&rrq);
+    tag= dir+from*32;
+    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
     assert(ierr==0);
     list.push_back(rrq);
     off_node_bytes+=bytes;
   }
 
   if ( gdest == MPI_UNDEFINED ) {
-    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[commdir],&xrq);
+    tag= dir+_processor*32;
+    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
     assert(ierr==0);
     list.push_back(xrq);
     off_node_bytes+=bytes;
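Aside: the old code tagged messages with the sender rank alone, so two in-flight exchanges between the same rank pair in different directions could match the wrong Irecv. Encoding tag = dir + rank*32 keeps (rank, dir) unique and recoverable while dir < 32. A standalone check of the encoding (the stride 32 is from this patch; MPI_TAG_UB limits are not modelled):

```cpp
#include <cassert>

int main(void) {
  const int stride = 32;                   // tag stride used in the patch
  for (int rank = 0; rank < 1024; rank++) {
    for (int dir = 0; dir < stride; dir++) {
      int tag = dir + rank * stride;
      assert(tag % stride == dir);         // direction recoverable
      assert(tag / stride == rank);        // sender rank recoverable
    }
  }
  return 0;
}
```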
@@ -102,7 +102,7 @@ public:
   ///////////////////////////////////////////////////
   static void SharedMemoryAllocate(uint64_t bytes, int flags);
   static void SharedMemoryFree(void);
-  static void SharedMemoryCopy(void *dest,const void *src,size_t bytes);
+  static void SharedMemoryCopy(void *dest,void *src,size_t bytes);
   static void SharedMemoryZero(void *dest,size_t bytes);
 
 };
@@ -457,8 +457,9 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
     std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
     exit(EXIT_FAILURE);
   }
-  if ( WorldRank == 0 ){
-    std::cout << header " SharedMemoryMPI.cc cudaMalloc "<< bytes
+  //  if ( WorldRank == 0 ){
+  if ( 1 ){
+    std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
               << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
   }
   SharedMemoryZero(ShmCommBuf,bytes);
@@ -665,7 +666,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 #endif
     void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);
 
-    // std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< size<< "bytes)"<<std::endl;
     if ( ptr == (void * )MAP_FAILED ) {
       perror("failed mmap");
       assert(0);
@@ -715,7 +715,7 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
   bzero(dest,bytes);
 #endif
 }
-void GlobalSharedMemory::SharedMemoryCopy(void *dest,const void *src,size_t bytes)
+void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
 {
 #ifdef GRID_CUDA
   cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
@@ -771,19 +771,12 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
   std::vector<int> ranks(size);   for(int r=0;r<size;r++) ranks[r]=r;
   MPI_Group_translate_ranks (FullGroup,size,&ranks[0],ShmGroup, &ShmRanks[0]);
 
-#ifdef GRID_IBM_SUMMIT
-  // Hide the shared memory path between sockets
-  // if even number of nodes
-  if ( (ShmSize & 0x1)==0 ) {
-    int SocketSize = ShmSize/2;
-    int mySocket = ShmRank/SocketSize;
+#ifdef GRID_SHM_FORCE_MPI
+  // Hide the shared memory path between ranks
+  {
     for(int r=0;r<size;r++){
-      int hisRank=ShmRanks[r];
-      if ( hisRank!= MPI_UNDEFINED ) {
-        int hisSocket=hisRank/SocketSize;
-        if ( hisSocket != mySocket ) {
-          ShmRanks[r] = MPI_UNDEFINED;
-        }
+      if ( r!=rank ) {
+        ShmRanks[r] = MPI_UNDEFINED;
       }
     }
   }
@@ -29,6 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <Grid/GridCore.h>
 
 NAMESPACE_BEGIN(Grid);
+#define header "SharedMemoryNone: "
 
 /*Construct from an MPI communicator*/
 void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
@@ -55,6 +56,38 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
 ////////////////////////////////////////////////////////////////////////////////////////////
 // Hugetlbfs mapping intended, use anonymous mmap
 ////////////////////////////////////////////////////////////////////////////////////////////
+#if 1
+void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
+{
+  std::cout << header "SharedMemoryAllocate "<< bytes<< " GPU implementation "<<std::endl;
+  void * ShmCommBuf ;
+  assert(_ShmSetup==1);
+  assert(_ShmAlloc==0);
+
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Each MPI rank should allocate our own buffer
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+  ShmCommBuf = acceleratorAllocDevice(bytes);
+
+  if (ShmCommBuf == (void *)NULL ) {
+    std::cerr << " SharedMemoryNone.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  if ( WorldRank == 0 ){
+    std::cout << WorldRank << header " SharedMemoryNone.cc acceleratorAllocDevice "<< bytes
+              << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
+  }
+  SharedMemoryZero(ShmCommBuf,bytes);
+
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Loop over ranks/gpu's on our node
+  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
+  WorldShmCommBufs[0] = ShmCommBuf;
+
+  _ShmAllocBytes=bytes;
+  _ShmAlloc=1;
+}
+#else
 void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 {
   void * ShmCommBuf ;
@@ -83,7 +116,15 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   _ShmAllocBytes=bytes;
   _ShmAlloc=1;
 };
+#endif
+void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
+{
+  acceleratorMemSet(dest,0,bytes);
+}
+void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
+{
+  acceleratorCopyToDevice(src,dest,bytes);
+}
 ////////////////////////////////////////////////////////
 // Global shared functionality finished
 // Now move to per communicator functionality
@@ -35,7 +35,7 @@ extern Vector<std::pair<int,int> > Cshift_table;
 // Gather for when there is no need to SIMD split
 ///////////////////////////////////////////////////////////////////
 template<class vobj> void
-Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer,int dimension,int plane,int cbmask, int off=0)
+Gather_plane_simple (const Lattice<vobj> &rhs,cshiftVector<vobj> &buffer,int dimension,int plane,int cbmask, int off=0)
 {
   int rd = rhs.Grid()->_rdimensions[dimension];
 
@@ -73,12 +73,19 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer,int dimen
     }
   }
   {
-    autoView(rhs_v , rhs, AcceleratorRead);
     auto buffer_p = & buffer[0];
     auto table = &Cshift_table[0];
+#ifdef ACCELERATOR_CSHIFT
+    autoView(rhs_v , rhs, AcceleratorRead);
     accelerator_for(i,ent,vobj::Nsimd(),{
       coalescedWrite(buffer_p[table[i].first],coalescedRead(rhs_v[table[i].second]));
     });
+#else
+    autoView(rhs_v , rhs, CpuRead);
+    thread_for(i,ent,{
+      buffer_p[table[i].first]=rhs_v[table[i].second];
+    });
+#endif
   }
 }
 
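Aside: this host/device split is the same idiom in every Cshift kernel that follows: one body under ACCELERATOR_CSHIFT using an Accelerator view with accelerator_for, one host body using a Cpu view with thread_for. Condensed to its shape below; Grid's autoView/accelerator_for/thread_for and the coalesced accessors are assumed, and the function name is illustrative:

```cpp
// Shape of the dual-path Cshift kernel; not a drop-in replacement for the
// real Gather_plane_simple, just the dispatch skeleton it now follows.
template<class vobj, class BufPtr, class TablePtr>
void gather_plane_dispatch(const Lattice<vobj> &rhs, BufPtr buffer_p,
                           TablePtr table, int ent) {
#ifdef ACCELERATOR_CSHIFT
  autoView(rhs_v, rhs, AcceleratorRead);   // device-side view
  accelerator_for(i, ent, vobj::Nsimd(), {
    coalescedWrite(buffer_p[table[i].first], coalescedRead(rhs_v[table[i].second]));
  });
#else
  autoView(rhs_v, rhs, CpuRead);           // host-side view
  thread_for(i, ent, {
    buffer_p[table[i].first] = rhs_v[table[i].second];
  });
#endif
}
```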
@ -103,6 +110,7 @@ Gather_plane_extract(const Lattice<vobj> &rhs,
|
|||||||
int n1=rhs.Grid()->_slice_stride[dimension];
|
int n1=rhs.Grid()->_slice_stride[dimension];
|
||||||
|
|
||||||
if ( cbmask ==0x3){
|
if ( cbmask ==0x3){
|
||||||
|
#ifdef ACCELERATOR_CSHIFT
|
||||||
autoView(rhs_v , rhs, AcceleratorRead);
|
autoView(rhs_v , rhs, AcceleratorRead);
|
||||||
accelerator_for2d(n,e1,b,e2,1,{
|
accelerator_for2d(n,e1,b,e2,1,{
|
||||||
int o = n*n1;
|
int o = n*n1;
|
||||||
@@ -111,12 +119,22 @@ Gather_plane_extract(const Lattice<vobj> &rhs,
       vobj temp =rhs_v[so+o+b];
       extract<vobj>(temp,pointers,offset);
     });
+#else
+    autoView(rhs_v , rhs, CpuRead);
+    thread_for2d(n,e1,b,e2,{
+      int o = n*n1;
+      int offset = b+n*e2;
+
+      vobj temp =rhs_v[so+o+b];
+      extract<vobj>(temp,pointers,offset);
+    });
+#endif
   } else {
-    autoView(rhs_v , rhs, AcceleratorRead);
 
     Coordinate rdim=rhs.Grid()->_rdimensions;
     Coordinate cdm =rhs.Grid()->_checker_dim_mask;
     std::cout << " Dense packed buffer WARNING " <<std::endl; // Does this get called twice once for each cb?
+#ifdef ACCELERATOR_CSHIFT
+    autoView(rhs_v , rhs, AcceleratorRead);
     accelerator_for2d(n,e1,b,e2,1,{
 
       Coordinate coor;
@@ -134,13 +152,33 @@ Gather_plane_extract(const Lattice<vobj> &rhs,
         extract<vobj>(temp,pointers,offset);
       }
     });
+#else
+    autoView(rhs_v , rhs, CpuRead);
+    thread_for2d(n,e1,b,e2,{
+
+      Coordinate coor;
+
+      int o=n*n1;
+      int oindex = o+b;
+
+      int cb = RedBlackCheckerBoardFromOindex(oindex, rdim, cdm);
+
+      int ocb=1<<cb;
+      int offset = b+n*e2;
+
+      if ( ocb & cbmask ) {
+        vobj temp =rhs_v[so+o+b];
+        extract<vobj>(temp,pointers,offset);
+      }
+    });
+#endif
   }
 }
 
 //////////////////////////////////////////////////////
 // Scatter for when there is no need to SIMD split
 //////////////////////////////////////////////////////
-template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vobj> &buffer, int dimension,int plane,int cbmask)
+template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,cshiftVector<vobj> &buffer, int dimension,int plane,int cbmask)
 {
   int rd = rhs.Grid()->_rdimensions[dimension];
 
@ -182,12 +220,19 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo
   }

   {
-    autoView( rhs_v, rhs, AcceleratorWrite);
     auto buffer_p = & buffer[0];
     auto table = &Cshift_table[0];
+#ifdef ACCELERATOR_CSHIFT
+    autoView( rhs_v, rhs, AcceleratorWrite);
     accelerator_for(i,ent,vobj::Nsimd(),{
       coalescedWrite(rhs_v[table[i].first],coalescedRead(buffer_p[table[i].second]));
     });
+#else
+    autoView( rhs_v, rhs, CpuWrite);
+    thread_for(i,ent,{
+      rhs_v[table[i].first]=buffer_p[table[i].second];
+    });
+#endif
   }
 }

@ -208,14 +253,23 @@ template<class vobj> void Scatter_plane_merge(Lattice<vobj> &rhs,ExtractPointerA
   int e2=rhs.Grid()->_slice_block[dimension];

   if(cbmask ==0x3 ) {
-    autoView( rhs_v , rhs, AcceleratorWrite);
     int _slice_stride = rhs.Grid()->_slice_stride[dimension];
     int _slice_block = rhs.Grid()->_slice_block[dimension];
+#ifdef ACCELERATOR_CSHIFT
+    autoView( rhs_v , rhs, AcceleratorWrite);
     accelerator_for2d(n,e1,b,e2,1,{
       int o      = n*_slice_stride;
       int offset = b+n*_slice_block;
       merge(rhs_v[so+o+b],pointers,offset);
     });
+#else
+    autoView( rhs_v , rhs, CpuWrite);
+    thread_for2d(n,e1,b,e2,{
+      int o      = n*_slice_stride;
+      int offset = b+n*_slice_block;
+      merge(rhs_v[so+o+b],pointers,offset);
+    });
+#endif
   } else {

     // Case of SIMD split AND checker dim cannot currently be hit, except in
@ -280,12 +334,20 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
   }

   {
+    auto table = &Cshift_table[0];
+#ifdef ACCELERATOR_CSHIFT
     autoView(rhs_v , rhs, AcceleratorRead);
     autoView(lhs_v , lhs, AcceleratorWrite);
-    auto table = &Cshift_table[0];
     accelerator_for(i,ent,vobj::Nsimd(),{
       coalescedWrite(lhs_v[table[i].first],coalescedRead(rhs_v[table[i].second]));
     });
+#else
+    autoView(rhs_v , rhs, CpuRead);
+    autoView(lhs_v , lhs, CpuWrite);
+    thread_for(i,ent,{
+      lhs_v[table[i].first]=rhs_v[table[i].second];
+    });
+#endif
   }
 }

@ -324,12 +386,20 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
   }

   {
+    auto table = &Cshift_table[0];
+#ifdef ACCELERATOR_CSHIFT
     autoView( rhs_v, rhs, AcceleratorRead);
     autoView( lhs_v, lhs, AcceleratorWrite);
-    auto table = &Cshift_table[0];
     accelerator_for(i,ent,1,{
       permute(lhs_v[table[i].first],rhs_v[table[i].second],permute_type);
     });
+#else
+    autoView( rhs_v, rhs, CpuRead);
+    autoView( lhs_v, lhs, CpuWrite);
+    thread_for(i,ent,{
+      permute(lhs_v[table[i].first],rhs_v[table[i].second],permute_type);
+    });
+#endif
   }
 }

@ -101,7 +101,8 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj>& ret,const Lattice<vob
     Cshift_comms_simd(ret,rhs,dimension,shift,0x2);// both with block stride loop iteration
   }
 }
+#define ACCELERATOR_CSHIFT_NO_COPY
+#ifdef ACCELERATOR_CSHIFT_NO_COPY
 template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
 {
   typedef typename vobj::vector_type vector_type;
@ -121,9 +122,9 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
   assert(shift<fd);

   int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
-  commVector<vobj> send_buf(buffer_size);
-  commVector<vobj> recv_buf(buffer_size);
+  cshiftVector<vobj> send_buf(buffer_size);
+  cshiftVector<vobj> recv_buf(buffer_size);

   int cb= (cbmask==0x2)? Odd : Even;
   int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

@ -138,7 +139,7 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r

   } else {

-    int words = send_buf.size();
+    int words = buffer_size;
     if (cbmask != 0x3) words=words>>1;

     int bytes = words * sizeof(vobj);
@ -150,12 +151,14 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
     int xmit_to_rank;
     grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);

+    grid->Barrier();
+
     grid->SendToRecvFrom((void *)&send_buf[0],
                          xmit_to_rank,
                          (void *)&recv_buf[0],
                          recv_from_rank,
                          bytes);

     grid->Barrier();

     Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
@ -195,8 +198,15 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
   int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
   //  int words = sizeof(vobj)/sizeof(vector_type);

-  std::vector<commVector<scalar_object> > send_buf_extract(Nsimd,commVector<scalar_object>(buffer_size) );
-  std::vector<commVector<scalar_object> > recv_buf_extract(Nsimd,commVector<scalar_object>(buffer_size) );
+  std::vector<cshiftVector<scalar_object> > send_buf_extract(Nsimd);
+  std::vector<cshiftVector<scalar_object> > recv_buf_extract(Nsimd);
+  scalar_object *  recv_buf_extract_mpi;
+  scalar_object *  send_buf_extract_mpi;
+
+  for(int s=0;s<Nsimd;s++){
+    send_buf_extract[s].resize(buffer_size);
+    recv_buf_extract[s].resize(buffer_size);
+  }

   int bytes = buffer_size*sizeof(scalar_object);

@ -242,11 +252,204 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
     if(nbr_proc){
       grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

-      grid->SendToRecvFrom((void *)&send_buf_extract[nbr_lane][0],
+      grid->Barrier();
+
+      send_buf_extract_mpi = &send_buf_extract[nbr_lane][0];
+      recv_buf_extract_mpi = &recv_buf_extract[i][0];
+      grid->SendToRecvFrom((void *)send_buf_extract_mpi,
                            xmit_to_rank,
-                           (void *)&recv_buf_extract[i][0],
+                           (void *)recv_buf_extract_mpi,
                            recv_from_rank,
                            bytes);
+
+      grid->Barrier();
+
+      rpointers[i] = &recv_buf_extract[i][0];
+    } else {
+      rpointers[i] = &send_buf_extract[nbr_lane][0];
+    }
+
+  }
+  Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
+  }
+
+}
+#else
+template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
+{
+  typedef typename vobj::vector_type vector_type;
+  typedef typename vobj::scalar_type scalar_type;
+
+  GridBase *grid=rhs.Grid();
+  Lattice<vobj> temp(rhs.Grid());
+
+  int fd              = rhs.Grid()->_fdimensions[dimension];
+  int rd              = rhs.Grid()->_rdimensions[dimension];
+  int pd              = rhs.Grid()->_processors[dimension];
+  int simd_layout     = rhs.Grid()->_simd_layout[dimension];
+  int comm_dim        = rhs.Grid()->_processors[dimension] >1 ;
+  assert(simd_layout==1);
+  assert(comm_dim==1);
+  assert(shift>=0);
+  assert(shift<fd);
+
+  int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
+  cshiftVector<vobj> send_buf_v(buffer_size);
+  cshiftVector<vobj> recv_buf_v(buffer_size);
+  vobj *send_buf;
+  vobj *recv_buf;
+  {
+    grid->ShmBufferFreeAll();
+    size_t bytes = buffer_size*sizeof(vobj);
+    send_buf=(vobj *)grid->ShmBufferMalloc(bytes);
+    recv_buf=(vobj *)grid->ShmBufferMalloc(bytes);
+  }
+
+  int cb= (cbmask==0x2)? Odd : Even;
+  int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
+
+  for(int x=0;x<rd;x++){
+
+    int sx        =  (x+sshift)%rd;
+    int comm_proc = ((x+sshift)/rd)%pd;
+
+    if (comm_proc==0) {
+
+      Copy_plane(ret,rhs,dimension,x,sx,cbmask);
+
+    } else {
+
+      int words = buffer_size;
+      if (cbmask != 0x3) words=words>>1;
+
+      int bytes = words * sizeof(vobj);
+
+      Gather_plane_simple (rhs,send_buf_v,dimension,sx,cbmask);
+
+      //      int rank           = grid->_processor;
+      int recv_from_rank;
+      int xmit_to_rank;
+      grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
+
+
+      grid->Barrier();
+
+      acceleratorCopyDeviceToDevice((void *)&send_buf_v[0],(void *)&send_buf[0],bytes);
+      grid->SendToRecvFrom((void *)&send_buf[0],
+                           xmit_to_rank,
+                           (void *)&recv_buf[0],
+                           recv_from_rank,
+                           bytes);
+      acceleratorCopyDeviceToDevice((void *)&recv_buf[0],(void *)&recv_buf_v[0],bytes);
+
+      grid->Barrier();
+
+      Scatter_plane_simple (ret,recv_buf_v,dimension,x,cbmask);
+    }
+  }
+}
+
+template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
+{
+  GridBase *grid=rhs.Grid();
+  const int Nsimd = grid->Nsimd();
+  typedef typename vobj::vector_type vector_type;
+  typedef typename vobj::scalar_object scalar_object;
+  typedef typename vobj::scalar_type scalar_type;
+
+  int fd = grid->_fdimensions[dimension];
+  int rd = grid->_rdimensions[dimension];
+  int ld = grid->_ldimensions[dimension];
+  int pd = grid->_processors[dimension];
+  int simd_layout     = grid->_simd_layout[dimension];
+  int comm_dim        = grid->_processors[dimension] >1 ;
+
+  //std::cout << "Cshift_comms_simd dim "<< dimension << " fd "<<fd<<" rd "<<rd
+  //    << " ld "<<ld<<" pd " << pd<<" simd_layout "<<simd_layout
+  //    << " comm_dim " << comm_dim << " cbmask " << cbmask <<std::endl;
+
+  assert(comm_dim==1);
+  assert(simd_layout==2);
+  assert(shift>=0);
+  assert(shift<fd);
+
+  int permute_type=grid->PermuteType(dimension);
+
+  ///////////////////////////////////////////////
+  // Simd direction uses an extract/merge pair
+  ///////////////////////////////////////////////
+  int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
+  //  int words = sizeof(vobj)/sizeof(vector_type);
+
+  std::vector<cshiftVector<scalar_object> >  send_buf_extract(Nsimd);
+  std::vector<cshiftVector<scalar_object> >  recv_buf_extract(Nsimd);
+  scalar_object *  recv_buf_extract_mpi;
+  scalar_object *  send_buf_extract_mpi;
+  {
+    size_t bytes = sizeof(scalar_object)*buffer_size;
+    grid->ShmBufferFreeAll();
+    send_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
+    recv_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
+  }
+  for(int s=0;s<Nsimd;s++){
+    send_buf_extract[s].resize(buffer_size);
+    recv_buf_extract[s].resize(buffer_size);
+  }
+
+  int bytes = buffer_size*sizeof(scalar_object);
+
+  ExtractPointerArray<scalar_object>  pointers(Nsimd); //
+  ExtractPointerArray<scalar_object> rpointers(Nsimd); // received pointers
+
+  ///////////////////////////////////////////
+  // Work out what to send where
+  ///////////////////////////////////////////
+  int cb    = (cbmask==0x2)? Odd : Even;
+  int sshift= grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
+
+  // loop over outer coord planes orthog to dim
+  for(int x=0;x<rd;x++){
+
+    // FIXME call local permute copy if none are offnode.
+    for(int i=0;i<Nsimd;i++){
+      pointers[i] = &send_buf_extract[i][0];
+    }
+    int sx   = (x+sshift)%rd;
+    Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);
+
+    for(int i=0;i<Nsimd;i++){
+
+      int inner_bit = (Nsimd>>(permute_type+1));
+      int ic= (i&inner_bit)? 1:0;
+
+      int my_coor          = rd*ic + x;
+      int nbr_coor         = my_coor+sshift;
+      int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors
+
+      int nbr_ic   = (nbr_coor%ld)/rd;    // inner coord of peer
+      int nbr_ox   = (nbr_coor%rd);       // outer coord of peer
+      int nbr_lane = (i&(~inner_bit));
+
+      int recv_from_rank;
+      int xmit_to_rank;
+
+      if (nbr_ic) nbr_lane|=inner_bit;
+
+      assert (sx == nbr_ox);
+
+      if(nbr_proc){
+        grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
+
+        grid->Barrier();
+
+        acceleratorCopyDeviceToDevice((void *)&send_buf_extract[nbr_lane][0],(void *)send_buf_extract_mpi,bytes);
+        grid->SendToRecvFrom((void *)send_buf_extract_mpi,
+                             xmit_to_rank,
+                             (void *)recv_buf_extract_mpi,
+                             recv_from_rank,
+                             bytes);
+        acceleratorCopyDeviceToDevice((void *)recv_buf_extract_mpi,(void *)&recv_buf_extract[i][0],bytes);

        grid->Barrier();
        rpointers[i] = &recv_buf_extract[i][0];
      } else {
@ -258,7 +461,7 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
     }

   }
+#endif
 NAMESPACE_END(Grid);

 #endif
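Note: the new #else branch above stages each cshiftVector through an MPI-visible shared buffer, with acceleratorCopyDeviceToDevice on either side of the exchange. A rough standalone sketch of that staging, with MPI_Sendrecv and cudaMemcpy standing in for Grid's SendToRecvFrom and acceleratorCopyDeviceToDevice (the names and the assumption that the staging buffers are device-resident are illustrative, not Grid's API):

#include <cstddef>
#include <mpi.h>
#include <cuda_runtime.h>

// Exchange one plane: send_dev/recv_dev are ordinary device vectors,
// send_mpi/recv_mpi the (assumed device-resident) MPI staging buffers.
void staged_plane_exchange(const void *send_dev, void *recv_dev,
                           void *send_mpi, void *recv_mpi, size_t bytes,
                           int to, int from, MPI_Comm comm)
{
  cudaMemcpy(send_mpi, send_dev, bytes, cudaMemcpyDeviceToDevice); // stage
  MPI_Sendrecv(send_mpi, (int)bytes, MPI_BYTE, to,   0,
               recv_mpi, (int)bytes, MPI_BYTE, from, 0,
               comm, MPI_STATUS_IGNORE);
  cudaMemcpy(recv_dev, recv_mpi, bytes, cudaMemcpyDeviceToDevice); // unstage
}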
@ -350,7 +350,6 @@ GridUnopClass(UnaryTimesI, timesI(a));
 GridUnopClass(UnaryTimesMinusI, timesMinusI(a));
 GridUnopClass(UnaryAbs, abs(a));
 GridUnopClass(UnarySqrt, sqrt(a));
-GridUnopClass(UnaryRsqrt, rsqrt(a));
 GridUnopClass(UnarySin, sin(a));
 GridUnopClass(UnaryCos, cos(a));
 GridUnopClass(UnaryAsin, asin(a));
@ -463,7 +462,6 @@ GRID_DEF_UNOP(timesMinusI, UnaryTimesMinusI);
 GRID_DEF_UNOP(abs, UnaryAbs); // abs overloaded in cmath C++98; DON'T do the
                               // abs-fabs-dabs-labs thing
 GRID_DEF_UNOP(sqrt, UnarySqrt);
-GRID_DEF_UNOP(rsqrt, UnaryRsqrt);
 GRID_DEF_UNOP(sin, UnarySin);
 GRID_DEF_UNOP(cos, UnaryCos);
 GRID_DEF_UNOP(asin, UnaryAsin);
@ -62,7 +62,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
       basis_v.push_back(basis[k].View(AcceleratorWrite));
   }

-#if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) )
+#if ( (!defined(GRID_CUDA)) )
   int max_threads = thread_max();
   Vector < vobj > Bt(Nm * max_threads);
   thread_region
@ -161,11 +161,13 @@ void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,in
   double * Qt_j = & Qt_jv[0];
   for(int k=0;k<Nm;++k) Qt_j[k]=Qt(j,k);

+  auto basis_vp=& basis_v[0];
   autoView(result_v,result,AcceleratorWrite);
   accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{
-    auto B=coalescedRead(zz);
+    vobj zzz=Zero();
+    auto B=coalescedRead(zzz);
     for(int k=k0; k<k1; ++k){
-      B +=Qt_j[k] * coalescedRead(basis_v[k][ss]);
+      B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]);
     }
     coalescedWrite(result_v[ss], B);
   });
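Note: the basis_vp change above captures a raw pointer to the first view before entering the device lambda, because indexing the host-side std::vector of views inside accelerator_for would dereference host memory on the device. The idiom in isolation (illustrative stand-in types, not Grid's):

#include <vector>

template <class View, class Kernel>
void apply_over_basis(std::vector<View> &basis_v, int Nm, Kernel k)
{
  // Capture a trivially-copyable raw pointer, not the vector itself:
  // the lambda body may be compiled for and executed on a device.
  View *basis_vp = &basis_v[0];
  for (int m = 0; m < Nm; ++m)   // stands in for the accelerator_for body
    k(basis_vp[m]);
}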
@ -127,6 +127,11 @@ accelerator_inline void convertType(T1 & out, const iScalar<T2> & in) {
   convertType(out,in._internal);
 }

+template<typename T1, typename std::enable_if<!isGridScalar<T1>::value, T1>::type* = nullptr>
+accelerator_inline void convertType(T1 & out, const iScalar<T1> & in) {
+  convertType(out,in._internal);
+}
+
 template<typename T1,typename T2>
 accelerator_inline void convertType(iScalar<T1> & out, const T2 & in) {
   convertType(out._internal,in);
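Note: the added convertType overload is guarded with enable_if so it only participates when T1 is not itself a grid scalar, keeping the unwrap of iScalar<T1> into a plain T1 unambiguous against the other overloads. The mechanism in isolation, with a hypothetical trait and wrapper (for illustration only):

#include <type_traits>

template <class T> struct Scalar { T internal; };

template <class T> struct is_scalar_wrapper : std::false_type {};
template <class T> struct is_scalar_wrapper<Scalar<T>> : std::true_type {};

// Participates in overload resolution only when T1 is NOT itself a
// Scalar<...>, so unwrapping one level has a unique best match.
template <class T1,
          typename std::enable_if<!is_scalar_wrapper<T1>::value, T1>::type* = nullptr>
void convert(T1 &out, const Scalar<T1> &in) { out = in.internal; }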
@ -97,42 +97,30 @@ public:
     Coordinate icoor;

 #ifdef GRID_SIMT
-    _Spinor tmp;

     const int Nsimd =SiteDoubledGaugeField::Nsimd();
     int s = acceleratorSIMTlane(Nsimd);
     St.iCoorFromIindex(icoor,s);

     int mmu = mu % Nd;
-    if ( SE->_around_the_world && St.parameters.twists[mmu] ) {
-
-      int permute_lane = (sl==1)
-        || ((distance== 1)&&(icoor[direction]==1))
-        || ((distance==-1)&&(icoor[direction]==0));
-
-      if ( permute_lane ) {
-        tmp(0) = chi(1);
-        tmp(1) = chi(0);
-      } else {
-        tmp(0) = chi(0);
-        tmp(1) = chi(1);
-      }
-
-      auto UU0=coalescedRead(U(0)(mu));
-      auto UU1=coalescedRead(U(1)(mu));
-
-      mult(&phi(0),&UU0,&tmp(0));
-      mult(&phi(1),&UU1,&tmp(1));
-
-    } else {
-
-      auto UU0=coalescedRead(U(0)(mu));
-      auto UU1=coalescedRead(U(1)(mu));
-
-      mult(&phi(0),&UU0,&chi(0));
-      mult(&phi(1),&UU1,&chi(1));
-
-    }
+    auto UU0=coalescedRead(U(0)(mu));
+    auto UU1=coalescedRead(U(1)(mu));
+
+    //Decide whether we do a G-parity flavor twist
+    //Note: this assumes (but does not check) that sl==1 || sl==2 i.e. max 2 SIMD lanes in G-parity dir
+    //It also assumes (but does not check) that abs(distance) == 1
+    int permute_lane = (sl==1)
+    || ((distance== 1)&&(icoor[direction]==1))
+    || ((distance==-1)&&(icoor[direction]==0));
+
+    permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu]; //only if we are going around the world
+
+    //Apply the links
+    int f_upper = permute_lane ? 1 : 0;
+    int f_lower = !f_upper;
+
+    mult(&phi(0),&UU0,&chi(f_upper));
+    mult(&phi(1),&UU1,&chi(f_lower));

 #else
 typedef _Spinor vobj;
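Note: the rewritten SIMT branch replaces the if/else copy into a temporary spinor with a predicate folded into the flavour index, so all SIMT lanes execute the same instruction stream. Stripped to its essentials (illustrative stand-in types):

#include <array>

using Flavoured = std::array<double,2>;  // stand-in for the two G-parity flavours

inline Flavoured twist_select(const Flavoured &chi, bool permute_lane)
{
  // permute_lane already folds together the SIMD-lane test, the
  // around-the-world flag and the twist mask, as in the new code.
  const int f_upper = permute_lane ? 1 : 0; // flavour feeding component 0
  const int f_lower = !f_upper;             // flavour feeding component 1
  return Flavoured{ chi[f_upper], chi[f_lower] };
}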
@ -642,7 +642,7 @@ void CayleyFermion5D<Impl>::ContractConservedCurrent( PropagatorField &q_in_1,
                                                       Current curr_type,
                                                       unsigned int mu)
 {
-#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
+#if (!defined(GRID_HIP))
   Gamma::Algebra Gmu [] = {
     Gamma::Algebra::GammaX,
     Gamma::Algebra::GammaY,
@ -826,7 +826,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
   }
 #endif

-#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
+#if (!defined(GRID_HIP))
   int tshift = (mu == Nd-1) ? 1 : 0;
   ////////////////////////////////////////////////
   // GENERAL CAYLEY CASE
@ -92,20 +92,16 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
   int lvol = _Umu.Grid()->lSites();
   int DimRep = Impl::Dimension;

-  Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
-  Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);

-  Coordinate lcoor;
-  typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
-
   {
     autoView(CTv,CloverTerm,CpuRead);
     autoView(CTIv,CloverTermInv,CpuWrite);
-    for (int site = 0; site < lvol; site++) {
+    thread_for(site, lvol, {
+      Coordinate lcoor;
       grid->LocalIndexToLocalCoor(site, lcoor);
-      EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
       peekLocalSite(Qx, CTv, lcoor);
-      Qxinv = Zero();
       //if (csw!=0){
       for (int j = 0; j < Ns; j++)
         for (int k = 0; k < Ns; k++)
@ -126,7 +122,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
       //  if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl;
       // }
       pokeLocalSite(Qxinv, CTIv, lcoor);
-    }
+    });
   }

   // Separate the even and odd parts
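Note: converting the per-site inversion loop to thread_for is only safe because the Eigen scratch matrices and the site coordinate move from function scope into the loop body, so each thread owns its own copies. The same consideration in a standalone form (OpenMP standing in for thread_for; assumes Eigen is available):

#include <Eigen/Dense>

void invert_per_site(int lvol, int N /* = Ns * DimRep */)
{
  #pragma omp parallel for
  for (int site = 0; site < lvol; ++site) {
    // Declared per iteration: no mutable state shared between threads.
    Eigen::MatrixXcd Op = Eigen::MatrixXcd::Identity(N, N); // stand-in fill
    Eigen::MatrixXcd OpInv = Op.inverse();                  // per-site dense inverse
    (void)OpInv; // ...would be poked back into the inverse clover field...
  }
}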
@ -154,6 +154,10 @@ public:
     return Hsum.real();
   }

+  static inline void Project(Field &U) {
+    ProjectSUn(U);
+  }
+
   static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
     SU<Nc>::HotConfiguration(pRNG, U);
   }
@ -54,6 +54,10 @@ public:
   static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
     U = 1.0;
   }

+  static inline void Project(Field &U) {
+    return;
+  }
+
   static void MomentumSpacePropagator(Field &out, RealD m)
   {
@ -234,6 +238,10 @@ public:
 #endif //USE_FFT_ACCELERATION
   }

+  static inline void Project(Field &U) {
+    return;
+  }
+
   static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
     Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U);
   }
@ -159,6 +159,13 @@ private:
       Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U,
                                                      Resources.GetSerialRNG(),
                                                      Resources.GetParallelRNG());
+    } else {
+      // others
+      std::cout << GridLogError << "Unrecognized StartingType\n";
+      std::cout
+          << GridLogError
+          << "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
+      exit(1);
     }

     Smearing.set_Field(U);
@ -95,7 +95,7 @@ private:

   typedef typename IntegratorType::Field Field;
   typedef std::vector< HmcObservable<Field> * > ObsListType;

   //pass these from the resource manager
   GridSerialRNG &sRNG;
   GridParallelRNG &pRNG;
@ -313,6 +313,8 @@ public:
       std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
     }

+    FieldImplementation::Project(U);
+
     // and that we indeed got to the end of the trajectory
     assert(fabs(t_U - Params.trajL) < 1.0e-6);

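Note: the Project additions and the integrator call above introduce a single hook: each field implementation supplies a static Project(Field&) (SU(Nc) projection for gauge fields, a no-op for scalar theories), and the integrator invokes it once per trajectory to undo the numerical drift away from the group manifold that finite-step updates accumulate. A minimal sketch of the shape of that hook (stand-in types, not Grid's):

struct GaugeImpl {
  using Field = double;                     // stand-in field type
  static void Project(Field &U) { /* reunitarise; ProjectSUn(U) in Grid */ }
};

struct ScalarImpl {
  using Field = double;
  static void Project(Field &) { return; }  // nothing to enforce
};

template <class FieldImplementation>
void finish_trajectory(typename FieldImplementation::Field &U)
{
  FieldImplementation::Project(U);           // called after the last update step
}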
@ -51,7 +51,7 @@ public:

 private:
   template <class mobj, class robj>
-  static void baryon_site(const mobj &D1,
+  static void BaryonSite(const mobj &D1,
                          const mobj &D2,
                          const mobj &D3,
                          const Gamma GammaA_left,
@ -61,8 +61,18 @@ public:
                          const int parity,
                          const bool * wick_contractions,
                          robj &result);
+  template <class mobj, class robj>
+  static void BaryonSiteMatrix(const mobj &D1,
+                          const mobj &D2,
+                          const mobj &D3,
+                          const Gamma GammaA_left,
+                          const Gamma GammaB_left,
+                          const Gamma GammaA_right,
+                          const Gamma GammaB_right,
+                          const bool * wick_contractions,
+                          robj &result);
 public:
-  static void Wick_Contractions(std::string qi,
+  static void WickContractions(std::string qi,
                                std::string qf,
                                bool* wick_contractions);
   static void ContractBaryons(const PropagatorField &q1_left,
@ -75,8 +85,17 @@ public:
                              const bool* wick_contractions,
                              const int parity,
                              ComplexField &baryon_corr);
+  static void ContractBaryonsMatrix(const PropagatorField &q1_left,
+                              const PropagatorField &q2_left,
+                              const PropagatorField &q3_left,
+                              const Gamma GammaA_left,
+                              const Gamma GammaB_left,
+                              const Gamma GammaA_right,
+                              const Gamma GammaB_right,
+                              const bool* wick_contractions,
+                              SpinMatrixField &baryon_corr);
   template <class mobj, class robj>
-  static void ContractBaryons_Sliced(const mobj &D1,
+  static void ContractBaryonsSliced(const mobj &D1,
                                     const mobj &D2,
                                     const mobj &D3,
                                     const Gamma GammaA_left,
@ -87,9 +106,20 @@ public:
                                     const int parity,
                                     const int nt,
                                     robj &result);
+  template <class mobj, class robj>
+  static void ContractBaryonsSlicedMatrix(const mobj &D1,
+                                    const mobj &D2,
+                                    const mobj &D3,
+                                    const Gamma GammaA_left,
+                                    const Gamma GammaB_left,
+                                    const Gamma GammaA_right,
+                                    const Gamma GammaB_right,
+                                    const bool* wick_contractions,
+                                    const int nt,
+                                    robj &result);
 private:
   template <class mobj, class mobj2, class robj>
-  static void Baryon_Gamma_3pt_Group1_Site(
+  static void BaryonGamma3ptGroup1Site(
          const mobj &Dq1_ti,
          const mobj2 &Dq2_spec,
          const mobj2 &Dq3_spec,
@ -101,7 +131,7 @@ public:
          robj &result);

   template <class mobj, class mobj2, class robj>
-  static void Baryon_Gamma_3pt_Group2_Site(
+  static void BaryonGamma3ptGroup2Site(
          const mobj2 &Dq1_spec,
          const mobj &Dq2_ti,
          const mobj2 &Dq3_spec,
@ -113,7 +143,7 @@ public:
          robj &result);

   template <class mobj, class mobj2, class robj>
-  static void Baryon_Gamma_3pt_Group3_Site(
+  static void BaryonGamma3ptGroup3Site(
          const mobj2 &Dq1_spec,
          const mobj2 &Dq2_spec,
          const mobj &Dq3_ti,
@ -125,7 +155,7 @@ public:
          robj &result);
 public:
   template <class mobj>
-  static void Baryon_Gamma_3pt(
+  static void BaryonGamma3pt(
          const PropagatorField &q_ti,
          const mobj &Dq_spec1,
          const mobj &Dq_spec2,
@ -138,7 +168,7 @@ public:
          SpinMatrixField &stn_corr);
 private:
   template <class mobj, class mobj2, class robj>
-  static void Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
+  static void SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
                                           const mobj2 &Du_spec,
                                           const mobj &Dd_tf,
                                           const mobj &Ds_ti,
@ -147,7 +177,7 @@ public:
                                           const Gamma GammaB_nucl,
                                           robj &result);
   template <class mobj, class mobj2, class robj>
-  static void Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
+  static void SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
                                              const mobj &Du_tf,
                                              const mobj2 &Du_spec,
                                              const mobj &Dd_tf,
@ -159,7 +189,7 @@ public:


   template <class mobj, class mobj2, class robj>
-  static void Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
+  static void SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
                                           const mobj2 &Du_spec,
                                           const mobj &Dd_tf,
                                           const mobj &Ds_ti,
@ -168,7 +198,7 @@ public:
                                           const Gamma GammaB_nucl,
                                           robj &result);
   template <class mobj, class mobj2, class robj>
-  static void Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,
+  static void SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
                                              const mobj &Du_tf,
                                              const mobj2 &Du_spec,
                                              const mobj &Dd_tf,
@ -179,7 +209,7 @@ public:
                                              robj &result);
 public:
   template <class mobj>
-  static void Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
+  static void SigmaToNucleonEye(const PropagatorField &qq_loop,
                                   const mobj &Du_spec,
                                   const PropagatorField &qd_tf,
                                   const PropagatorField &qs_ti,
@ -189,7 +219,7 @@ public:
                                   const std::string op,
                                   SpinMatrixField &stn_corr);
   template <class mobj>
-  static void Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
+  static void SigmaToNucleonNonEye(const PropagatorField &qq_ti,
                                      const PropagatorField &qq_tf,
                                      const mobj &Du_spec,
                                      const PropagatorField &qd_tf,
@ -217,7 +247,7 @@ const Real BaryonUtils<FImpl>::epsilon_sgn[6] = {1.,1.,1.,-1.,-1.,-1.};
 //This is the old version
 template <class FImpl>
 template <class mobj, class robj>
-void BaryonUtils<FImpl>::baryon_site(const mobj &D1,
+void BaryonUtils<FImpl>::BaryonSite(const mobj &D1,
                                     const mobj &D2,
                                     const mobj &D3,
                                     const Gamma GammaA_i,
@ -329,12 +359,132 @@ void BaryonUtils<FImpl>::baryon_site(const mobj &D1,
   }}
 }

+//New version without parity projection or trace
+template <class FImpl>
+template <class mobj, class robj>
+void BaryonUtils<FImpl>::BaryonSiteMatrix(const mobj &D1,
+                                          const mobj &D2,
+                                          const mobj &D3,
+                                          const Gamma GammaA_i,
+                                          const Gamma GammaB_i,
+                                          const Gamma GammaA_f,
+                                          const Gamma GammaB_f,
+                                          const bool * wick_contraction,
+                                          robj &result)
+{
+
+  auto D1_GAi =  D1 * GammaA_i;
+  auto GAf_D1_GAi = GammaA_f * D1_GAi;
+  auto GBf_D1_GAi = GammaB_f * D1_GAi;
+
+  auto D2_GBi = D2 * GammaB_i;
+  auto GBf_D2_GBi = GammaB_f * D2_GBi;
+  auto GAf_D2_GBi = GammaA_f * D2_GBi;
+
+  auto GBf_D3 = GammaB_f * D3;
+  auto GAf_D3 = GammaA_f * D3;
+
+  for (int ie_f=0; ie_f < 6 ; ie_f++){
+    int a_f = epsilon[ie_f][0]; //a
+    int b_f = epsilon[ie_f][1]; //b
+    int c_f = epsilon[ie_f][2]; //c
+    for (int ie_i=0; ie_i < 6 ; ie_i++){
+      int a_i = epsilon[ie_i][0]; //a'
+      int b_i = epsilon[ie_i][1]; //b'
+      int c_i = epsilon[ie_i][2]; //c'
+
+      Real ee = epsilon_sgn[ie_f] * epsilon_sgn[ie_i];
+      //This is the \delta_{456}^{123} part
+      if (wick_contraction[0]){
+        for (int rho_i=0; rho_i<Ns; rho_i++){
+          for (int rho_f=0; rho_f<Ns; rho_f++){
+            auto GAf_D1_GAi_rr_cc = GAf_D1_GAi()(rho_f,rho_i)(c_f,c_i);
+            for (int alpha_f=0; alpha_f<Ns; alpha_f++){
+              for (int beta_i=0; beta_i<Ns; beta_i++){
+                result()(rho_f,rho_i)() += ee * GAf_D1_GAi_rr_cc
+                                              * D2_GBi    ()(alpha_f,beta_i)(a_f,a_i)
+                                              * GBf_D3    ()(alpha_f,beta_i)(b_f,b_i);
+              }}
+          }}
+      }
+      //This is the \delta_{456}^{231} part
+      if (wick_contraction[1]){
+        for (int rho_i=0; rho_i<Ns; rho_i++){
+          for (int alpha_f=0; alpha_f<Ns; alpha_f++){
+            auto D1_GAi_ar_ac = D1_GAi()(alpha_f,rho_i)(a_f,c_i);
+            for (int beta_i=0; beta_i<Ns; beta_i++){
+              auto GBf_D2_GBi_ab_ba = GBf_D2_GBi ()(alpha_f,beta_i)(b_f,a_i);
+              for (int rho_f=0; rho_f<Ns; rho_f++){
+                result()(rho_f,rho_i)() += ee * D1_GAi_ar_ac
+                                              * GBf_D2_GBi_ab_ba
+                                              * GAf_D3    ()(rho_f,beta_i)(c_f,b_i);
+              }}
+          }}
+      }
+      //This is the \delta_{456}^{312} part
+      if (wick_contraction[2]){
+        for (int rho_i=0; rho_i<Ns; rho_i++){
+          for (int alpha_f=0; alpha_f<Ns; alpha_f++){
+            auto GBf_D1_GAi_ar_bc = GBf_D1_GAi()(alpha_f,rho_i)(b_f,c_i);
+            for (int beta_i=0; beta_i<Ns; beta_i++){
+              auto D3_ab_ab = D3 ()(alpha_f,beta_i)(a_f,b_i);
+              for (int rho_f=0; rho_f<Ns; rho_f++){
+                result()(rho_f,rho_i)() += ee * GBf_D1_GAi_ar_bc
+                                              * GAf_D2_GBi ()(rho_f,beta_i)(c_f,a_i)
+                                              * D3_ab_ab;
+              }}
+          }}
+      }
+      //This is the \delta_{456}^{132} part
+      if (wick_contraction[3]){
+        for (int rho_i=0; rho_i<Ns; rho_i++){
+          for (int rho_f=0; rho_f<Ns; rho_f++){
+            auto GAf_D1_GAi_rr_cc = GAf_D1_GAi()(rho_f,rho_i)(c_f,c_i);
+            for (int alpha_f=0; alpha_f<Ns; alpha_f++){
+              for (int beta_i=0; beta_i<Ns; beta_i++){
+                result()(rho_f,rho_i)() -= ee * GAf_D1_GAi_rr_cc
+                                              * GBf_D2_GBi ()(alpha_f,beta_i)(b_f,a_i)
+                                              * D3         ()(alpha_f,beta_i)(a_f,b_i);
+              }}
+          }}
+      }
+      //This is the \delta_{456}^{321} part
+      if (wick_contraction[4]){
+        for (int rho_i=0; rho_i<Ns; rho_i++){
+          for (int alpha_f=0; alpha_f<Ns; alpha_f++){
+            auto GBf_D1_GAi_ar_bc = GBf_D1_GAi()(alpha_f,rho_i)(b_f,c_i);
+            for (int beta_i=0; beta_i<Ns; beta_i++){
+              auto D2_GBi_ab_aa = D2_GBi()(alpha_f,beta_i)(a_f,a_i);
+              for (int rho_f=0; rho_f<Ns; rho_f++){
+                result()(rho_f,rho_i)() -= ee * GBf_D1_GAi_ar_bc
+                                              * D2_GBi_ab_aa
+                                              * GAf_D3    ()(rho_f,beta_i)(c_f,b_i);
+              }}
+          }}
+      }
+      //This is the \delta_{456}^{213} part
+      if (wick_contraction[5]){
+        for (int rho_i=0; rho_i<Ns; rho_i++){
+          for (int alpha_f=0; alpha_f<Ns; alpha_f++){
+            auto D1_GAi_ar_ac = D1_GAi()(alpha_f,rho_i)(a_f,c_i);
+            for (int beta_i=0; beta_i<Ns; beta_i++){
+              auto GBf_D3_ab_bb = GBf_D3()(alpha_f,beta_i)(b_f,b_i);
+              for (int rho_f=0; rho_f<Ns; rho_f++){
+                result()(rho_f,rho_i)() -= ee * D1_GAi_ar_ac
+                                              * GAf_D2_GBi ()(rho_f,beta_i)(c_f,a_i)
+                                              * GBf_D3_ab_bb;
+              }}
+          }}
+      }
+    }}
+}
+
 /* Computes which wick contractions should be performed for a    *
  * baryon 2pt function given the initial and finals state quark  *
  * flavours.                                                     *
  * The array wick_contractions must be of length 6               */
 template<class FImpl>
-void BaryonUtils<FImpl>::Wick_Contractions(std::string qi, std::string qf, bool* wick_contractions) {
+void BaryonUtils<FImpl>::WickContractions(std::string qi, std::string qf, bool* wick_contractions) {
   const int epsilon[6][3] = {{0,1,2},{1,2,0},{2,0,1},{0,2,1},{2,1,0},{1,0,2}};
   for (int ie=0; ie < 6 ; ie++) {
     wick_contractions[ie] = (qi.size() == 3 && qf.size() == 3
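Note: BaryonSiteMatrix differs from BaryonSite in leaving the sink and source spin indices (rho_f, rho_i) open instead of parity-projecting and tracing, which is why the parity argument disappears from the Matrix variants. The traced correlator of the old path can then be recovered downstream; schematically, with plain C++ stand-ins for the spin-matrix type:

#include <array>
#include <complex>

constexpr int Ns = 4;
using SpinMatrix = std::array<std::array<std::complex<double>, Ns>, Ns>;

// Recover the scalar (projected, traced) correlator from the open-index
// result: C_scalar = Tr( P * C_matrix ), P being the parity projector.
std::complex<double> project_and_trace(const SpinMatrix &C, const SpinMatrix &P)
{
  std::complex<double> tr{};
  for (int a = 0; a < Ns; ++a)
    for (int b = 0; b < Ns; ++b)
      tr += P[a][b] * C[b][a];
  return tr;
}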
@ -364,11 +514,6 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,

   assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
   assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
-
-  std::cout << "GammaA (left) " << (GammaA_left.g) << std::endl;
-  std::cout << "GammaB (left) " << (GammaB_left.g) << std::endl;
-  std::cout << "GammaA (right) " << (GammaA_right.g) << std::endl;
-  std::cout << "GammaB (right) " << (GammaB_right.g) << std::endl;

   assert(parity==1 || parity == -1 && "Parity must be +1 or -1");

@ -397,13 +542,62 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,
     auto D2 = v2[ss];
     auto D3 = v3[ss];
     vobj result=Zero();
-    baryon_site(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result);
+    BaryonSite(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result);
     vbaryon_corr[ss] = result;
   }  );//end loop over lattice sites

   t += usecond();

-  std::cout << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
+  std::cout << GridLogDebug << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
+}
+
+template<class FImpl>
+void BaryonUtils<FImpl>::ContractBaryonsMatrix(const PropagatorField &q1_left,
+                                               const PropagatorField &q2_left,
+                                               const PropagatorField &q3_left,
+                                               const Gamma GammaA_left,
+                                               const Gamma GammaB_left,
+                                               const Gamma GammaA_right,
+                                               const Gamma GammaB_right,
+                                               const bool* wick_contractions,
+                                               SpinMatrixField &baryon_corr)
+{
+
+  assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
+  assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
+
+  GridBase *grid = q1_left.Grid();
+
+  autoView(vbaryon_corr, baryon_corr,CpuWrite);
+  autoView( v1 , q1_left, CpuRead);
+  autoView( v2 , q2_left, CpuRead);
+  autoView( v3 , q3_left, CpuRead);
+
+  // Real bytes =0.;
+  // bytes += grid->oSites() * (432.*sizeof(vComplex) + 126.*sizeof(int) + 36.*sizeof(Real));
+  // for (int ie=0; ie < 6 ; ie++){
+  //   if(ie==0 or ie==3){
+  //      bytes += grid->oSites() * (4.*sizeof(int) + 4752.*sizeof(vComplex)) * wick_contractions[ie];
+  //   }
+  //   else{
+  //      bytes += grid->oSites() * (64.*sizeof(int) + 5184.*sizeof(vComplex)) * wick_contractions[ie];
+  //   }
+  // }
+  // Real t=0.;
+  // t =-usecond();
+
+  accelerator_for(ss, grid->oSites(), grid->Nsimd(), {
+    auto D1 = v1[ss];
+    auto D2 = v2[ss];
+    auto D3 = v3[ss];
+    sobj result=Zero();
+    BaryonSiteMatrix(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,wick_contractions,result);
+    vbaryon_corr[ss] = result;
+  }  );//end loop over lattice sites
+
+  // t += usecond();
+
+  // std::cout << GridLogDebug << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
+
 }

@ -414,7 +608,7 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,
  * Wick_Contractions function above                              */
 template <class FImpl>
 template <class mobj, class robj>
-void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,
+void BaryonUtils<FImpl>::ContractBaryonsSliced(const mobj &D1,
                                                const mobj &D2,
                                                const mobj &D3,
                                                const Gamma GammaA_left,
@ -429,16 +623,33 @@ void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,

   assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
   assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
-
-  std::cout << "GammaA (left) " << (GammaA_left.g) << std::endl;
-  std::cout << "GammaB (left) " << (GammaB_left.g) << std::endl;
-  std::cout << "GammaA (right) " << (GammaA_right.g) << std::endl;
-  std::cout << "GammaB (right) " << (GammaB_right.g) << std::endl;

   assert(parity==1 || parity == -1 && "Parity must be +1 or -1");

   for (int t=0; t<nt; t++) {
-    baryon_site(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result[t]);
+    BaryonSite(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result[t]);
+  }
+}
+
+template <class FImpl>
+template <class mobj, class robj>
+void BaryonUtils<FImpl>::ContractBaryonsSlicedMatrix(const mobj &D1,
+                                               const mobj &D2,
+                                               const mobj &D3,
+                                               const Gamma GammaA_left,
+                                               const Gamma GammaB_left,
+                                               const Gamma GammaA_right,
+                                               const Gamma GammaB_right,
+                                               const bool* wick_contractions,
+                                               const int nt,
+                                               robj &result)
+{
+
+  assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
+  assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
+
+  for (int t=0; t<nt; t++) {
+    BaryonSiteMatrix(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,wick_contractions,result[t]);
   }
 }

@ -454,7 +665,7 @@ void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,
  * Dq4_tf is a quark line from t_f to t_J                        */
 template<class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group1_Site(
+void BaryonUtils<FImpl>::BaryonGamma3ptGroup1Site(
                                        const mobj &Dq1_ti,
                                        const mobj2 &Dq2_spec,
                                        const mobj2 &Dq3_spec,
@ -546,7 +757,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group1_Site(
  * Dq4_tf is a quark line from t_f to t_J                        */
 template<class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group2_Site(
+void BaryonUtils<FImpl>::BaryonGamma3ptGroup2Site(
                                        const mobj2 &Dq1_spec,
                                        const mobj &Dq2_ti,
                                        const mobj2 &Dq3_spec,
@ -636,7 +847,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group2_Site(
  * Dq4_tf is a quark line from t_f to t_J                        */
 template<class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group3_Site(
+void BaryonUtils<FImpl>::BaryonGamma3ptGroup3Site(
                                        const mobj2 &Dq1_spec,
                                        const mobj2 &Dq2_spec,
                                        const mobj &Dq3_ti,
@ -728,7 +939,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group3_Site(
  * https://aportelli.github.io/Hadrons-doc/#/mcontraction        */
 template<class FImpl>
 template <class mobj>
-void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
+void BaryonUtils<FImpl>::BaryonGamma3pt(
                                        const PropagatorField &q_ti,
                                        const mobj &Dq_spec1,
                                        const mobj &Dq_spec2,
@ -751,7 +962,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
       auto Dq_ti = vq_ti[ss];
       auto Dq_tf = vq_tf[ss];
       sobj result=Zero();
-      Baryon_Gamma_3pt_Group1_Site(Dq_ti,Dq_spec1,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
+      BaryonGamma3ptGroup1Site(Dq_ti,Dq_spec1,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
       vcorr[ss] += result;
     });//end loop over lattice sites
   } else if (group == 2) {
@ -759,7 +970,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
       auto Dq_ti = vq_ti[ss];
       auto Dq_tf = vq_tf[ss];
       sobj result=Zero();
-      Baryon_Gamma_3pt_Group2_Site(Dq_spec1,Dq_ti,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
+      BaryonGamma3ptGroup2Site(Dq_spec1,Dq_ti,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
       vcorr[ss] += result;
     });//end loop over lattice sites
   } else if (group == 3) {
@ -767,7 +978,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
       auto Dq_ti = vq_ti[ss];
       auto Dq_tf = vq_tf[ss];
       sobj result=Zero();
-      Baryon_Gamma_3pt_Group3_Site(Dq_spec1,Dq_spec2,Dq_ti,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
+      BaryonGamma3ptGroup3Site(Dq_spec1,Dq_spec2,Dq_ti,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);

       vcorr[ss] += result;
     });//end loop over lattice sites
@ -787,7 +998,7 @@ void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
  * Ds_ti is a quark line from t_i to t_H                         */
 template <class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
+void BaryonUtils<FImpl>::SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
                                                      const mobj2 &Du_spec,
                                                      const mobj &Dd_tf,
                                                      const mobj &Ds_ti,
@ -838,7 +1049,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
  * Ds_ti is a quark line from t_i to t_H                         */
 template <class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
+void BaryonUtils<FImpl>::SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
                                                         const mobj &Du_tf,
                                                         const mobj2 &Du_spec,
                                                         const mobj &Dd_tf,
@ -897,7 +1108,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
  * Ds_ti is a quark line from t_i to t_H                         */
 template <class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
+void BaryonUtils<FImpl>::SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
                                                      const mobj2 &Du_spec,
                                                      const mobj &Dd_tf,
                                                      const mobj &Ds_ti,
@ -948,7 +1159,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
  * Ds_ti is a quark line from t_i to t_H                         */
 template <class FImpl>
 template <class mobj, class mobj2, class robj>
-void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,
+void BaryonUtils<FImpl>::SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
                                                         const mobj &Du_tf,
                                                         const mobj2 &Du_spec,
                                                         const mobj &Dd_tf,
@ -1002,7 +1213,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,

 template<class FImpl>
 template <class mobj>
-void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
+void BaryonUtils<FImpl>::SigmaToNucleonEye(const PropagatorField &qq_loop,
                                              const mobj &Du_spec,
                                              const PropagatorField &qd_tf,
                                              const PropagatorField &qs_ti,
@ -1029,9 +1240,9 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
     auto Ds_ti = vs_ti[ss];
     sobj result=Zero();
     if(op == "Q1"){
-      Sigma_to_Nucleon_Q1_Eye_site(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
+      SigmaToNucleonQ1EyeSite(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
     } else if(op == "Q2"){
-      Sigma_to_Nucleon_Q2_Eye_site(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
+      SigmaToNucleonQ2EyeSite(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
     } else {
       assert(0 && "Weak Operator not correctly specified");
     }
@ -1041,7 +1252,7 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,

 template<class FImpl>
 template <class mobj>
-void BaryonUtils<FImpl>::Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
+void BaryonUtils<FImpl>::SigmaToNucleonNonEye(const PropagatorField &qq_ti,
                                                 const PropagatorField &qq_tf,
                                                 const mobj &Du_spec,
                                                 const PropagatorField &qd_tf,
@ -1071,9 +1282,9 @@ void BaryonUtils<FImpl>::Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
     auto Ds_ti = vs_ti[ss];
     sobj result=Zero();
     if(op == "Q1"){
-      Sigma_to_Nucleon_Q1_NonEye_site(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
+      SigmaToNucleonQ1NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
|
SigmaToNucleonQ1NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
|
||||||
} else if(op == "Q2"){
|
} else if(op == "Q2"){
|
||||||
Sigma_to_Nucleon_Q2_NonEye_site(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
|
SigmaToNucleonQ2NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
|
||||||
} else {
|
} else {
|
||||||
assert(0 && "Weak Operator not correctly specified");
|
assert(0 && "Weak Operator not correctly specified");
|
||||||
}
|
}
|
||||||
|
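The hunks above are a pure rename of the BaryonUtils helpers from snake_case to CamelCase; the argument lists and bodies are untouched. As a hedged aside, and not part of this commit: downstream code that still spells the old names could in principle be kept compiling during a migration with a coarse textual alias, e.g.

// Hypothetical migration shim, NOT in this commit: textually map old
// call sites onto the renamed BaryonUtils members until callers are updated.
#define Sigma_to_Nucleon_Eye    SigmaToNucleonEye
#define Sigma_to_Nucleon_NonEye SigmaToNucleonNonEye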
@@ -735,7 +735,6 @@ public:
    }
  }

-
  template <typename GaugeField>
  static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
    typedef typename GaugeField::vector_type vector_type;
@@ -800,6 +799,88 @@ public:
  }
};

+template<int N>
+LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
+{
+  GridBase *grid=Umu.Grid();
+  auto lvol = grid->lSites();
+  LatticeComplexD ret(grid);
+
+  autoView(Umu_v,Umu,CpuRead);
+  autoView(ret_v,ret,CpuWrite);
+  thread_for(site,lvol,{
+    Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
+    Coordinate lcoor;
+    grid->LocalIndexToLocalCoor(site, lcoor);
+    iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
+    peekLocalSite(Us, Umu_v, lcoor);
+    for(int i=0;i<N;i++){
+    for(int j=0;j<N;j++){
+      EigenU(i,j) = Us()()(i,j);
+    }}
+    ComplexD det = EigenU.determinant();
+    pokeLocalSite(det,ret_v,lcoor);
+  });
+  return ret;
+}
+template<int N>
+static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
+{
+  Umu = ProjectOnGroup(Umu);
+  auto det = Determinant(Umu);
+
+  det = conjugate(det);
+
+  for(int i=0;i<N;i++){
+    auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
+    element = element * det;
+    PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
+  }
+}
+template<int N>
+static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplexD, N> >,Nd> > &U)
+{
+  GridBase *grid=U.Grid();
+  // Reunitarise
+  for(int mu=0;mu<Nd;mu++){
+    auto Umu = PeekIndex<LorentzIndex>(U,mu);
+    Umu = ProjectOnGroup(Umu);
+    ProjectSUn(Umu);
+    PokeIndex<LorentzIndex>(U,Umu,mu);
+  }
+}
+// Explicit specialisation for SU(3).
+// Explicit specialisation for SU(3).
+static void
+ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
+{
+  GridBase *grid=Umu.Grid();
+  const int x=0;
+  const int y=1;
+  const int z=2;
+  // Reunitarise
+  Umu = ProjectOnGroup(Umu);
+  autoView(Umu_v,Umu,CpuWrite);
+  thread_for(ss,grid->oSites(),{
+    auto cm = Umu_v[ss];
+    cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy
+    cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz
+    cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx
+    Umu_v[ss]=cm;
+  });
+}
+static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U)
+{
+  GridBase *grid=U.Grid();
+  // Reunitarise
+  for(int mu=0;mu<Nd;mu++){
+    auto Umu = PeekIndex<LorentzIndex>(U,mu);
+    Umu = ProjectOnGroup(Umu);
+    ProjectSU3(Umu);
+    PokeIndex<LorentzIndex>(U,Umu,mu);
+  }
+}
+
typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;
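The new ProjectSUn relies on a simple fact: after ProjectOnGroup the matrix is unitary, so det(U) = d with |d| = 1, and multiplying any single row by conj(d) rescales the determinant to d*conj(d) = 1. A minimal standalone sketch of that fix with plain Eigen (illustrative only; Grid applies it per lattice site through PeekIndex/PokeIndex on the last row):

#include <Eigen/Dense>
#include <complex>

// Project a unitary 3x3 matrix onto SU(3) by rescaling its last row.
void projectDetOne(Eigen::Matrix3cd &U) {
  std::complex<double> d = U.determinant(); // |d| == 1 for unitary U
  U.row(2) *= std::conj(d);                 // det becomes d*conj(d) == 1
}

The ProjectSU3 specialisation avoids the determinant altogether: for SU(3) the third row is fixed by the first two as the conjugated cross product, which is exactly what the three adj(...) lines compute.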
@@ -26,7 +26,7 @@
 *************************************************************************************/
 /* END LEGAL */
#include <Grid/Grid.h>
-#ifndef __NVCC__
+#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))

NAMESPACE_BEGIN(Grid);

@@ -38,12 +38,20 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_HIP
#include <hip/hip_fp16.h>
#endif
+#ifdef GRID_SYCL
+namespace Grid {
+  typedef struct { uint16_t x;} half;
+  typedef struct { half x; half y;} half2;
+  typedef struct { float x; float y;} float2;
+  typedef struct { double x; double y;} double2;
+}
+#endif


namespace Grid {

-#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
-typedef struct { uint16_t x;} half;
-#endif
typedef struct Half2_t { half x; half y; } Half2;

#define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH )
@@ -156,7 +164,7 @@ accelerator_inline float half2float(half h)
  f = __half2float(h);
#else
  Grid_half hh;
-  hh.x = hr.x;
+  hh.x = h.x;
  f= sfw_half_to_float(hh);
#endif
  return f;
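The one-character fix above matters: hr.x referenced an undeclared name, while the function parameter is h, whose raw fp16 bits must reach the software converter. For orientation, here is a self-contained sketch of what a software half-to-float conversion of this kind does with those bits. It is written from the IEEE-754 binary16 layout as an assumption about sfw_half_to_float's behaviour, not copied from Grid:

#include <cstdint>
#include <cstring>

// Expand IEEE-754 binary16 bits to binary32: 1 sign bit, 5->8 exponent
// bits (bias 15 -> 127), 10->23 mantissa bits, with subnormal/Inf/NaN cases.
inline float half_bits_to_float(uint16_t h) {
  uint32_t sign = (uint32_t)(h & 0x8000u) << 16;
  uint32_t exp  = (h >> 10) & 0x1Fu;
  uint32_t mant = h & 0x3FFu;
  uint32_t f;
  if (exp == 0) {
    if (mant == 0) { f = sign; }                    // signed zero
    else {                                          // subnormal: renormalise
      exp = 127 - 15 + 1;
      while (!(mant & 0x400u)) { mant <<= 1; exp--; }
      f = sign | (exp << 23) | ((mant & 0x3FFu) << 13);
    }
  } else if (exp == 31) {
    f = sign | 0x7F800000u | (mant << 13);          // Inf / NaN
  } else {
    f = sign | ((exp - 15 + 127) << 23) | (mant << 13);
  }
  float out; std::memcpy(&out, &f, sizeof(out));
  return out;
}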
@@ -125,14 +125,6 @@ accelerator_inline Grid_simd<S, V> sqrt(const Grid_simd<S, V> &r) {
  return SimdApply(SqrtRealFunctor<S>(), r);
}
template <class S, class V>
-accelerator_inline Grid_simd<S, V> rsqrt(const Grid_simd<S, V> &r) {
-  return SimdApply(RSqrtRealFunctor<S>(), r);
-}
-template <class Scalar>
-accelerator_inline Scalar rsqrt(const Scalar &r) {
-  return (RSqrtRealFunctor<Scalar>(), r);
-}
-template <class S, class V>
accelerator_inline Grid_simd<S, V> cos(const Grid_simd<S, V> &r) {
  return SimdApply(CosRealFunctor<S>(), r);
}
@@ -269,7 +269,7 @@ public:
  std::vector<Vector<std::pair<int,int> > > face_table ;
  Vector<int> surface_list;

-  Vector<StencilEntry> _entries; // Resident in managed memory
+  stencilVector<StencilEntry> _entries; // Resident in managed memory
  std::vector<Packet> Packets;
  std::vector<Merge> Mergers;
  std::vector<Merge> MergersSHM;
@@ -95,14 +95,18 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
  vtype nrm;
  vtype inner;
  for(int c1=0;c1<N;c1++){
+
+    // Normalises row c1
    zeroit(inner);
    for(int c2=0;c2<N;c2++)
      inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);

-    nrm = rsqrt(inner);
+    nrm = sqrt(inner);
+    nrm = 1.0/nrm;
    for(int c2=0;c2<N;c2++)
      ret._internal[c1][c2]*= nrm;

+    // Remove c1 from rows c1+1...N-1
    for (int b=c1+1; b<N; ++b){
      decltype(ret._internal[b][b]*ret._internal[b][b]) pr;
      zeroit(pr);
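With rsqrt removed from the SIMD layer (note the deleted scalar overload even contained a comma-operator bug, returning r unchanged), ProjectOnGroup now normalises with sqrt followed by a reciprocal. A scalar sketch of the whole modified Gram-Schmidt pass, using std::complex as an assumed stand-in for Grid's templated tensor internals:

#include <array>
#include <cmath>
#include <complex>

// Minimal sketch of ProjectOnGroup's row orthonormalisation: normalise
// row c1, then remove its component from every later row.
template <int N>
void projectOnGroup(std::array<std::array<std::complex<double>, N>, N> &U) {
  for (int c1 = 0; c1 < N; c1++) {
    double inner = 0;
    for (int c2 = 0; c2 < N; c2++) inner += std::norm(U[c1][c2]);
    double nrm = 1.0 / std::sqrt(inner);     // sqrt then reciprocal, as in the patch
    for (int c2 = 0; c2 < N; c2++) U[c1][c2] *= nrm;
    for (int b = c1 + 1; b < N; b++) {       // orthogonalise rows c1+1...N-1
      std::complex<double> pr = 0;
      for (int c = 0; c < N; c++) pr += std::conj(U[c1][c]) * U[b][c];
      for (int c = 0; c < N; c++) U[b][c] -= pr * U[c1][c];
    }
  }
}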
@@ -84,7 +84,6 @@ NAMESPACE_BEGIN(Grid);
  }

  UNARY(sqrt);
-  UNARY(rsqrt);
  UNARY(sin);
  UNARY(cos);
  UNARY(asin);
@@ -21,22 +21,26 @@ void acceleratorInit(void)
#define ENV_RANK_SLURM         "SLURM_PROCID"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_MVAPICH       "MV2_COMM_WORLD_RANK"
-// We extract the local rank initialization using an environment variable
-if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
-  printf("OPENMPI detected\n");
-  rank = atoi(localRankStr);
-} else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
-  printf("MVAPICH detected\n");
-  rank = atoi(localRankStr);
-} else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
-  printf("SLURM detected\n");
-  rank = atoi(localRankStr);
-} else {
-  printf("MPI version is unknown - bad things may happen\n");
-}
if ((localRankStr = getenv(ENV_RANK_OMPI   )) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_SLURM  )) != NULL) { world_rank = atoi(localRankStr);}
+// We extract the local rank initialization using an environment variable
+if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
+  if (!world_rank)
+    printf("OPENMPI detected\n");
+  rank = atoi(localRankStr);
+} else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
+  if (!world_rank)
+    printf("MVAPICH detected\n");
+  rank = atoi(localRankStr);
+} else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
+  if (!world_rank)
+    printf("SLURM detected\n");
+  rank = atoi(localRankStr);
+} else {
+  if (!world_rank)
+    printf("MPI version is unknown - bad things may happen\n");
+}

size_t totalDeviceMem=0;
for (int i = 0; i < nDevices; i++) {
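The reorder above reads the world rank first, so the launcher-detection messages can be printed once, from rank 0 only, instead of once per process. A self-contained sketch of the same detection idiom; the OMPI and SLURM local-rank variable names below are the conventional ones and are assumptions here, since only the MVAPICH and SLURM world-rank spellings are visible in the hunk:

#include <cstdlib>

// Returns the node-local rank published by common MPI launchers, or 0
// if no known variable is set (mirroring the fall-through branch above).
static int detectLocalRank(void) {
  const char *s;
  if ((s = getenv("OMPI_COMM_WORLD_LOCAL_RANK")) != nullptr) return atoi(s); // Open MPI
  if ((s = getenv("MV2_COMM_WORLD_LOCAL_RANK")) != nullptr) return atoi(s);  // MVAPICH2
  if ((s = getenv("SLURM_LOCALID"))             != nullptr) return atoi(s);  // SLURM
  return 0; // unknown launcher - "bad things may happen"
}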
@@ -48,7 +52,7 @@ void acceleratorInit(void)
  prop = gpu_props[i];
  totalDeviceMem = prop.totalGlobalMem;
  if ( world_rank == 0) {
-#ifndef GRID_IBM_SUMMIT
+#ifndef GRID_DEFAULT_GPU
    if ( i==rank ) {
      printf("AcceleratorCudaInit[%d]: ========================\n",rank);
      printf("AcceleratorCudaInit[%d]: Device Number    : %d\n", rank,i);
@@ -73,11 +77,17 @@ void acceleratorInit(void)
#undef GPU_PROP_FMT
#undef GPU_PROP

-#ifdef GRID_IBM_SUMMIT
+#ifdef GRID_DEFAULT_GPU
  // IBM Jsrun makes cuda Device numbering screwy and not match rank
-  if ( world_rank == 0 ) printf("AcceleratorCudaInit: IBM Summit or similar - use default device\n");
+  if ( world_rank == 0 ) {
+    printf("AcceleratorCudaInit: using default device \n");
+    printf("AcceleratorCudaInit: assume user either uses a) IBM jsrun, or \n");
+    printf("AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding \n");
+    printf("AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no \n");
+  }
#else
  printf("AcceleratorCudaInit: rank %d setting device to node rank %d\n",world_rank,rank);
+  printf("AcceleratorCudaInit: Configure options --enable-select-gpu=yes \n");
  cudaSetDevice(rank);
#endif
  if ( world_rank == 0 ) printf("AcceleratorCudaInit: ================================================\n");
@@ -139,11 +149,18 @@ void acceleratorInit(void)
  MemoryManager::DeviceMaxBytes = (8*totalDeviceMem)/10; // Assume 80% ours
#undef GPU_PROP_FMT
#undef GPU_PROP
-#ifdef GRID_IBM_SUMMIT
-  // IBM Jsrun makes cuda Device numbering screwy and not match rank
-  if ( world_rank == 0 ) printf("AcceleratorHipInit: IBM Summit or similar - NOT setting device to node rank\n");
+#ifdef GRID_DEFAULT_GPU
+  if ( world_rank == 0 ) {
+    printf("AcceleratorHipInit: using default device \n");
+    printf("AcceleratorHipInit: assume user either uses a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding \n");
+    printf("AcceleratorHipInit: Configure options --enable-summit, --enable-select-gpu=no \n");
+  }
#else
-  if ( world_rank == 0 ) printf("AcceleratorHipInit: setting device to node rank\n");
+  if ( world_rank == 0 ) {
+    printf("AcceleratorHipInit: rank %d setting device to node rank %d\n",world_rank,rank);
+    printf("AcceleratorHipInit: Configure options --enable-select-gpu=yes \n");
+  }
  hipSetDevice(rank);
#endif
  if ( world_rank == 0 ) printf("AcceleratorHipInit: ================================================\n");
@@ -166,15 +166,18 @@ inline void *acceleratorAllocDevice(size_t bytes)
inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
+inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes)  { cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
+inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
inline int  acceleratorIsCommunicable(void *ptr)
{
-  int uvm;
-  auto
-  cuerr = cuPointerGetAttribute( &uvm, CU_POINTER_ATTRIBUTE_IS_MANAGED, (CUdeviceptr) ptr);
-  assert(cuerr == cudaSuccess );
-  if(uvm) return 0;
-  else    return 1;
+  //  int uvm=0;
+  //  auto
+  //  cuerr = cuPointerGetAttribute( &uvm, CU_POINTER_ATTRIBUTE_IS_MANAGED, (CUdeviceptr) ptr);
+  //  assert(cuerr == cudaSuccess );
+  //  if(uvm) return 0;
+  //  else    return 1;
+  return 1;
}

#endif
@@ -229,8 +232,10 @@ inline void *acceleratorAllocShared(size_t bytes){ return malloc_shared(bytes,*t
inline void *acceleratorAllocDevice(size_t bytes){ return malloc_device(bytes,*theGridAccelerator);};
inline void acceleratorFreeShared(void *ptr){free(ptr,*theGridAccelerator);};
inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};
+inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes)  { theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
+inline void acceleratorMemSet(void *base,int value,size_t bytes) { theGridAccelerator->memset(base,value,bytes); theGridAccelerator->wait();}
inline int  acceleratorIsCommunicable(void *ptr)
{
#if 0
@@ -328,10 +333,12 @@ inline void *acceleratorAllocDevice(size_t bytes)
  return ptr;
};

-inline void acceleratorFreeShared(void *ptr){ free(ptr);};
+inline void acceleratorFreeShared(void *ptr){ hipFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ hipFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
+inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes)  { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
+inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(base,value,bytes);}

#endif

@@ -354,7 +361,7 @@ inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemc
//////////////////////////////////////////////
// CPU Target - No accelerator just thread instead
//////////////////////////////////////////////
-#define GRID_ALLOC_ALIGN (2*1024*1024) // 2MB aligned

#if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) )

#undef GRID_SIMT
@@ -369,8 +376,10 @@ inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemc
accelerator_inline int acceleratorSIMTlane(int Nsimd) { return 0; } // CUDA specific
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { memcpy(to,from,bytes);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ memcpy(to,from,bytes);}
+inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes)  { memcpy(to,from,bytes);}
+
inline int  acceleratorIsCommunicable(void *ptr){ return 1; }
+inline void acceleratorMemSet(void *base,int value,size_t bytes) { memset(base,value,bytes);}
#ifdef HAVE_MM_MALLOC_H
inline void *acceleratorAllocShared(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};
inline void *acceleratorAllocDevice(size_t bytes){return _mm_malloc(bytes,GRID_ALLOC_ALIGN);};
@@ -393,6 +402,8 @@ inline void *acceleratorAllocCpu(size_t bytes){return memalign(GRID_ALLOC_ALIGN,
inline void acceleratorFreeCpu  (void *ptr){free(ptr);};
#endif

+
+
///////////////////////////////////////////////////
// Synchronise across local threads for divergence resynch
///////////////////////////////////////////////////
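Taken together, the four hunks above give every build target (CUDA, SYCL, HIP, plain CPU threads) the same two new entry points, acceleratorCopyDeviceToDevice and acceleratorMemSet. A minimal usage sketch against that common interface; the buffer size is illustrative:

// Zero a device buffer and clone it without staging through the host.
size_t bytes = 1024 * sizeof(double);
double *a = (double *)acceleratorAllocDevice(bytes);
double *b = (double *)acceleratorAllocDevice(bytes);
acceleratorMemSet(a, 0, bytes);              // byte-wise fill, like memset
acceleratorCopyDeviceToDevice(a, b, bytes);  // device-resident copy
acceleratorFreeDevice(a);
acceleratorFreeDevice(b);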
@@ -473,11 +473,13 @@ void Grid_init(int *argc,char ***argv)
    LebesgueOrder::UseLebesgueOrder=1;
  }
  CartesianCommunicator::nCommThreads = 1;
+#ifdef GRID_COMMS_THREADS
  if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-threads") ){
    arg= GridCmdOptionPayload(*argv,*argv+*argc,"--comms-threads");
    GridCmdOptionInt(arg,CartesianCommunicator::nCommThreads);
    assert(CartesianCommunicator::nCommThreads > 0);
  }
+#endif
  if( GridCmdOptionExists(*argv,*argv+*argc,"--cacheblocking") ){
    arg= GridCmdOptionPayload(*argv,*argv+*argc,"--cacheblocking");
    GridCmdOptionIntVector(arg,LebesgueOrder::Block);
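The new guard compiles the --comms-threads flag out entirely when GRID_COMMS_THREADS is undefined, leaving nCommThreads pinned at 1. The same three-helper idiom generalises to any integer flag; a sketch for a hypothetical --my-threads option, using the Grid helpers visible in the hunk:

#include <cassert>
#include <string>

// Hypothetical flag "--my-threads <n>", parsed with the same helpers
// used above (GridCmdOptionExists / GridCmdOptionPayload / GridCmdOptionInt).
void parseMyThreads(int *argc, char ***argv, int &nthreads) {
  if (GridCmdOptionExists(*argv, *argv + *argc, "--my-threads")) {
    std::string arg = GridCmdOptionPayload(*argv, *argv + *argc, "--my-threads");
    GridCmdOptionInt(arg, nthreads);
    assert(nthreads > 0);
  }
}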
@@ -1,4 +1,3 @@
-
#include "Benchmark_IO.hpp"

#ifndef BENCH_IO_LMIN
@@ -13,6 +12,7 @@
#define BENCH_IO_NPASS 10
#endif

+#ifdef HAVE_LIME
using namespace Grid;

std::string filestem(const int l)
@@ -196,3 +196,6 @@ int main (int argc, char ** argv)

  return EXIT_SUCCESS;
}
+#else
+int main(int argc,char ** argv){}
+#endif
@@ -2,12 +2,12 @@
#define Benchmark_IO_hpp_

#include <Grid/Grid.h>
-#ifdef HAVE_LIME
#define MSG std::cout << GridLogMessage
#define SEP \
"-----------------------------------------------------------------------------"
#define BIGSEP \
"============================================================================="
+#ifdef HAVE_LIME

namespace Grid {

@@ -1,5 +1,5 @@
#include "Benchmark_IO.hpp"
-
+#ifdef HAVE_LIME
using namespace Grid;

int main (int argc, char ** argv)
@@ -97,3 +97,6 @@ int main (int argc, char ** argv)

  return EXIT_SUCCESS;
}
+#else
+int main(int argc,char ** argv){}
+#endif
@@ -334,8 +334,9 @@ public:
  int threads = GridThread::GetThreads();
  Coordinate mpi = GridDefaultMpi(); assert(mpi.size()==4);
  Coordinate local({L,L,L,L});
+  Coordinate latt4({local[0]*mpi[0],local[1]*mpi[1],local[2]*mpi[2],local[3]*mpi[3]});

-  GridCartesian * TmpGrid = SpaceTimeGrid::makeFourDimGrid(Coordinate({72,72,72,72}),
+  GridCartesian * TmpGrid = SpaceTimeGrid::makeFourDimGrid(latt4,
                                                           GridDefaultSimd(Nd,vComplex::Nsimd()),
                                                           GridDefaultMpi());
  uint64_t NP = TmpGrid->RankCount();
@@ -343,7 +344,6 @@ public:
  NN_global=NN;
  uint64_t SHM=NP/NN;

-  Coordinate latt4({local[0]*mpi[0],local[1]*mpi[1],local[2]*mpi[2],local[3]*mpi[3]});

  ///////// Welcome message ////////////
  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
@@ -445,7 +445,11 @@ public:
  // 1344= 3*(2*8+6)*2*8 + 8*3*2*2 + 3*4*2*8
  // 1344 = Nc* (6+(Nc-1)*8)*2*Nd + Nd*Nc*2*2 + Nd*Nc*Ns*2
  // double flops=(1344.0*volume)/2;
+#if 1
  double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + Nd*Nc*Ns + Nd*Nc*Ns*2;
+#else
+  double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + 2*Nd*Nc*Ns + 2*Nd*Nc*Ns*2;
+#endif
  double flops=(fps*volume)/2;
  double mf_hi, mf_lo, mf_err;

@@ -498,8 +502,9 @@ public:
  int threads = GridThread::GetThreads();
  Coordinate mpi = GridDefaultMpi(); assert(mpi.size()==4);
  Coordinate local({L,L,L,L});
+  Coordinate latt4({local[0]*mpi[0],local[1]*mpi[1],local[2]*mpi[2],local[3]*mpi[3]});

-  GridCartesian * TmpGrid = SpaceTimeGrid::makeFourDimGrid(Coordinate({72,72,72,72}),
+  GridCartesian * TmpGrid = SpaceTimeGrid::makeFourDimGrid(latt4,
                                                           GridDefaultSimd(Nd,vComplex::Nsimd()),
                                                           GridDefaultMpi());
  uint64_t NP = TmpGrid->RankCount();
@@ -507,7 +512,6 @@ public:
  NN_global=NN;
  uint64_t SHM=NP/NN;

-  Coordinate latt4({local[0]*mpi[0],local[1]*mpi[1],local[2]*mpi[2],local[3]*mpi[3]});

  ///////// Welcome message ////////////
  std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
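Besides replacing the hard-coded 72^4 temporary grid with the actual local*mpi extent, the hunks above make the per-site flop estimate switchable. A quick arithmetic check of the enabled (#if 1) branch for the standard QCD values, offered as a sketch; the alternative branch simply doubles the last two terms:

// For Nc=3, Ns=4, Nd=4:
//   Nc*(6+(Nc-1)*8)*Ns*Nd = 3*22*16 = 1056
//   Nd*Nc*Ns              = 48
//   Nd*Nc*Ns*2            = 96
constexpr int Nc = 3, Ns = 4, Nd = 4;
constexpr double fps = Nc*(6 + (Nc-1)*8)*Ns*Nd + Nd*Nc*Ns + Nd*Nc*Ns*2;
static_assert(fps == 1200.0, "per-site flop estimate, first branch");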
@@ -94,8 +94,8 @@ int main (int argc, char ** argv)
  RealD Nnode = Grid.NodeCount();
  RealD ppn = Nrank/Nnode;

-  std::vector<Vector<HalfSpinColourVectorD> > xbuf(8);
-  std::vector<Vector<HalfSpinColourVectorD> > rbuf(8);
+  std::vector<std::vector<HalfSpinColourVectorD> > xbuf(8);
+  std::vector<std::vector<HalfSpinColourVectorD> > rbuf(8);

  for(int mu=0;mu<8;mu++){
    xbuf[mu].resize(lat*lat*lat*Ls);

benchmarks/Benchmark_comms_host_device.cc (new file, 260 lines)
@@ -0,0 +1,260 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./benchmarks/Benchmark_comms.cc
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+    *************************************************************************************/
+    /* END LEGAL */
+#include <Grid/Grid.h>
+
+using namespace std;
+using namespace Grid;
+
+struct time_statistics{
+  double mean;
+  double err;
+  double min;
+  double max;
+
+  void statistics(std::vector<double> v){
+    double sum = std::accumulate(v.begin(), v.end(), 0.0);
+    mean = sum / v.size();
+
+    std::vector<double> diff(v.size());
+    std::transform(v.begin(), v.end(), diff.begin(), [=](double x) { return x - mean; });
+    double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0);
+    err = std::sqrt(sq_sum / (v.size()*(v.size() - 1)));
+
+    auto result = std::minmax_element(v.begin(), v.end());
+    min = *result.first;
+    max = *result.second;
+  }
+};
+
+void header(){
+  std::cout <<GridLogMessage << " L "<<"\t"<<" Ls "<<"\t"
+            <<std::setw(11)<<"bytes\t\t"<<"MB/s uni (err/min/max)"<<"\t\t"<<"MB/s bidi (err/min/max)"<<std::endl;
+};
+
+int main (int argc, char ** argv)
+{
+  Grid_init(&argc,&argv);
+
+  Coordinate simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
+  Coordinate mpi_layout  = GridDefaultMpi();
+  int threads = GridThread::GetThreads();
+  std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
+
+  int Nloop=250;
+  int nmu=0;
+  int maxlat=32;
+  for(int mu=0;mu<Nd;mu++) if (mpi_layout[mu]>1) nmu++;
+
+  std::cout << GridLogMessage << "Number of iterations to average: "<< Nloop << std::endl;
+  std::vector<double> t_time(Nloop);
+  time_statistics timestat;
+
+  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
+  std::cout<<GridLogMessage << "= Benchmarking sequential halo exchange from host memory "<<std::endl;
+  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
+  header();
+
+  for(int lat=8;lat<=maxlat;lat+=4){
+    for(int Ls=8;Ls<=8;Ls*=2){
+
+      Coordinate latt_size ({lat*mpi_layout[0],
+                             lat*mpi_layout[1],
+                             lat*mpi_layout[2],
+                             lat*mpi_layout[3]});
+
+      GridCartesian Grid(latt_size,simd_layout,mpi_layout);
+      RealD Nrank = Grid._Nprocessors;
+      RealD Nnode = Grid.NodeCount();
+      RealD ppn = Nrank/Nnode;
+
+      std::vector<std::vector<HalfSpinColourVectorD> > xbuf(8);
+      std::vector<std::vector<HalfSpinColourVectorD> > rbuf(8);
+
+      for(int mu=0;mu<8;mu++){
+        xbuf[mu].resize(lat*lat*lat*Ls);
+        rbuf[mu].resize(lat*lat*lat*Ls);
+      }
+      uint64_t bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
+
+      int ncomm;
+
+      for(int mu=0;mu<4;mu++){
+        if (mpi_layout[mu]>1 ) {
+          double start=usecond();
+          for(int i=0;i<Nloop;i++){
+
+            ncomm=0;
+
+            ncomm++;
+            int comm_proc=1;
+            int xmit_to_rank;
+            int recv_from_rank;
+
+            {
+              std::vector<CommsRequest_t> requests;
+              Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
+              Grid.SendToRecvFrom((void *)&xbuf[mu][0],
+                                  xmit_to_rank,
+                                  (void *)&rbuf[mu][0],
+                                  recv_from_rank,
+                                  bytes);
+            }
+
+            comm_proc = mpi_layout[mu]-1;
+            {
+              std::vector<CommsRequest_t> requests;
+              Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
+              Grid.SendToRecvFrom((void *)&xbuf[mu+4][0],
+                                  xmit_to_rank,
+                                  (void *)&rbuf[mu+4][0],
+                                  recv_from_rank,
+                                  bytes);
+            }
+          }
+          Grid.Barrier();
+          double stop=usecond();
+          double mean=(stop-start)/Nloop;
+          double dbytes    = bytes*ppn;
+          double xbytes    = dbytes*2.0*ncomm;
+          double rbytes    = xbytes;
+          double bidibytes = xbytes+rbytes;
+
+          std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t"
+                   <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7)<<" "
+                   <<std::right<< xbytes/mean<<" "
+                   << "\t\t"<<std::setw(7)<< bidibytes/mean<< std::endl;
+
+        }
+      }
+
+    }
+  }
+
+  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
+  std::cout<<GridLogMessage << "= Benchmarking sequential halo exchange from GPU memory "<<std::endl;
+  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
+  header();
+
+  for(int lat=8;lat<=maxlat;lat+=4){
+    for(int Ls=8;Ls<=8;Ls*=2){
+
+      Coordinate latt_size ({lat*mpi_layout[0],
+                             lat*mpi_layout[1],
+                             lat*mpi_layout[2],
+                             lat*mpi_layout[3]});
+
+      GridCartesian Grid(latt_size,simd_layout,mpi_layout);
+      RealD Nrank = Grid._Nprocessors;
+      RealD Nnode = Grid.NodeCount();
+      RealD ppn = Nrank/Nnode;
+
+      std::vector<HalfSpinColourVectorD *> xbuf(8);
+      std::vector<HalfSpinColourVectorD *> rbuf(8);
+
+      uint64_t bytes = lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
+      for(int d=0;d<8;d++){
+        xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
+        rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
+      }
+
+      int ncomm;
+
+      for(int mu=0;mu<4;mu++){
+        if (mpi_layout[mu]>1 ) {
+          double start=usecond();
+          for(int i=0;i<Nloop;i++){
+
+            ncomm=0;
+
+            ncomm++;
+            int comm_proc=1;
+            int xmit_to_rank;
+            int recv_from_rank;
+
+            {
+              std::vector<CommsRequest_t> requests;
+              Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
+              Grid.SendToRecvFrom((void *)&xbuf[mu][0],
+                                  xmit_to_rank,
+                                  (void *)&rbuf[mu][0],
+                                  recv_from_rank,
+                                  bytes);
+            }
+
+            comm_proc = mpi_layout[mu]-1;
+            {
+              std::vector<CommsRequest_t> requests;
+              Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
+              Grid.SendToRecvFrom((void *)&xbuf[mu+4][0],
+                                  xmit_to_rank,
+                                  (void *)&rbuf[mu+4][0],
+                                  recv_from_rank,
+                                  bytes);
+            }
+          }
+          Grid.Barrier();
+          double stop=usecond();
+          double mean=(stop-start)/Nloop;
+          double dbytes    = bytes*ppn;
+          double xbytes    = dbytes*2.0*ncomm;
+          double rbytes    = xbytes;
+          double bidibytes = xbytes+rbytes;
+
+          std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t"
+                   <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7)<<" "
+                   <<std::right<< xbytes/mean<<" "
+                   << "\t\t"<<std::setw(7)<< bidibytes/mean<< std::endl;
+
+        }
+      }
+
+      for(int d=0;d<8;d++){
+        acceleratorFreeDevice(xbuf[d]);
+        acceleratorFreeDevice(rbuf[d]);
+      }
+
+    }
+  }
+
+  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
+  std::cout<<GridLogMessage << "= All done; Bye Bye"<<std::endl;
+  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
+
+  Grid_finalize();
+}
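A note on the units in the two report blocks of this new benchmark: usecond() measures microseconds, so mean is microseconds per iteration and the printed ratios need no extra scale factor. As a sketch:

// bytes moved per exchange, divided by microseconds per exchange:
// (B / us) == 1e6 B/s == 1 MB/s (decimal), matching the "MB/s uni" header.
double mb_per_s_uni  = xbytes    / mean;
double mb_per_s_bidi = bidibytes / mean;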
@@ -24,7 +24,7 @@ typedef typename GparityDomainWallFermionD::FermionField GparityLatticeFermionD;
int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);
+#ifdef ENABLE_GPARITY
  int Ls=16;
  for(int i=0;i<argc;i++)
    if(std::string(argv[i]) == "-Ls"){
@@ -184,7 +184,7 @@ int main (int argc, char ** argv)
  std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
  DwD.Report();
  }
+#endif
  Grid_finalize();
}

configure.ac (137 lines changed)
@@ -123,6 +123,24 @@ case ${ac_LAPACK} in
    AC_DEFINE([USE_LAPACK],[1],[use LAPACK]);;
esac

+############### fermions
+AC_ARG_ENABLE([fermion-reps],
+              [AC_HELP_STRING([--fermion-reps=yes|no], [enable extra fermion representation support])],
+              [ac_FERMION_REPS=${enable_fermion_reps}], [ac_FERMION_REPS=yes])
+
+AM_CONDITIONAL(BUILD_FERMION_REPS, [ test "${ac_FERMION_REPS}X" == "yesX" ])
+
+AC_ARG_ENABLE([gparity],
+              [AC_HELP_STRING([--enable-gparity=yes|no], [enable G-parity support])],
+              [ac_GPARITY=${enable_gparity}], [ac_GPARITY=yes])
+
+AM_CONDITIONAL(BUILD_GPARITY, [ test "${ac_GPARITY}X" == "yesX" ])
+case ${ac_FERMION_REPS} in
+   yes) AC_DEFINE([ENABLE_FERMION_REPS],[1],[non QCD fermion reps]);;
+esac
+case ${ac_GPARITY} in
+   yes) AC_DEFINE([ENABLE_GPARITY],[1],[fermion actions with GPARITY BCs]);;
+esac
############### Nc
AC_ARG_ENABLE([Nc],
            [AC_HELP_STRING([--enable-Nc=2|3|4], [enable number of colours])],
@@ -153,18 +171,28 @@ case ${ac_SFW_FP16} in
  AC_MSG_ERROR(["SFW FP16 option not supported ${ac_SFW_FP16}"]);;
esac

-############### SUMMIT JSRUN
-AC_ARG_ENABLE([summit],
-              [AC_HELP_STRING([--enable-summit=yes|no], [enable IBMs jsrun resource manager for SUMMIT])],
-              [ac_SUMMIT=${enable_summit}], [ac_SUMMIT=no])
-case ${ac_SUMMIT} in
-  no);;
-  yes)
-   AC_DEFINE([GRID_IBM_SUMMIT],[1],[Let JSRUN manage the GPU device allocation]);;
-  *)
-   AC_DEFINE([GRID_IBM_SUMMIT],[1],[Let JSRUN manage the GPU device allocation]);;
+############### Default to accelerator cshift, but revert to host if UCX is buggy or other reasons
+AC_ARG_ENABLE([accelerator-cshift],
+              [AC_HELP_STRING([--enable-accelerator-cshift=yes|no], [run cshift on the device])],
+              [ac_ACC_CSHIFT=${enable_accelerator_cshift}], [ac_ACC_CSHIFT=yes])
+
+AC_ARG_ENABLE([ucx-buggy],
+              [AC_HELP_STRING([--enable-ucx-buggy=yes|no], [enable workaround for UCX device buffer bugs])],
+              [ac_UCXBUGGY=${enable_ucx_buggy}], [ac_UCXBUGGY=no])
+
+case ${ac_UCXBUGGY} in
+  yes)
+   ac_ACC_CSHIFT=no;;
+  *);;
esac

+case ${ac_ACC_CSHIFT} in
+  yes)
+   AC_DEFINE([ACCELERATOR_CSHIFT],[1],[ UCX device buffer bugs are not present]);;
+  *);;
+esac
+
+
############### SYCL/CUDA/HIP/none
AC_ARG_ENABLE([accelerator],
              [AC_HELP_STRING([--enable-accelerator=cuda|sycl|hip|none], [enable none,cuda,sycl,hip acceleration])],
@@ -181,8 +209,9 @@ case ${ac_ACCELERATOR} in
     echo HIP acceleration
     AC_DEFINE([GRID_HIP],[1],[Use HIP offload]);;
  none)
-     echo NO acceleration
-   ;;
+     echo NO acceleration ;;
+  no)
+     echo NO acceleration ;;
  *)
    AC_MSG_ERROR(["Acceleration not suppoorted ${ac_ACCELERATOR}"]);;
esac
@@ -477,28 +506,48 @@ esac
AM_CXXFLAGS="$SIMD_FLAGS $AM_CXXFLAGS"
AM_CFLAGS="$SIMD_FLAGS $AM_CFLAGS"

-############### Precision selection - deprecate
-#AC_ARG_ENABLE([precision],
-#              [AC_HELP_STRING([--enable-precision=single|double],
-#                              [Select default word size of Real])],
-#              [ac_PRECISION=${enable_precision}],[ac_PRECISION=double])
+###### PRECISION ALWAYS DOUBLE

AC_DEFINE([GRID_DEFAULT_PRECISION_DOUBLE],[1],[GRID_DEFAULT_PRECISION is DOUBLE] )

-#case ${ac_PRECISION} in
-#     single)
-#       AC_DEFINE([GRID_DEFAULT_PRECISION_SINGLE],[1],[GRID_DEFAULT_PRECISION is SINGLE] )
-#     ;;
-#     double)
-#     ;;
-#     *)
-#      AC_MSG_ERROR([${ac_PRECISION} unsupported --enable-precision option]);
-#     ;;
-#esac
+#########################################################
+###################### GRID ALLOCATOR ALIGNMENT ##
+#########################################################
+AC_ARG_ENABLE([alloc-align],[AC_HELP_STRING([--enable-alloc-align=2MB|4k],
+              [Alignment in bytes of GRID Allocator ])],[ac_ALLOC_ALIGN=${enable_alloc_align}],[ac_ALLOC_ALIGN=2MB])
+case ${ac_ALLOC_ALIGN} in
+     4k)
+       AC_DEFINE([GRID_ALLOC_ALIGN],[(4096)],[GRID_ALLOC_ALIGN]);;
+     2MB)
+       AC_DEFINE([GRID_ALLOC_ALIGN],[(2*1024*1024)],[GRID_ALLOC_ALIGN]);;
+     *);;
+esac

-###################### Shared memory allocation technique under MPI3
-AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmopen|shmget|hugetlbfs|shmnone],
-              [Select SHM allocation technique])],[ac_SHM=${enable_shm}],[ac_SHM=shmopen])
+AC_ARG_ENABLE([alloc-cache],[AC_HELP_STRING([--enable-alloc-cache ],
+              [Cache a pool of recent "frees" to reuse])],[ac_ALLOC_CACHE=${enable_alloc_cache}],[ac_ALLOC_CACHE=yes])
+case ${ac_ALLOC_CACHE} in
+     yes)
+       AC_DEFINE([ALLOCATION_CACHE],[1],[ALLOCATION_CACHE]);;
+     *);;
+esac
+
+
+#########################################################
+###################### set GPU device to rank in node ##
+#########################################################
+AC_ARG_ENABLE([setdevice],[AC_HELP_STRING([--enable-setdevice | --disable-setdevice],
+              [Set GPU to rank in node with cudaSetDevice or similar])],[ac_SETDEVICE=${enable_SETDEVICE}],[ac_SETDEVICE=no])
+case ${ac_SETDEVICE} in
+     yes);;
+     no)
+      AC_DEFINE([GRID_DEFAULT_GPU],[1],[GRID_DEFAULT_GPU] )
+     ;;
+esac
+
+#########################################################
+###################### Shared memory intranode #########
+#########################################################
+AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmopen|shmget|hugetlbfs|shmnone|nvlink|no],
+              [Select SHM allocation technique])],[ac_SHM=${enable_shm}],[ac_SHM=no])

case ${ac_SHM} in

@@ -517,10 +566,14 @@ case ${ac_SHM} in
     AC_DEFINE([GRID_MPI3_SHMGET],[1],[GRID_MPI3_SHMGET] )
     ;;

-     shmnone)
+     shmnone | no)
     AC_DEFINE([GRID_MPI3_SHM_NONE],[1],[GRID_MPI3_SHM_NONE] )
     ;;

+     nvlink)
+     AC_DEFINE([GRID_MPI3_SHM_NVLINK],[1],[GRID_MPI3_SHM_NVLINK] )
+     ;;
+
     hugetlbfs)
     AC_DEFINE([GRID_MPI3_SHMMMAP],[1],[GRID_MPI3_SHMMMAP] )
     ;;
@@ -537,10 +590,32 @@ AC_ARG_ENABLE([shmpath],[AC_HELP_STRING([--enable-shmpath=path],
              [ac_SHMPATH=/var/lib/hugetlbfs/global/pagesize-2MB/])
AC_DEFINE_UNQUOTED([GRID_SHM_PATH],["$ac_SHMPATH"],[Path to a hugetlbfs filesystem for MMAPing])

+############### force MPI in SMP
+AC_ARG_ENABLE([shm-force-mpi],[AC_HELP_STRING([--enable-shm-force-mpi],
+              [Force MPI within shared memory])],[ac_SHM_FORCE_MPI=${enable_shm_force_mpi}],[ac_SHM_FORCE_MPI=no])
+case ${ac_SHM_FORCE_MPI} in
+     yes)
+       AC_DEFINE([GRID_SHM_FORCE_MPI],[1],[GRID_SHM_FORCE_MPI] )
+     ;;
+     *) ;;
+esac
+
+############### communication type selection
+AC_ARG_ENABLE([comms-threads],[AC_HELP_STRING([--enable-comms-threads | --disable-comms-threads],
+              [Use multiple threads in MPI calls])],[ac_COMMS_THREADS=${enable_comms_threads}],[ac_COMMS_THREADS=yes])
+
+case ${ac_COMMS_THREADS} in
+     yes)
+       AC_DEFINE([GRID_COMMS_THREADING],[1],[GRID_COMMS_NONE] )
+     ;;
+     *) ;;
+esac
+
############### communication type selection
AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto],
              [Select communications])],[ac_COMMS=${enable_comms}],[ac_COMMS=none])


case ${ac_COMMS} in
  none)
    AC_DEFINE([GRID_COMMS_NONE],[1],[GRID_COMMS_NONE] )
@ -6,13 +6,27 @@ home=`pwd`
|
|||||||
cd $home/Grid
|
cd $home/Grid
|
||||||
HFILES=`find . -type f -name '*.h' -not -name '*Hdf5*' -not -path '*/gamma-gen/*' -not -path '*/Old/*' -not -path '*/Eigen/*'`
|
HFILES=`find . -type f -name '*.h' -not -name '*Hdf5*' -not -path '*/gamma-gen/*' -not -path '*/Old/*' -not -path '*/Eigen/*'`
|
||||||
HFILES="$HFILES"
|
HFILES="$HFILES"
|
||||||
CCFILES=`find . -name '*.cc' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*SharedMemory*.cc' -not -name '*Hdf5*'`
|
CCFILES=`find . -name '*.cc' -not -path '*/instantiation/*/*' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*SharedMemory*.cc' -not -name '*Hdf5*'`
|
||||||
|
|
||||||
|
|
||||||
|
ZWILS_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/ZWilsonImpl*' `
|
||||||
|
WILS_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/WilsonImpl*' `
|
||||||
|
STAG_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/Staggered*' `
|
||||||
|
GP_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/Gparity*' `
|
||||||
|
ADJ_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/WilsonAdj*' `
|
||||||
|
TWOIND_FERMION_FILES=`find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/WilsonTwoIndex*'`
|
||||||
|
|
||||||
HPPFILES=`find . -type f -name '*.hpp'`
|
HPPFILES=`find . -type f -name '*.hpp'`
|
||||||
echo HFILES=$HFILES $HPPFILES > Make.inc
|
echo HFILES=$HFILES $HPPFILES > Make.inc
|
||||||
echo >> Make.inc
|
echo >> Make.inc
|
||||||
echo CCFILES=$CCFILES >> Make.inc
|
echo CCFILES=$CCFILES >> Make.inc
|
||||||
|
|
||||||
|
echo ZWILS_FERMION_FILES=$ZWILS_FERMION_FILES >> Make.inc
|
||||||
|
echo WILS_FERMION_FILES=$WILS_FERMION_FILES >> Make.inc
|
||||||
|
echo STAG_FERMION_FILES=$STAG_FERMION_FILES >> Make.inc
|
||||||
|
echo GP_FERMION_FILES=$GP_FERMION_FILES >> Make.inc
|
||||||
|
echo ADJ_FERMION_FILES=$ADJ_FERMION_FILES >> Make.inc
|
||||||
|
echo TWOIND_FERMION_FILES=$TWOIND_FERMION_FILES >> Make.inc
|
||||||
|
|
||||||
# tests Make.inc
|
# tests Make.inc
|
||||||
cd $home/tests
|
cd $home/tests
|
||||||
@ -26,11 +40,10 @@ for subdir in $dirs; do

echo "tests-local: ${TESTLIST} " > Make.inc
echo ${PREF}_PROGRAMS = ${TESTLIST} >> Make.inc
echo >> Make.inc
-HADLINK=`[ $subdir = './hadrons' ] && echo '-lHadrons '`
for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
-echo ${BNAME}_LDADD=${HADLINK}-lGrid >> Make.inc
+echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a' >> Make.inc
echo >> Make.inc
done
if [ $subdir != '.' ]; then
@ -49,7 +62,7 @@ echo >> Make.inc

for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
-echo ${BNAME}_LDADD=-lGrid>> Make.inc
+echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a' >> Make.inc
echo >> Make.inc
done
cd ..
@ -65,7 +78,7 @@ echo >> Make.inc

for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
-echo ${BNAME}_LDADD=-lGrid>> Make.inc
+echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a'>> Make.inc
echo >> Make.inc
done
cd ..
tests/core/Test_reunitarise.cc (new file, 144 lines)
@ -0,0 +1,144 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/core/Test_reunitarise.cc

    Copyright (C) 2015

    Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  std::vector<int> latt({8,8,8,8});
  GridCartesian * grid  = SpaceTimeGrid::makeFourDimGrid(latt,
                                                         GridDefaultSimd(Nd,vComplexD::Nsimd()),
                                                         GridDefaultMpi());
  GridCartesian * gridF = SpaceTimeGrid::makeFourDimGrid(latt,
                                                         GridDefaultSimd(Nd,vComplexF::Nsimd()),
                                                         GridDefaultMpi());

  ///////////////////////////////
  // Configuration of known size
  ///////////////////////////////
  LatticeColourMatrixD ident(grid);
  LatticeColourMatrixD U(grid);
  LatticeColourMatrixD UU(grid);
  LatticeColourMatrixD tmp(grid);
  LatticeColourMatrixD org(grid);
  LatticeColourMatrixF UF(gridF);

  LatticeGaugeField Umu(grid);

  ident = 1.0;

  // RNG set up for test
  std::vector<int> pseeds({1,2,3,4,5}); // once I caught a fish alive
  std::vector<int> sseeds({6,7,8,9,10});// then I let it go again
  GridParallelRNG pRNG(grid); pRNG.SeedFixedIntegers(pseeds);
  GridSerialRNG   sRNG;       sRNG.SeedFixedIntegers(sseeds);

  SU<Nc>::HotConfiguration(pRNG,Umu);

  U   = PeekIndex<LorentzIndex>(Umu,0);
  org = U;

  tmp = U*adj(U) - ident;
  RealD Def1 = norm2( tmp );
  std::cout << " Defect1 " << Def1 << std::endl;

  tmp = U - org;
  std::cout << "Diff1 " << norm2(tmp) << std::endl;

  precisionChange(UF,U);
  precisionChange(U,UF);

  tmp = U*adj(U) - ident;
  RealD Def2 = norm2( tmp );
  std::cout << " Defect2 " << Def2 << std::endl;

  tmp = U - org;
  std::cout << "Diff2 " << norm2(tmp) << std::endl;

  U = ProjectOnGroup(U);

  tmp = U*adj(U) - ident;
  RealD Def3 = norm2( tmp );
  std::cout << " Defect3 " << Def3 << std::endl;

  tmp = U - org;
  std::cout << "Diff3 " << norm2(tmp) << std::endl;

  LatticeComplexD detU(grid);
  LatticeComplexD detUU(grid);

  detU = Determinant(U);
  detU = detU - 1.0;
  std::cout << "Determinant before screw up " << norm2(detU) << std::endl;
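  // (Editorial note, not part of this commit.) Multiplying a single row of U
  // by the phase exp(i*theta) below leaves U*adj(U) = 1 intact, so U stays
  // unitary, but it scales det(U) by exp(i*theta) and therefore pushes U out
  // of SU(3) into U(3). The ProjectSU3/ProjectSUn calls that follow should
  // remove exactly this phase while keeping the unitarity defect small.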
  std::cout << " Screwing up determinant " << std::endl;

  RealD theta = 0.2;
  ComplexD phase(cos(theta),sin(theta));
  for(int i=0;i<Nc;i++){
    auto element = PeekIndex<ColourIndex>(U,Nc-1,i);
    element = element * phase;
    PokeIndex<ColourIndex>(U,element,Nc-1,i);
  }
  UU = U;

  detU = Determinant(U);
  detU = detU - 1.0;
  std::cout << "Determinant defect before projection " << norm2(detU) << std::endl;
  tmp = U*adj(U) - ident;
  std::cout << "Unitarity check before projection " << norm2(tmp) << std::endl;

  ProjectSU3(U);
  detU = Determinant(U);
  detU = detU - 1.0;
  std::cout << "Determinant ProjectSU3 defect " << norm2(detU) << std::endl;
  tmp = U*adj(U) - ident;
  std::cout << "Unitarity check after projection " << norm2(tmp) << std::endl;

  ProjectSUn(UU);
  detUU = Determinant(UU);
  detUU = detUU - 1.0;
  std::cout << "Determinant ProjectSUn defect " << norm2(detUU) << std::endl;
  tmp = UU*adj(UU) - ident;
  std::cout << "Unitarity check after projection " << norm2(tmp) << std::endl;

  Grid_finalize();
}
tests/core/Test_unary.cc (new file, 106 lines)
@ -0,0 +1,106 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/core/Test_unary.cc

    Copyright (C) 2015

    Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  std::vector<int> latt({8,8,8,8});
  GridCartesian * grid  = SpaceTimeGrid::makeFourDimGrid(latt,
                                                         GridDefaultSimd(Nd,vComplexD::Nsimd()),
                                                         GridDefaultMpi());
  GridCartesian * gridF = SpaceTimeGrid::makeFourDimGrid(latt,
                                                         GridDefaultSimd(Nd,vComplexF::Nsimd()),
                                                         GridDefaultMpi());

  ///////////////////////////////
  // Configuration of known size
  ///////////////////////////////
  LatticeColourMatrixD ident(grid);
  LatticeColourMatrixD U(grid);
  LatticeColourMatrixD tmp(grid);
  LatticeColourMatrixD org(grid);
  LatticeColourMatrixF UF(gridF);

  LatticeGaugeField Umu(grid);

  ident = 1.0;

  // RNG set up for test
  std::vector<int> pseeds({1,2,3,4,5}); // once I caught a fish alive
  std::vector<int> sseeds({6,7,8,9,10});// then I let it go again
  GridParallelRNG pRNG(grid); pRNG.SeedFixedIntegers(pseeds);
  GridSerialRNG   sRNG;       sRNG.SeedFixedIntegers(sseeds);

  SU<Nc>::HotConfiguration(pRNG,Umu);

  U   = PeekIndex<LorentzIndex>(Umu,0);
  org = U;

  tmp = U*adj(U) - ident;
  RealD Def1 = norm2( tmp );
  std::cout << " Defect1 " << Def1 << std::endl;

  tmp = U - org;
  std::cout << "Diff1 " << norm2(tmp) << std::endl;

  precisionChange(UF,U);
  precisionChange(U,UF);
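  // (Editorial note, not part of this commit.) The double -> single -> double
  // round trip above truncates every link to single precision, so the defect
  // and difference printed next should sit at the level of single-precision
  // rounding rather than at exact zero.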
  tmp = U*adj(U) - ident;
  RealD Def2 = norm2( tmp );
  std::cout << " Defect2 " << Def2 << std::endl;

  tmp = U - org;
  std::cout << "Diff2 " << norm2(tmp) << std::endl;

  U = ProjectOnGroup(U);

  tmp = U*adj(U) - ident;
  RealD Def3 = norm2( tmp );
  std::cout << " Defect3 " << Def3 << std::endl;

  tmp = U - org;
  std::cout << "Diff3 " << norm2(tmp) << std::endl;

  Grid_finalize();
}
@ -108,8 +108,18 @@ int main (int argc, char ** argv)

GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);

LatticeGaugeField Umu(UGrid);
-SU<Nc>::ColdConfiguration(Umu);
-// SU<Nc>::HotConfiguration(RNG4,Umu);
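// (Editorial note, not part of this commit.) The argv[1][0] != '-' test below
// is a cheap way of telling a configuration filename apart from Grid's own
// command-line options, which all begin with '-'.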
+if( argc > 1 && argv[1][0] != '-' )
+{
+  std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
+  FieldMetaData header;
+  NerscIO::readConfiguration(Umu, header, argv[1]);
+}
+else
+{
+  std::cout<<GridLogMessage <<"Using cold configuration"<<std::endl;
+  SU<Nc>::ColdConfiguration(Umu);
+  // SU<Nc>::HotConfiguration(RNG4,Umu);
+}

RealD mass=0.3;
RealD M5 =1.0;
tests/solver/Test_coarse_even_odd.cc (new file, 475 lines)
@ -0,0 +1,475 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/solver/Test_coarse_even_odd.cc

    Copyright (C) 2015-2020

    Author: Daniel Richtmann <daniel.richtmann@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#include <Grid/Grid.h>

using namespace Grid;

#ifndef NBASIS
#define NBASIS 40
#endif

// NOTE: The tests in this file are written in analogy to
// - tests/core/Test_wilson_even_odd.cc
// - tests/core/Test_wilson_clover.cc

std::vector<int> readFromCommandlineIvec(int*                    argc,
                                         char***                 argv,
                                         std::string&&           option,
                                         const std::vector<int>& defaultValue) {
  std::string      arg;
  std::vector<int> ret(defaultValue);
  if(GridCmdOptionExists(*argv, *argv + *argc, option)) {
    arg = GridCmdOptionPayload(*argv, *argv + *argc, option);
    GridCmdOptionIntVector(arg, ret);
  }
  return ret;
}
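// (Editorial note, not part of this commit.) This helper makes the block size
// configurable at run time; the integer vector is passed in Grid's usual
// dot-separated form, e.g. "--blocksize 4.4.4.4" (format assumed here).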
int main(int argc, char** argv) {
  Grid_init(&argc, &argv);

  /////////////////////////////////////////////////////////////////////////////
  //                         Read from command line                          //
  /////////////////////////////////////////////////////////////////////////////

  const int  nbasis = NBASIS; static_assert((nbasis & 0x1) == 0, "");
  const int  nb     = nbasis/2;
  Coordinate blockSize = readFromCommandlineIvec(&argc, &argv, "--blocksize", {2, 2, 2, 2});

  std::cout << GridLogMessage << "Compiled with nbasis = " << nbasis << " -> nb = " << nb << std::endl;

  /////////////////////////////////////////////////////////////////////////////
  //                             General setup                               //
  /////////////////////////////////////////////////////////////////////////////

  Coordinate clatt = GridDefaultLatt();
  for(int d=0; d<clatt.size(); d++) clatt[d] = clatt[d] / blockSize[d];

  GridCartesian*         Grid_f   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
  GridCartesian*         Grid_c   = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* RBGrid_f = SpaceTimeGrid::makeFourDimRedBlackGrid(Grid_f);
  GridRedBlackCartesian* RBGrid_c = SpaceTimeGrid::makeFourDimRedBlackGrid(Grid_c);

  std::cout << GridLogMessage << "Grid_f:"   << std::endl; Grid_f->show_decomposition();
  std::cout << GridLogMessage << "Grid_c:"   << std::endl; Grid_c->show_decomposition();
  std::cout << GridLogMessage << "RBGrid_f:" << std::endl; RBGrid_f->show_decomposition();
  std::cout << GridLogMessage << "RBGrid_c:" << std::endl; RBGrid_c->show_decomposition();

  GridParallelRNG pRNG_f(Grid_f);
  GridParallelRNG pRNG_c(Grid_c);

  std::vector<int> seeds({1, 2, 3, 4});

  pRNG_f.SeedFixedIntegers(seeds);
  pRNG_c.SeedFixedIntegers(seeds);

  /////////////////////////////////////////////////////////////////////////////
  //                   Setup of Dirac Matrix and Operator                    //
  /////////////////////////////////////////////////////////////////////////////

  LatticeGaugeField Umu(Grid_f); SU3::HotConfiguration(pRNG_f, Umu);

  RealD checkTolerance = (getPrecision<LatticeFermion>::value == 1) ? 1e-7 : 1e-15;

  RealD mass = -0.30;
  RealD csw  = 1.9192;

  WilsonCloverFermionR Dwc(Umu, *Grid_f, *RBGrid_f, mass, csw, csw);
  MdagMLinearOperator<WilsonCloverFermionR, LatticeFermion> MdagMOp_Dwc(Dwc);

  /////////////////////////////////////////////////////////////////////////////
  //                            Type definitions                             //
  /////////////////////////////////////////////////////////////////////////////

  typedef Aggregation<vSpinColourVector, vTComplex, nbasis>     Aggregates;
  typedef CoarsenedMatrix<vSpinColourVector, vTComplex, nbasis> CoarseDiracMatrix;
  typedef CoarseDiracMatrix::CoarseVector                      CoarseVector;

  /////////////////////////////////////////////////////////////////////////////
  //                          Setup of Aggregation                           //
  /////////////////////////////////////////////////////////////////////////////

  Aggregates Aggs(Grid_c, Grid_f, 0);
  {
    LatticeFermion tmp(Aggs.subspace[0].Grid());
    for(int n = 0; n < nb; n++) {
      gaussian(pRNG_f, Aggs.subspace[n]);
      G5C(tmp, Aggs.subspace[n]);
      axpby(Aggs.subspace[n + nb], 0.5, -0.5, Aggs.subspace[n], tmp);
      axpby(Aggs.subspace[n], 0.5, 0.5, Aggs.subspace[n], tmp);
    }
  }
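  // (Editorial note, not part of this commit.) The loop above doubles nb
  // random vectors v into the 2*nb basis vectors (1/2)(1 + g5)v and
  // (1/2)(1 - g5)v via the two axpby calls, i.e. it splits the aggregation
  // subspace into chiral halves; this is why nbasis must be even above.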
  /////////////////////////////////////////////////////////////////////////////
  //                 Setup of CoarsenedMatrix and Operator                   //
  /////////////////////////////////////////////////////////////////////////////

  const int hermitian = 0;
  CoarseDiracMatrix Dc(*Grid_c, *RBGrid_c, hermitian);
  Dc.CoarsenOperator(Grid_f, MdagMOp_Dwc, Aggs);
  MdagMLinearOperator<CoarseDiracMatrix, CoarseVector> MdagMOp_Dc(Dc);

  /////////////////////////////////////////////////////////////////////////////
  //                    Setup vectors used in all tests                      //
  /////////////////////////////////////////////////////////////////////////////

  CoarseVector src(Grid_c);  random(pRNG_c, src);
  CoarseVector diff(Grid_c); diff = Zero();

  /////////////////////////////////////////////////////////////////////////////
  //                            Start of tests                               //
  /////////////////////////////////////////////////////////////////////////////

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test Dhop + Mdiag = Munprec" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector phi(Grid_c); phi = Zero();
    CoarseVector chi(Grid_c); chi = Zero();
    CoarseVector res(Grid_c); res = Zero();
    CoarseVector ref(Grid_c); ref = Zero();

    Dc.Mdiag(src, phi);          std::cout << GridLogMessage << "Applied Mdiag" << std::endl;
    Dc.Dhop(src, chi, DaggerNo); std::cout << GridLogMessage << "Applied Dhop"  << std::endl;
    Dc.M(src, ref);              std::cout << GridLogMessage << "Applied M"     << std::endl;

    res = phi + chi;

    diff = ref - res;
    auto absDev = norm2(diff);
    auto relDev = absDev / norm2(ref);
    std::cout << GridLogMessage << "norm2(Munprec), norm2(Dhop + Mdiag), abs. deviation, rel. deviation: "
              << norm2(ref) << " " << norm2(res) << " " << absDev << " " << relDev << " -> check "
              << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test Meo + Moe = Dhop" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector src_e(RBGrid_c); src_e = Zero();
    CoarseVector src_o(RBGrid_c); src_o = Zero();
    CoarseVector res_e(RBGrid_c); res_e = Zero();
    CoarseVector res_o(RBGrid_c); res_o = Zero();
    CoarseVector res(Grid_c);     res = Zero();
    CoarseVector ref(Grid_c);     ref = Zero();

    pickCheckerboard(Even, src_e, src);
    pickCheckerboard(Odd,  src_o, src);

    Dc.Meooe(src_e, res_o);      std::cout << GridLogMessage << "Applied Meo"  << std::endl;
    Dc.Meooe(src_o, res_e);      std::cout << GridLogMessage << "Applied Moe"  << std::endl;
    Dc.Dhop(src, ref, DaggerNo); std::cout << GridLogMessage << "Applied Dhop" << std::endl;

    setCheckerboard(res, res_o);
    setCheckerboard(res, res_e);

    diff = ref - res;
    auto absDev = norm2(diff);
    auto relDev = absDev / norm2(ref);
    std::cout << GridLogMessage << "norm2(Dhop), norm2(Meo + Moe), abs. deviation, rel. deviation: "
              << norm2(ref) << " " << norm2(res) << " " << absDev << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }
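  // (Editorial note, not part of this commit.) The next two blocks rely on
  // the identity <v| A^dag A |v> = <Av|Av> = ||Av||^2 >= 0: the inner product
  // of v with A^dag A v is a square norm, so its imaginary part must vanish
  // up to rounding, here for A = M and A = Mooee respectively.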
  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test |(Im(v^dag M^dag M v)| = 0" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector tmp(Grid_c); tmp = Zero();
    CoarseVector phi(Grid_c); phi = Zero();

    Dc.M(src, tmp);    std::cout << GridLogMessage << "Applied M"    << std::endl;
    Dc.Mdag(tmp, phi); std::cout << GridLogMessage << "Applied Mdag" << std::endl;

    std::cout << GridLogMessage << "src = " << norm2(src) << " tmp = " << norm2(tmp) << " phi = " << norm2(phi) << std::endl;

    ComplexD dot = innerProduct(src, phi);

    auto relDev = abs(imag(dot)) / abs(real(dot));
    std::cout << GridLogMessage << "Re(v^dag M^dag M v), Im(v^dag M^dag M v), rel.deviation: "
              << real(dot) << " " << imag(dot) << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test |(Im(v^dag Mooee^dag Mooee v)| = 0 (full grid)" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector tmp(Grid_c); tmp = Zero();
    CoarseVector phi(Grid_c); phi = Zero();

    Dc.Mooee(src, tmp);    std::cout << GridLogMessage << "Applied Mooee"    << std::endl;
    Dc.MooeeDag(tmp, phi); std::cout << GridLogMessage << "Applied MooeeDag" << std::endl;

    ComplexD dot = innerProduct(src, phi);

    auto relDev = abs(imag(dot)) / abs(real(dot));
    std::cout << GridLogMessage << "Re(v^dag Mooee^dag Mooee v), Im(v^dag Mooee^dag Mooee v), rel.deviation: "
              << real(dot) << " " << imag(dot) << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test MooeeInv Mooee = 1 (full grid)" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector tmp(Grid_c); tmp = Zero();
    CoarseVector phi(Grid_c); phi = Zero();

    Dc.Mooee(src, tmp);    std::cout << GridLogMessage << "Applied Mooee"    << std::endl;
    Dc.MooeeInv(tmp, phi); std::cout << GridLogMessage << "Applied MooeeInv" << std::endl;

    diff = src - phi;
    auto absDev = norm2(diff);
    auto relDev = absDev / norm2(src);
    std::cout << GridLogMessage << "norm2(src), norm2(MooeeInv Mooee src), abs. deviation, rel. deviation: "
              << norm2(src) << " " << norm2(phi) << " " << absDev << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test MeooeDagger is the dagger of Meooe by requiring" << std::endl;
    std::cout << GridLogMessage << "= < phi | Meooe | chi > * = < chi | Meooe^dag| phi>" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    // clang-format off
    CoarseVector phi(Grid_c);      random(pRNG_c, phi);
    CoarseVector chi(Grid_c);      random(pRNG_c, chi);
    CoarseVector chi_e(RBGrid_c);  chi_e  = Zero();
    CoarseVector chi_o(RBGrid_c);  chi_o  = Zero();
    CoarseVector dchi_e(RBGrid_c); dchi_e = Zero();
    CoarseVector dchi_o(RBGrid_c); dchi_o = Zero();
    CoarseVector phi_e(RBGrid_c);  phi_e  = Zero();
    CoarseVector phi_o(RBGrid_c);  phi_o  = Zero();
    CoarseVector dphi_e(RBGrid_c); dphi_e = Zero();
    CoarseVector dphi_o(RBGrid_c); dphi_o = Zero();
    // clang-format on

    pickCheckerboard(Even, chi_e, chi);
    pickCheckerboard(Odd,  chi_o, chi);
    pickCheckerboard(Even, phi_e, phi);
    pickCheckerboard(Odd,  phi_o, phi);

    Dc.Meooe(chi_e, dchi_o);    std::cout << GridLogMessage << "Applied Meo"    << std::endl;
    Dc.Meooe(chi_o, dchi_e);    std::cout << GridLogMessage << "Applied Moe"    << std::endl;
    Dc.MeooeDag(phi_e, dphi_o); std::cout << GridLogMessage << "Applied MeoDag" << std::endl;
    Dc.MeooeDag(phi_o, dphi_e); std::cout << GridLogMessage << "Applied MoeDag" << std::endl;

    ComplexD phiDchi_e = innerProduct(phi_e, dchi_e);
    ComplexD phiDchi_o = innerProduct(phi_o, dchi_o);
    ComplexD chiDphi_e = innerProduct(chi_e, dphi_e);
    ComplexD chiDphi_o = innerProduct(chi_o, dphi_o);

    std::cout << GridLogDebug << "norm dchi_e = " << norm2(dchi_e) << " norm dchi_o = " << norm2(dchi_o) << " norm dphi_e = " << norm2(dphi_e)
              << " norm dphi_o = " << norm2(dphi_o) << std::endl;

    std::cout << GridLogMessage << "e " << phiDchi_e << " " << chiDphi_e << std::endl;
    std::cout << GridLogMessage << "o " << phiDchi_o << " " << chiDphi_o << std::endl;

    std::cout << GridLogMessage << "phiDchi_e - conj(chiDphi_o) " << phiDchi_e - conj(chiDphi_o) << std::endl;
    std::cout << GridLogMessage << "phiDchi_o - conj(chiDphi_e) " << phiDchi_o - conj(chiDphi_e) << std::endl;
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test MooeeInv Mooee = 1 (checkerboards separately)" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector chi(Grid_c);     random(pRNG_c, chi);
    CoarseVector tmp(Grid_c);     tmp = Zero();
    CoarseVector phi(Grid_c);     phi = Zero();
    CoarseVector chi_e(RBGrid_c); chi_e = Zero();
    CoarseVector chi_o(RBGrid_c); chi_o = Zero();
    CoarseVector phi_e(RBGrid_c); phi_e = Zero();
    CoarseVector phi_o(RBGrid_c); phi_o = Zero();
    CoarseVector tmp_e(RBGrid_c); tmp_e = Zero();
    CoarseVector tmp_o(RBGrid_c); tmp_o = Zero();

    pickCheckerboard(Even, chi_e, chi);
    pickCheckerboard(Odd,  chi_o, chi);
    pickCheckerboard(Even, tmp_e, tmp);
    pickCheckerboard(Odd,  tmp_o, tmp);

    Dc.Mooee(chi_e, tmp_e);    std::cout << GridLogMessage << "Applied Mee"    << std::endl;
    Dc.MooeeInv(tmp_e, phi_e); std::cout << GridLogMessage << "Applied MeeInv" << std::endl;
    Dc.Mooee(chi_o, tmp_o);    std::cout << GridLogMessage << "Applied Moo"    << std::endl;
    Dc.MooeeInv(tmp_o, phi_o); std::cout << GridLogMessage << "Applied MooInv" << std::endl;

    setCheckerboard(phi, phi_e);
    setCheckerboard(phi, phi_o);

    diff = chi - phi;
    auto absDev = norm2(diff);
    auto relDev = absDev / norm2(chi);
    std::cout << GridLogMessage << "norm2(chi), norm2(MeeInv Mee chi), abs. deviation, rel. deviation: "
              << norm2(chi) << " " << norm2(phi) << " " << absDev << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test MooeeDag MooeeInvDag = 1 (checkerboards separately)" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector chi(Grid_c);     random(pRNG_c, chi);
    CoarseVector tmp(Grid_c);     tmp = Zero();
    CoarseVector phi(Grid_c);     phi = Zero();
    CoarseVector chi_e(RBGrid_c); chi_e = Zero();
    CoarseVector chi_o(RBGrid_c); chi_o = Zero();
    CoarseVector phi_e(RBGrid_c); phi_e = Zero();
    CoarseVector phi_o(RBGrid_c); phi_o = Zero();
    CoarseVector tmp_e(RBGrid_c); tmp_e = Zero();
    CoarseVector tmp_o(RBGrid_c); tmp_o = Zero();

    pickCheckerboard(Even, chi_e, chi);
    pickCheckerboard(Odd,  chi_o, chi);
    pickCheckerboard(Even, tmp_e, tmp);
    pickCheckerboard(Odd,  tmp_o, tmp);

    Dc.MooeeDag(chi_e, tmp_e);    std::cout << GridLogMessage << "Applied MeeDag"    << std::endl;
    Dc.MooeeInvDag(tmp_e, phi_e); std::cout << GridLogMessage << "Applied MeeInvDag" << std::endl;
    Dc.MooeeDag(chi_o, tmp_o);    std::cout << GridLogMessage << "Applied MooDag"    << std::endl;
    Dc.MooeeInvDag(tmp_o, phi_o); std::cout << GridLogMessage << "Applied MooInvDag" << std::endl;

    setCheckerboard(phi, phi_e);
    setCheckerboard(phi, phi_o);

    diff = chi - phi;
    auto absDev = norm2(diff);
    auto relDev = absDev / norm2(chi);
    std::cout << GridLogMessage << "norm2(chi), norm2(MeeDag MeeInvDag chi), abs. deviation, rel. deviation: "
              << norm2(chi) << " " << norm2(phi) << " " << absDev << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }

  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test Meo + Moe + Moo + Mee = Munprec" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector chi(Grid_c);     chi = Zero();
    CoarseVector phi(Grid_c);     phi = Zero();
    CoarseVector ref(Grid_c);     ref = Zero();
    CoarseVector src_e(RBGrid_c); src_e = Zero();
    CoarseVector src_o(RBGrid_c); src_o = Zero();
    CoarseVector phi_e(RBGrid_c); phi_e = Zero();
    CoarseVector phi_o(RBGrid_c); phi_o = Zero();
    CoarseVector chi_e(RBGrid_c); chi_e = Zero();
    CoarseVector chi_o(RBGrid_c); chi_o = Zero();

    pickCheckerboard(Even, src_e, src);
    pickCheckerboard(Odd,  src_o, src);
    pickCheckerboard(Even, phi_e, phi);
    pickCheckerboard(Odd,  phi_o, phi);
    pickCheckerboard(Even, chi_e, chi);
    pickCheckerboard(Odd,  chi_o, chi);

    // M phi = (Mooee src_e + Meooe src_o , Mooee src_o + Meooe src_e)
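    // (Editorial note, not part of this commit.) In block form on the two
    // checkerboards this reads
    //   M [src_e]   [Mee Meo] [src_e]
    //     [src_o] = [Moe Moo] [src_o] ,
    // which is exactly what the Mooee/Meooe applications below assemble.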
    Dc.M(src, ref); // Reference result from the unpreconditioned operator

    // EO matrix
    Dc.Mooee(src_e, chi_e); std::cout << GridLogMessage << "Applied Mee" << std::endl;
    Dc.Mooee(src_o, chi_o); std::cout << GridLogMessage << "Applied Moo" << std::endl;
    Dc.Meooe(src_o, phi_e); std::cout << GridLogMessage << "Applied Moe" << std::endl;
    Dc.Meooe(src_e, phi_o); std::cout << GridLogMessage << "Applied Meo" << std::endl;

    phi_o += chi_o;
    phi_e += chi_e;

    setCheckerboard(phi, phi_e);
    setCheckerboard(phi, phi_o);

    std::cout << GridLogDebug << "norm phi_e = " << norm2(phi_e) << " norm phi_o = " << norm2(phi_o) << " norm phi = " << norm2(phi) << std::endl;

    diff = ref - phi;
    auto absDev = norm2(diff);
    auto relDev = absDev / norm2(ref);
    std::cout << GridLogMessage << "norm2(Dunprec), norm2(Deoprec), abs. deviation, rel. deviation: "
              << norm2(ref) << " " << norm2(phi) << " " << absDev << " " << relDev
              << " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
    assert(relDev <= checkTolerance);
  }
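  // (Editorial note, not part of this commit.) SchurDiagMooeeOperator applies
  // the even-odd preconditioned operator, schematically
  //   Mpc = Moo - Moe Mee^{-1} Meo   (acting on a single checkerboard),
  // so Mpc^dag Mpc is hermitian by construction; the final test probes this
  // via <phi| Mpc^dag Mpc |chi> = <chi| Mpc^dag Mpc |phi>^* on each parity.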
  {
    std::cout << GridLogMessage << "===========================================================================" << std::endl;
    std::cout << GridLogMessage << "= Test MpcDagMpc is hermitian" << std::endl;
    std::cout << GridLogMessage << "===========================================================================" << std::endl;

    CoarseVector phi(Grid_c);      random(pRNG_c, phi);
    CoarseVector chi(Grid_c);      random(pRNG_c, chi);
    CoarseVector chi_e(RBGrid_c);  chi_e  = Zero();
    CoarseVector chi_o(RBGrid_c);  chi_o  = Zero();
    CoarseVector dchi_e(RBGrid_c); dchi_e = Zero();
    CoarseVector dchi_o(RBGrid_c); dchi_o = Zero();
    CoarseVector phi_e(RBGrid_c);  phi_e  = Zero();
    CoarseVector phi_o(RBGrid_c);  phi_o  = Zero();
    CoarseVector dphi_e(RBGrid_c); dphi_e = Zero();
    CoarseVector dphi_o(RBGrid_c); dphi_o = Zero();

    pickCheckerboard(Even, chi_e, chi);
    pickCheckerboard(Odd,  chi_o, chi);
    pickCheckerboard(Even, phi_e, phi);
    pickCheckerboard(Odd,  phi_o, phi);

    SchurDiagMooeeOperator<CoarseDiracMatrix,CoarseVector> HermOpEO(Dc);

    HermOpEO.MpcDagMpc(chi_e, dchi_e); std::cout << GridLogMessage << "Applied MpcDagMpc to chi_e" << std::endl;
    HermOpEO.MpcDagMpc(chi_o, dchi_o); std::cout << GridLogMessage << "Applied MpcDagMpc to chi_o" << std::endl;
    HermOpEO.MpcDagMpc(phi_e, dphi_e); std::cout << GridLogMessage << "Applied MpcDagMpc to phi_e" << std::endl;
    HermOpEO.MpcDagMpc(phi_o, dphi_o); std::cout << GridLogMessage << "Applied MpcDagMpc to phi_o" << std::endl;

    ComplexD phiDchi_e = innerProduct(phi_e, dchi_e);
    ComplexD phiDchi_o = innerProduct(phi_o, dchi_o);
    ComplexD chiDphi_e = innerProduct(chi_e, dphi_e);
    ComplexD chiDphi_o = innerProduct(chi_o, dphi_o);

    std::cout << GridLogMessage << "e " << phiDchi_e << " " << chiDphi_e << std::endl;
    std::cout << GridLogMessage << "o " << phiDchi_o << " " << chiDphi_o << std::endl;

    std::cout << GridLogMessage << "phiDchi_e - conj(chiDphi_e) " << phiDchi_e - conj(chiDphi_e) << std::endl;
    std::cout << GridLogMessage << "phiDchi_o - conj(chiDphi_o) " << phiDchi_o - conj(chiDphi_o) << std::endl;
  }

  Grid_finalize();
}