mirror of https://github.com/paboyle/Grid.git synced 2025-06-23 18:22:02 +01:00

Compare commits


1 commit

deab11e68b  Flop cout matches DiRAC-ITT-2020  2020-11-16 17:15:34 +01:00
82 changed files with 5011 additions and 6939 deletions

View File

@ -37,9 +37,7 @@ directory
#endif
//disables an Intel-compiler-specific warning (in json.hpp)
#ifdef __ICC
#pragma warning disable 488
#endif
#ifdef __NVCC__
//disables an nvcc-specific warning in json.hpp

View File

@ -21,7 +21,6 @@ if BUILD_HDF5
extra_headers+=serialisation/Hdf5Type.h
endif
all: version-cache Version.h
version-cache:
@ -54,17 +53,6 @@ Version.h: version-cache
include Make.inc
include Eigen.inc
extra_sources+=$(ZWILS_FERMION_FILES)
extra_sources+=$(WILS_FERMION_FILES)
extra_sources+=$(STAG_FERMION_FILES)
if BUILD_GPARITY
extra_sources+=$(GP_FERMION_FILES)
endif
if BUILD_FERMION_REPS
extra_sources+=$(ADJ_FERMION_FILES)
extra_sources+=$(TWOIND_FERMION_FILES)
endif
lib_LIBRARIES = libGrid.a
CCFILES += $(extra_sources)

View File

@ -31,7 +31,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef GRID_ALGORITHM_COARSENED_MATRIX_H
#define GRID_ALGORITHM_COARSENED_MATRIX_H
#include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No)
NAMESPACE_BEGIN(Grid);
@ -60,14 +59,12 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner,
class Geometry {
public:
int npoint;
int base;
std::vector<int> directions ;
std::vector<int> displacements;
std::vector<int> points_dagger;
Geometry(int _d) {
base = (_d==5) ? 1:0;
int base = (_d==5) ? 1:0;
// make coarse grid stencil for 4d , not 5d
if ( _d==5 ) _d=4;
@ -75,51 +72,16 @@ public:
npoint = 2*_d+1;
directions.resize(npoint);
displacements.resize(npoint);
points_dagger.resize(npoint);
for(int d=0;d<_d;d++){
directions[d ] = d+base;
directions[d+_d] = d+base;
displacements[d ] = +1;
displacements[d+_d]= -1;
points_dagger[d ] = d+_d;
points_dagger[d+_d] = d;
}
directions [2*_d]=0;
displacements[2*_d]=0;
points_dagger[2*_d]=2*_d;
}
int point(int dir, int disp) {
assert(disp == -1 || disp == 0 || disp == 1);
assert(base+0 <= dir && dir < base+4);
// directions faster index = new indexing
// 4d (base = 0):
// point 0 1 2 3 4 5 6 7 8
// dir 0 1 2 3 0 1 2 3 0
// disp +1 +1 +1 +1 -1 -1 -1 -1 0
// 5d (base = 1):
// point 0 1 2 3 4 5 6 7 8
// dir 1 2 3 4 1 2 3 4 0
// disp +1 +1 +1 +1 -1 -1 -1 -1 0
// displacements faster index = old indexing
// 4d (base = 0):
// point 0 1 2 3 4 5 6 7 8
// dir 0 0 1 1 2 2 3 3 0
// disp +1 -1 +1 -1 +1 -1 +1 -1 0
// 5d (base = 1):
// point 0 1 2 3 4 5 6 7 8
// dir 1 1 2 2 3 3 4 4 0
// disp +1 -1 +1 -1 +1 -1 +1 -1 0
if(dir == 0 and disp == 0)
return 8;
else // New indexing
return (1 - disp) / 2 * 4 + dir - base;
// else // Old indexing
// return (4 * (dir - base) + 1 - disp) / 2;
}
};
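
As a quick sanity check of the new indexing formula above (a hypothetical usage, values read off the 4d table with base = 0):

    Geometry geom4(4);                 // 4d: base = 0, npoint = 9
    assert(geom4.point(0, +1) == 0);   // first forward hop
    assert(geom4.point(3, -1) == 7);   // last backward hop
    assert(geom4.point(0,  0) == 8);   // self-coupling point
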
template<class Fobj,class CComplex,int nbasis>
@ -296,7 +258,7 @@ public:
// Fine Object == (per site) type of fine field
// nbasis == number of deflation vectors
template<class Fobj,class CComplex,int nbasis>
class CoarsenedMatrix : public CheckerBoardedSparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
class CoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
public:
typedef iVector<CComplex,nbasis > siteVector;
@ -306,59 +268,33 @@ public:
typedef iMatrix<CComplex,nbasis > Cobj;
typedef Lattice< CComplex > CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj > FineField;
typedef CoarseVector FermionField;
// enrich interface, use default implementation as in FermionOperator ///////
void Dminus(CoarseVector const& in, CoarseVector& out) { out = in; }
void DminusDag(CoarseVector const& in, CoarseVector& out) { out = in; }
void ImportPhysicalFermionSource(CoarseVector const& input, CoarseVector& imported) { imported = input; }
void ImportUnphysicalFermion(CoarseVector const& input, CoarseVector& imported) { imported = input; }
void ExportPhysicalFermionSolution(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };
void ExportPhysicalFermionSource(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };
////////////////////
// Data members
////////////////////
Geometry geom;
GridBase * _grid;
GridBase* _cbgrid;
int hermitian;
CartesianStencil<siteVector,siteVector,int> Stencil;
CartesianStencil<siteVector,siteVector,int> StencilEven;
CartesianStencil<siteVector,siteVector,int> StencilOdd;
std::vector<CoarseMatrix> A;
std::vector<CoarseMatrix> Aeven;
std::vector<CoarseMatrix> Aodd;
CoarseMatrix AselfInv;
CoarseMatrix AselfInvEven;
CoarseMatrix AselfInvOdd;
Vector<RealD> dag_factor;
///////////////////////
// Interface
///////////////////////
GridBase * Grid(void) { return _grid; }; // this is all the linalg routines need to know
GridBase * RedBlackGrid() { return _cbgrid; };
int ConstEE() { return 0; }
void M (const CoarseVector &in, CoarseVector &out)
{
conformable(_grid,in.Grid());
conformable(in.Grid(),out.Grid());
out.Checkerboard() = in.Checkerboard();
SimpleCompressor<siteVector> compressor;
Stencil.HaloExchange(in,compressor);
autoView( in_v , in, AcceleratorRead);
autoView( out_v , out, AcceleratorWrite);
autoView( Stencil_v , Stencil, AcceleratorRead);
auto& geom_v = geom;
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;
@ -380,14 +316,14 @@ public:
int ptype;
StencilEntry *SE;
for(int point=0;point<geom_v.npoint;point++){
for(int point=0;point<geom.npoint;point++){
SE=Stencil_v.GetEntry(ptype,point,ss);
SE=Stencil.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
@ -408,72 +344,12 @@ public:
return M(in,out);
} else {
// corresponds to Galerkin coarsening
return MdagNonHermitian(in, out);
CoarseVector tmp(Grid());
G5C(tmp, in);
M(tmp, out);
G5C(out, out);
}
};
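
For reference, the Galerkin branch relies on coarse \Gamma_5-hermiticity, a standard identity the diff assumes rather than states: if the fine operator is \Gamma_5-hermitian, the Galerkin-coarsened operator inherits

    M^\dagger = \Gamma_5^c M \Gamma_5^c   =>   M^\dagger \psi = \Gamma_5^c ( M ( \Gamma_5^c \psi ) )

which is exactly the G5C / M / G5C sequence applied through tmp above.
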
void MdagNonHermitian(const CoarseVector &in, CoarseVector &out)
{
conformable(_grid,in.Grid());
conformable(in.Grid(),out.Grid());
out.Checkerboard() = in.Checkerboard();
SimpleCompressor<siteVector> compressor;
Stencil.HaloExchange(in,compressor);
autoView( in_v , in, AcceleratorRead);
autoView( out_v , out, AcceleratorWrite);
autoView( Stencil_v , Stencil, AcceleratorRead);
auto& geom_v = geom;
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;
for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer.push_back(A[p].View(AcceleratorRead));
Aview *Aview_p = & AcceleratorViewContainer[0];
const int Nsimd = CComplex::Nsimd();
typedef decltype(coalescedRead(in_v[0])) calcVector;
typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
int osites=Grid()->oSites();
Vector<int> points(geom.npoint, 0);
for(int p=0; p<geom.npoint; p++)
points[p] = geom.points_dagger[p];
RealD* dag_factor_p = &dag_factor[0];
accelerator_for(sss, Grid()->oSites()*nbasis, Nsimd, {
int ss = sss/nbasis;
int b = sss%nbasis;
calcComplex res = Zero();
calcVector nbr;
int ptype;
StencilEntry *SE;
for(int p=0;p<geom_v.npoint;p++){
int point = points[p];
SE=Stencil_v.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
for(int bb=0;bb<nbasis;bb++) {
res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
}
}
coalescedWrite(out_v[ss](b),res);
});
for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer[p].ViewClose();
}
void MdirComms(const CoarseVector &in)
{
SimpleCompressor<siteVector> compressor;
@ -483,7 +359,6 @@ public:
{
conformable(_grid,in.Grid());
conformable(_grid,out.Grid());
out.Checkerboard() = in.Checkerboard();
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;
@ -492,7 +367,6 @@ public:
autoView( out_v , out, AcceleratorWrite);
autoView( in_v , in, AcceleratorRead);
autoView( Stencil_v , Stencil, AcceleratorRead);
const int Nsimd = CComplex::Nsimd();
typedef decltype(coalescedRead(in_v[0])) calcVector;
@ -506,12 +380,12 @@ public:
int ptype;
StencilEntry *SE;
SE=Stencil_v.GetEntry(ptype,point,ss);
SE=Stencil.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
@ -539,7 +413,34 @@ public:
this->MdirComms(in);
MdirCalc(in,out,geom.point(dir,disp));
int ndim = in.Grid()->Nd();
//////////////
// 4D action like wilson
// 0+ => 0
// 0- => 1
// 1+ => 2
// 1- => 3
// etc..
//////////////
// 5D action like DWF
// 1+ => 0
// 1- => 1
// 2+ => 2
// 2- => 3
// etc..
auto point = [dir, disp, ndim](){
if(dir == 0 and disp == 0)
return 8;
else if ( ndim==4 ) {
return (4 * dir + 1 - disp) / 2;
} else {
return (4 * (dir-1) + 1 - disp) / 2;
}
}();
MdirCalc(in,out,point);
};
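
Plugging the comment table into the lambda gives a quick check (worked examples, not part of the diff):

    4d: dir=0, disp=+1  ->  (4*0 + 1 - 1)/2 = 0
    4d: dir=1, disp=-1  ->  (4*1 + 1 + 1)/2 = 3
    5d: dir=1, disp=+1  ->  (4*(1-1) + 1 - 1)/2 = 0
    any: dir=0, disp=0  ->  8 (the self point, short-circuited)
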
void Mdiag(const CoarseVector &in, CoarseVector &out)
@ -548,296 +449,23 @@ public:
MdirCalc(in, out, point); // No comms
};
void Mooee(const CoarseVector &in, CoarseVector &out) {
MooeeInternal(in, out, DaggerNo, InverseNo);
}
void MooeeInv(const CoarseVector &in, CoarseVector &out) {
MooeeInternal(in, out, DaggerNo, InverseYes);
}
void MooeeDag(const CoarseVector &in, CoarseVector &out) {
MooeeInternal(in, out, DaggerYes, InverseNo);
}
void MooeeInvDag(const CoarseVector &in, CoarseVector &out) {
MooeeInternal(in, out, DaggerYes, InverseYes);
}
void Meooe(const CoarseVector &in, CoarseVector &out) {
if(in.Checkerboard() == Odd) {
DhopEO(in, out, DaggerNo);
} else {
DhopOE(in, out, DaggerNo);
}
}
void MeooeDag(const CoarseVector &in, CoarseVector &out) {
if(in.Checkerboard() == Odd) {
DhopEO(in, out, DaggerYes);
} else {
DhopOE(in, out, DaggerYes);
}
}
void Dhop(const CoarseVector &in, CoarseVector &out, int dag) {
conformable(in.Grid(), _grid); // verifies full grid
conformable(in.Grid(), out.Grid());
out.Checkerboard() = in.Checkerboard();
DhopInternal(Stencil, A, in, out, dag);
}
void DhopOE(const CoarseVector &in, CoarseVector &out, int dag) {
conformable(in.Grid(), _cbgrid); // verifies half grid
conformable(in.Grid(), out.Grid()); // drops the cb check
assert(in.Checkerboard() == Even);
out.Checkerboard() = Odd;
DhopInternal(StencilEven, Aodd, in, out, dag);
}
void DhopEO(const CoarseVector &in, CoarseVector &out, int dag) {
conformable(in.Grid(), _cbgrid); // verifies half grid
conformable(in.Grid(), out.Grid()); // drops the cb check
assert(in.Checkerboard() == Odd);
out.Checkerboard() = Even;
DhopInternal(StencilOdd, Aeven, in, out, dag);
}
void MooeeInternal(const CoarseVector &in, CoarseVector &out, int dag, int inv) {
out.Checkerboard() = in.Checkerboard();
assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);
CoarseMatrix *Aself = nullptr;
if(in.Grid()->_isCheckerBoarded) {
if(in.Checkerboard() == Odd) {
Aself = (inv) ? &AselfInvOdd : &Aodd[geom.npoint-1];
DselfInternal(StencilOdd, *Aself, in, out, dag);
} else {
Aself = (inv) ? &AselfInvEven : &Aeven[geom.npoint-1];
DselfInternal(StencilEven, *Aself, in, out, dag);
}
} else {
Aself = (inv) ? &AselfInv : &A[geom.npoint-1];
DselfInternal(Stencil, *Aself, in, out, dag);
}
assert(Aself != nullptr);
}
void DselfInternal(CartesianStencil<siteVector,siteVector,int> &st, CoarseMatrix &a,
const CoarseVector &in, CoarseVector &out, int dag) {
int point = geom.npoint-1;
autoView( out_v, out, AcceleratorWrite);
autoView( in_v, in, AcceleratorRead);
autoView( st_v, st, AcceleratorRead);
autoView( a_v, a, AcceleratorRead);
const int Nsimd = CComplex::Nsimd();
typedef decltype(coalescedRead(in_v[0])) calcVector;
typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
RealD* dag_factor_p = &dag_factor[0];
if(dag) {
accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
int ss = sss/nbasis;
int b = sss%nbasis;
calcComplex res = Zero();
calcVector nbr;
int ptype;
StencilEntry *SE;
SE=st_v.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
for(int bb=0;bb<nbasis;bb++) {
res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(a_v[ss](b,bb))*nbr(bb);
}
coalescedWrite(out_v[ss](b),res);
});
} else {
accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
int ss = sss/nbasis;
int b = sss%nbasis;
calcComplex res = Zero();
calcVector nbr;
int ptype;
StencilEntry *SE;
SE=st_v.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
for(int bb=0;bb<nbasis;bb++) {
res = res + coalescedRead(a_v[ss](b,bb))*nbr(bb);
}
coalescedWrite(out_v[ss](b),res);
});
}
}
void DhopInternal(CartesianStencil<siteVector,siteVector,int> &st, std::vector<CoarseMatrix> &a,
const CoarseVector &in, CoarseVector &out, int dag) {
SimpleCompressor<siteVector> compressor;
st.HaloExchange(in,compressor);
autoView( in_v, in, AcceleratorRead);
autoView( out_v, out, AcceleratorWrite);
autoView( st_v , st, AcceleratorRead);
typedef LatticeView<Cobj> Aview;
// determine in what order we need the points
int npoint = geom.npoint-1;
Vector<int> points(npoint, 0);
for(int p=0; p<npoint; p++)
points[p] = (dag && !hermitian) ? geom.points_dagger[p] : p;
Vector<Aview> AcceleratorViewContainer;
for(int p=0;p<npoint;p++) AcceleratorViewContainer.push_back(a[p].View(AcceleratorRead));
Aview *Aview_p = & AcceleratorViewContainer[0];
const int Nsimd = CComplex::Nsimd();
typedef decltype(coalescedRead(in_v[0])) calcVector;
typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
RealD* dag_factor_p = &dag_factor[0];
if(dag) {
accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
int ss = sss/nbasis;
int b = sss%nbasis;
calcComplex res = Zero();
calcVector nbr;
int ptype;
StencilEntry *SE;
for(int p=0;p<npoint;p++){
int point = points[p];
SE=st_v.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
for(int bb=0;bb<nbasis;bb++) {
res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
}
}
coalescedWrite(out_v[ss](b),res);
});
} else {
accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
int ss = sss/nbasis;
int b = sss%nbasis;
calcComplex res = Zero();
calcVector nbr;
int ptype;
StencilEntry *SE;
for(int p=0;p<npoint;p++){
int point = points[p];
SE=st_v.GetEntry(ptype,point,ss);
if(SE->_is_local) {
nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
} else {
nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
}
acceleratorSynchronise();
for(int bb=0;bb<nbasis;bb++) {
res = res + coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
}
}
coalescedWrite(out_v[ss](b),res);
});
}
for(int p=0;p<npoint;p++) AcceleratorViewContainer[p].ViewClose();
}
CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) :
CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) :
_grid(&CoarseGrid),
_cbgrid(new GridRedBlackCartesian(&CoarseGrid)),
geom(CoarseGrid._ndimension),
hermitian(hermitian_),
Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilEven(_cbgrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilOdd(_cbgrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
A(geom.npoint,&CoarseGrid),
Aeven(geom.npoint,_cbgrid),
Aodd(geom.npoint,_cbgrid),
AselfInv(&CoarseGrid),
AselfInvEven(_cbgrid),
AselfInvOdd(_cbgrid),
dag_factor(nbasis*nbasis)
A(geom.npoint,&CoarseGrid)
{
fillFactor();
};
CoarsenedMatrix(GridCartesian &CoarseGrid, GridRedBlackCartesian &CoarseRBGrid, int hermitian_=0) :
_grid(&CoarseGrid),
_cbgrid(&CoarseRBGrid),
geom(CoarseGrid._ndimension),
hermitian(hermitian_),
Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
A(geom.npoint,&CoarseGrid),
Aeven(geom.npoint,&CoarseRBGrid),
Aodd(geom.npoint,&CoarseRBGrid),
AselfInv(&CoarseGrid),
AselfInvEven(&CoarseRBGrid),
AselfInvOdd(&CoarseRBGrid),
dag_factor(nbasis*nbasis)
{
fillFactor();
};
void fillFactor() {
Eigen::MatrixXd dag_factor_eigen = Eigen::MatrixXd::Ones(nbasis, nbasis);
if(!hermitian) {
const int nb = nbasis/2;
dag_factor_eigen.block(0,nb,nb,nb) *= -1.0;
dag_factor_eigen.block(nb,0,nb,nb) *= -1.0;
}
// GPU readable prefactor
thread_for(i, nbasis*nbasis, {
int j = i/nbasis;
int k = i%nbasis;
dag_factor[i] = dag_factor_eigen(j, k);
});
}
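
The mask built here encodes \Gamma_5 conjugation in a chirally ordered coarse basis (first nb = nbasis/2 vectors of one chirality, the rest of the other). For nbasis = 4, dag_factor viewed as a matrix is (an illustration, not in the source):

    +1 +1 -1 -1
    +1 +1 -1 -1
    -1 -1 +1 +1
    -1 -1 +1 +1

so the dagger kernels multiply each (b, bb) matrix element by this sign, applying \Gamma_5 A \Gamma_5 elementwise without forming a transpose.
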
void CoarsenOperator(GridBase *FineGrid,LinearOperatorBase<Lattice<Fobj> > &linop,
Aggregation<Fobj,CComplex,nbasis> & Subspace)
{
typedef Lattice<typename Fobj::tensor_reduced> FineComplexField;
typedef typename Fobj::scalar_type scalar_type;
std::cout << GridLogMessage<< "CoarsenMatrix "<< std::endl;
FineComplexField one(FineGrid); one=scalar_type(1.0,0.0);
FineComplexField zero(FineGrid); zero=scalar_type(0.0,0.0);
@ -868,13 +496,11 @@ public:
CoarseScalar InnerProd(Grid());
std::cout << GridLogMessage<< "CoarsenMatrix Orthog "<< std::endl;
// Orthogonalise the subblocks over the basis
blockOrthogonalise(InnerProd,Subspace.subspace);
// Compute the matrix elements of linop between this orthonormal
// set of vectors.
std::cout << GridLogMessage<< "CoarsenMatrix masks "<< std::endl;
int self_stencil=-1;
for(int p=0;p<geom.npoint;p++)
{
@ -913,7 +539,7 @@ public:
phi=Subspace.subspace[i];
std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl;
// std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl;
linop.OpDirAll(phi,Mphi_p);
linop.OpDiag (phi,Mphi_p[geom.npoint-1]);
@ -942,18 +568,6 @@ public:
autoView( A_self , A[self_stencil], AcceleratorWrite);
accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_p[ss](j,i),oZProj_v(ss)); });
if ( hermitian && (disp==-1) ) {
for(int pp=0;pp<geom.npoint;pp++){// Find the opposite link and set <j|A|i> = <i|A|j>*
int dirp = geom.directions[pp];
int dispp = geom.displacements[pp];
if ( (dirp==dir) && (dispp==1) ){
auto sft = conjugate(Cshift(oZProj,dir,1));
autoView( sft_v , sft , AcceleratorWrite);
autoView( A_pp , A[pp], AcceleratorWrite);
accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_pp[ss](i,j),sft_v(ss)); });
}
}
}
}
}
@ -992,54 +606,28 @@ public:
}
if(hermitian) {
std::cout << GridLogMessage << " ForceHermitian, new code "<<std::endl;
ForceHermitian();
}
InvertSelfStencilLink(); std::cout << GridLogMessage << "Coarse self link inverted" << std::endl;
FillHalfCbs(); std::cout << GridLogMessage << "Coarse half checkerboards filled" << std::endl;
}
void InvertSelfStencilLink() {
std::cout << GridLogDebug << "CoarsenedMatrix::InvertSelfStencilLink" << std::endl;
int localVolume = Grid()->lSites();
typedef typename Cobj::scalar_object scalar_object;
autoView(Aself_v, A[geom.npoint-1], CpuRead);
autoView(AselfInv_v, AselfInv, CpuWrite);
thread_for(site, localVolume, { // NOTE: Not able to bring this to GPU because of Eigen + peek/poke
Eigen::MatrixXcd selfLinkEigen = Eigen::MatrixXcd::Zero(nbasis, nbasis);
Eigen::MatrixXcd selfLinkInvEigen = Eigen::MatrixXcd::Zero(nbasis, nbasis);
scalar_object selfLink = Zero();
scalar_object selfLinkInv = Zero();
Coordinate lcoor;
Grid()->LocalIndexToLocalCoor(site, lcoor);
peekLocalSite(selfLink, Aself_v, lcoor);
for (int i = 0; i < nbasis; ++i)
for (int j = 0; j < nbasis; ++j)
selfLinkEigen(i, j) = static_cast<ComplexD>(TensorRemove(selfLink(i, j)));
selfLinkInvEigen = selfLinkEigen.inverse();
for(int i = 0; i < nbasis; ++i)
for(int j = 0; j < nbasis; ++j)
selfLinkInv(i, j) = selfLinkInvEigen(i, j);
pokeLocalSite(selfLinkInv, AselfInv_v, lcoor);
});
}
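
The per-site pattern above (peek into Eigen, invert, poke back) can be exercised in isolation; a minimal self-contained sketch, with nbasis a hypothetical size:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      const int nbasis = 4;  // hypothetical
      Eigen::MatrixXcd selfLink    = Eigen::MatrixXcd::Random(nbasis, nbasis);
      Eigen::MatrixXcd selfLinkInv = selfLink.inverse();
      // expect 1: the product reproduces the identity to rounding error
      std::cout << (selfLink * selfLinkInv).isIdentity(1e-10) << std::endl;
      return 0;
    }
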
void FillHalfCbs() {
std::cout << GridLogDebug << "CoarsenedMatrix::FillHalfCbs" << std::endl;
for(int p = 0; p < geom.npoint; ++p) {
pickCheckerboard(Even, Aeven[p], A[p]);
pickCheckerboard(Odd, Aodd[p], A[p]);
void ForceHermitian(void) {
CoarseMatrix Diff (Grid());
for(int p=0;p<geom.npoint;p++){
int dir = geom.directions[p];
int disp = geom.displacements[p];
if(disp==-1) {
// Find the opposite link
for(int pp=0;pp<geom.npoint;pp++){
int dirp = geom.directions[pp];
int dispp = geom.displacements[pp];
if ( (dirp==dir) && (dispp==1) ){
// Diff = adj(Cshift(A[p],dir,1)) - A[pp];
// std::cout << GridLogMessage<<" Replacing stencil leg "<<pp<<" with leg "<<p<< " diff "<<norm2(Diff) <<std::endl;
A[pp] = adj(Cshift(A[p],dir,1));
}
}
}
}
pickCheckerboard(Even, AselfInvEven, AselfInv);
pickCheckerboard(Odd, AselfInvOdd, AselfInv);
}
};

View File

@ -0,0 +1,67 @@
#include <Grid/GridCore.h>
#include <fcntl.h>
NAMESPACE_BEGIN(Grid);
MemoryStats *MemoryProfiler::stats = nullptr;
bool MemoryProfiler::debug = false;
void check_huge_pages(void *Buf,uint64_t BYTES)
{
#ifdef __linux__
int fd = open("/proc/self/pagemap", O_RDONLY);
assert(fd >= 0);
const int page_size = 4096;
uint64_t virt_pfn = (uint64_t)Buf / page_size;
off_t offset = sizeof(uint64_t) * virt_pfn;
uint64_t npages = (BYTES + page_size-1) / page_size;
uint64_t pagedata[npages];
uint64_t ret = lseek(fd, offset, SEEK_SET);
assert(ret == offset);
ret = ::read(fd, pagedata, sizeof(uint64_t)*npages);
assert(ret == sizeof(uint64_t) * npages);
int nhugepages = npages / 512;
int n4ktotal, nnothuge;
n4ktotal = 0;
nnothuge = 0;
for (int i = 0; i < nhugepages; ++i) {
uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
for (int j = 0; j < 512; ++j) {
uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
++n4ktotal;
if (pageaddr != baseaddr + j * page_size)
++nnothuge;
}
}
int rank = CartesianCommunicator::RankWorld();
printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
#endif
}
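
check_huge_pages leans on the Linux pagemap format: each 64-bit entry holds the page-frame number in its low 55 bits (hence the 0x7fffffffffffff mask), and a 2 MiB huge page must back 512 physically contiguous 4 KiB frames. A minimal usage sketch (hypothetical buffer and sizes):

    #include <cstdlib>
    #include <cstring>

    void *buf = nullptr;
    size_t bytes = size_t(1) << 30;            // 1 GiB, hypothetical
    posix_memalign(&buf, 2*1024*1024, bytes);  // 2 MiB-aligned request
    memset(buf, 0, bytes);                     // touch pages so they are actually mapped
    Grid::check_huge_pages(buf, bytes);        // reports 4 KiB pages not huge-backed
    free(buf);
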
std::string sizeString(const size_t bytes)
{
constexpr unsigned int bufSize = 256;
const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
char buf[256];
size_t s = 0;
double count = bytes;
while (count >= 1024 && s < 7)
{
s++;
count /= 1024;
}
if (count - floor(count) == 0.0)
{
snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
}
else
{
snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
}
return std::string(buf);
}
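
Tracing the loop gives these expected outputs (illustrative, derived from the code above):

    sizeString(512)        //  "512 B"
    sizeString(1536)       //  "1.5 KB"
    sizeString(3UL << 30)  //  "3 GB"
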
NAMESPACE_END(Grid);

View File

@ -173,8 +173,7 @@ template<class T> using cshiftAllocator = devAllocator<T>;
template<class T> using cshiftAllocator = std::allocator<T>;
#endif
template<class T> using Vector = std::vector<T,uvmAllocator<T> >;
template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
template<class T> using Vector = std::vector<T,uvmAllocator<T> >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;

View File

@ -34,6 +34,8 @@ NAMESPACE_BEGIN(Grid);
// Move control to configure.ac and Config.h?
#define ALLOCATION_CACHE
#define GRID_ALLOC_ALIGN (2*1024*1024)
#define GRID_ALLOC_SMALL_LIMIT (4096)
/*Pinning pages is costly*/

View File

@ -1,12 +1,11 @@
#include <Grid/GridCore.h>
#ifndef GRID_UVM
#warning "Using explicit device memory copies"
NAMESPACE_BEGIN(Grid);
//#define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
#define dprintf(...)
////////////////////////////////////////////////////////////
// For caching copies of data on device
////////////////////////////////////////////////////////////
@ -104,7 +103,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
///////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
// dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
@ -112,7 +111,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
DeviceBytes -=AccCache.bytes;
LRUremove(AccCache);
dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
// dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
}
uint64_t CpuPtr = AccCache.CpuPtr;
EntryErase(CpuPtr);
@ -126,7 +125,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
///////////////////////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
// dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
if(AccCache.state==AccDirty) {
@ -137,7 +136,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
DeviceBytes -=AccCache.bytes;
LRUremove(AccCache);
dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
// dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
}
uint64_t CpuPtr = AccCache.CpuPtr;
EntryErase(CpuPtr);
@ -150,7 +149,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
assert(AccCache.AccPtr!=(uint64_t)NULL);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
// dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
DeviceToHostBytes+=AccCache.bytes;
DeviceToHostXfer++;
AccCache.state=Consistent;
@ -165,7 +164,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
DeviceBytes+=AccCache.bytes;
}
dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
// dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
HostToDeviceBytes+=AccCache.bytes;
HostToDeviceXfer++;
@ -228,24 +227,18 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
// Find if present, otherwise get or force an empty
////////////////////////////////////////////////////////////////////////////
if ( EntryPresent(CpuPtr)==0 ){
EvictVictims(bytes);
EntryCreate(CpuPtr,bytes,mode,hint);
}
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
if (!AccCache.AccPtr) {
EvictVictims(bytes);
}
assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard));
assert(AccCache.cpuLock==0); // Programming error
if(AccCache.state!=Empty) {
dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
(uint64_t)AccCache.CpuPtr,
(uint64_t)CpuPtr,
(uint64_t)AccCache.bytes,
(uint64_t)bytes);
assert(AccCache.CpuPtr == CpuPtr);
assert(AccCache.bytes ==bytes);
}
@ -292,21 +285,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
}
AccCache.accLock++;
dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
// printf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
} else if(AccCache.state==Consistent) {
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
else
AccCache.state = Consistent; // Consistent + AccRead => Consistent
AccCache.accLock++;
dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
// printf("Consistent entry into device accLock %d\n",AccCache.accLock);
} else if(AccCache.state==AccDirty) {
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
else
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
AccCache.accLock++;
dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
// printf("AccDirty entry into device accLock %d\n",AccCache.accLock);
} else {
assert(0);
}
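
Collected from the branch comments here (and the CpuDirty write case in the surrounding source), the view-open transitions form a small state machine; AccRead means any non-write accelerator mode:

    current     + AcceleratorRead   + AcceleratorWrite/WriteDiscard
    CpuDirty    -> Consistent       -> AccDirty
    Consistent  -> Consistent       -> AccDirty
    AccDirty    -> AccDirty         -> AccDirty

In every case accLock is incremented before the view is handed back.
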
@ -368,16 +361,13 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
// Find if present, otherwise get or force an empty
////////////////////////////////////////////////////////////////////////////
if ( EntryPresent(CpuPtr)==0 ){
EvictVictims(bytes);
EntryCreate(CpuPtr,bytes,mode,transient);
}
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
if (!AccCache.AccPtr) {
EvictVictims(bytes);
}
assert((mode==CpuRead)||(mode==CpuWrite));
assert(AccCache.accLock==0); // Programming error

View File

@ -1,6 +1,7 @@
#include <Grid/GridCore.h>
#ifdef GRID_UVM
#warning "Grid is assuming unified virtual memory address space"
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////
// View management is 1:1 address space mapping

View File

@ -102,7 +102,7 @@ public:
///////////////////////////////////////////////////
static void SharedMemoryAllocate(uint64_t bytes, int flags);
static void SharedMemoryFree(void);
static void SharedMemoryCopy(void *dest,void *src,size_t bytes);
static void SharedMemoryCopy(void *dest,const void *src,size_t bytes);
static void SharedMemoryZero(void *dest,size_t bytes);
};

View File

@ -666,6 +666,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#endif
void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);
// std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< size<< "bytes)"<<std::endl;
if ( ptr == (void * )MAP_FAILED ) {
perror("failed mmap");
assert(0);
@ -715,7 +716,7 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
bzero(dest,bytes);
#endif
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
void GlobalSharedMemory::SharedMemoryCopy(void *dest,const void *src,size_t bytes)
{
#ifdef GRID_CUDA
cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
@ -771,13 +772,11 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
std::vector<int> ranks(size); for(int r=0;r<size;r++) ranks[r]=r;
MPI_Group_translate_ranks (FullGroup,size,&ranks[0],ShmGroup, &ShmRanks[0]);
#ifdef GRID_SHM_FORCE_MPI
#ifdef GRID_SHM_DISABLE
// Hide the shared memory path between ranks
{
for(int r=0;r<size;r++){
if ( r!=rank ) {
ShmRanks[r] = MPI_UNDEFINED;
}
ShmRanks[r] = MPI_UNDEFINED;
}
}
#endif

View File

@ -29,7 +29,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
#define header "SharedMemoryNone: "
/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
@ -56,38 +55,6 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended, use anonymous mmap
////////////////////////////////////////////////////////////////////////////////////////////
#if 1
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
std::cout << header "SharedMemoryAllocate "<< bytes<< " GPU implementation "<<std::endl;
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
ShmCommBuf = acceleratorAllocDevice(bytes);
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryNone.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
if ( WorldRank == 0 ){
std::cout << WorldRank << header " SharedMemoryNone.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
SharedMemoryZero(ShmCommBuf,bytes);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
WorldShmCommBufs[0] = ShmCommBuf;
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#else
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@ -116,15 +83,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
_ShmAllocBytes=bytes;
_ShmAlloc=1;
};
#endif
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
acceleratorMemSet(dest,0,bytes);
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
acceleratorCopyToDevice(src,dest,bytes);
}
////////////////////////////////////////////////////////
// Global shared functionality finished
// Now move to per communicator functionality

View File

@ -36,7 +36,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_local.h>
#include <Grid/lattice/Lattice_reduction.h>
#include <Grid/lattice/Lattice_peekpoke.h>
#include <Grid/lattice/Lattice_reality.h>
//#include <Grid/lattice/Lattice_reality.h>
#include <Grid/lattice/Lattice_real_imag.h>
#include <Grid/lattice/Lattice_comparison_utils.h>
#include <Grid/lattice/Lattice_comparison.h>

View File

@ -342,14 +342,19 @@ inline void ExpressionViewClose(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
GridUnopClass(UnarySub, -a);
GridUnopClass(UnaryNot, Not(a));
GridUnopClass(UnaryAdj, adj(a));
GridUnopClass(UnaryConj, conjugate(a));
GridUnopClass(UnaryTrace, trace(a));
GridUnopClass(UnaryTranspose, transpose(a));
GridUnopClass(UnaryTa, Ta(a));
GridUnopClass(UnaryProjectOnGroup, ProjectOnGroup(a));
GridUnopClass(UnaryToReal, toReal(a));
GridUnopClass(UnaryToComplex, toComplex(a));
GridUnopClass(UnaryTimesI, timesI(a));
GridUnopClass(UnaryTimesMinusI, timesMinusI(a));
GridUnopClass(UnaryAbs, abs(a));
GridUnopClass(UnarySqrt, sqrt(a));
GridUnopClass(UnaryRsqrt, rsqrt(a));
GridUnopClass(UnarySin, sin(a));
GridUnopClass(UnaryCos, cos(a));
GridUnopClass(UnaryAsin, asin(a));
@ -451,17 +456,20 @@ GridTrinOpClass(TrinaryWhere,
GRID_DEF_UNOP(operator-, UnarySub);
GRID_DEF_UNOP(Not, UnaryNot);
GRID_DEF_UNOP(operator!, UnaryNot);
//GRID_DEF_UNOP(adj, UnaryAdj);
//GRID_DEF_UNOP(conjugate, UnaryConj);
GRID_DEF_UNOP(adj, UnaryAdj);
GRID_DEF_UNOP(conjugate, UnaryConj);
GRID_DEF_UNOP(trace, UnaryTrace);
GRID_DEF_UNOP(transpose, UnaryTranspose);
GRID_DEF_UNOP(Ta, UnaryTa);
GRID_DEF_UNOP(ProjectOnGroup, UnaryProjectOnGroup);
GRID_DEF_UNOP(toReal, UnaryToReal);
GRID_DEF_UNOP(toComplex, UnaryToComplex);
GRID_DEF_UNOP(timesI, UnaryTimesI);
GRID_DEF_UNOP(timesMinusI, UnaryTimesMinusI);
GRID_DEF_UNOP(abs, UnaryAbs); // abs overloaded in cmath C++98; DON'T do the
// abs-fabs-dabs-labs thing
GRID_DEF_UNOP(sqrt, UnarySqrt);
GRID_DEF_UNOP(rsqrt, UnaryRsqrt);
GRID_DEF_UNOP(sin, UnarySin);
GRID_DEF_UNOP(cos, UnaryCos);
GRID_DEF_UNOP(asin, UnaryAsin);
@ -486,27 +494,27 @@ GRID_DEF_TRINOP(where, TrinaryWhere);
/////////////////////////////////////////////////////////////
template <class Op, class T1>
auto closure(const LatticeUnaryExpression<Op, T1> &expr)
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1)))>::type >
-> Lattice<decltype(expr.op.func(vecEval(0, expr.arg1)))>
{
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1)))>::type > ret(expr);
Lattice<decltype(expr.op.func(vecEval(0, expr.arg1)))> ret(expr);
return ret;
}
template <class Op, class T1, class T2>
auto closure(const LatticeBinaryExpression<Op, T1, T2> &expr)
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>::type >
-> Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>
{
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))>::type > ret(expr);
Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),vecEval(0, expr.arg2)))> ret(expr);
return ret;
}
template <class Op, class T1, class T2, class T3>
auto closure(const LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
-> Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),
-> Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),
vecEval(0, expr.arg2),
vecEval(0, expr.arg3)))>::type >
vecEval(0, expr.arg3)))>
{
Lattice<typename std::remove_const<decltype(expr.op.func(vecEval(0, expr.arg1),
Lattice<decltype(expr.op.func(vecEval(0, expr.arg1),
vecEval(0, expr.arg2),
vecEval(0, expr.arg3)))>::type > ret(expr);
vecEval(0, expr.arg3)))> ret(expr);
return ret;
}
#define EXPRESSION_CLOSURE(function) \

View File

@ -62,7 +62,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
basis_v.push_back(basis[k].View(AcceleratorWrite));
}
#if ( (!defined(GRID_CUDA)) )
#if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) )
int max_threads = thread_max();
Vector < vobj > Bt(Nm * max_threads);
thread_region
@ -161,13 +161,11 @@ void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,in
double * Qt_j = & Qt_jv[0];
for(int k=0;k<Nm;++k) Qt_j[k]=Qt(j,k);
auto basis_vp=& basis_v[0];
autoView(result_v,result,AcceleratorWrite);
accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{
vobj zzz=Zero();
auto B=coalescedRead(zzz);
auto B=coalescedRead(zz);
for(int k=k0; k<k1; ++k){
B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]);
B +=Qt_j[k] * coalescedRead(basis_v[k][ss]);
}
coalescedWrite(result_v[ss], B);
});

View File

@ -45,8 +45,8 @@ template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
autoView( ret_v, ret, AcceleratorWrite);
ret.Checkerboard()=lhs.Checkerboard();
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] = adj(lhs_v[ss]);
accelerator_for( ss, lhs_v.size(), vobj::Nsimd(), {
coalescedWrite(ret_v[ss], adj(lhs_v(ss)));
});
return ret;
};
@ -64,53 +64,6 @@ template<class vobj> inline Lattice<vobj> conjugate(const Lattice<vobj> &lhs){
return ret;
};
template<class vobj> inline Lattice<typename vobj::Complexified> toComplex(const Lattice<vobj> &lhs){
Lattice<typename vobj::Complexified> ret(lhs.Grid());
autoView( lhs_v, lhs, AcceleratorRead);
autoView( ret_v, ret, AcceleratorWrite);
ret.Checkerboard() = lhs.Checkerboard();
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] = toComplex(lhs_v[ss]);
});
return ret;
};
template<class vobj> inline Lattice<typename vobj::Realified> toReal(const Lattice<vobj> &lhs){
Lattice<typename vobj::Realified> ret(lhs.Grid());
autoView( lhs_v, lhs, AcceleratorRead);
autoView( ret_v, ret, AcceleratorWrite);
ret.Checkerboard() = lhs.Checkerboard();
accelerator_for( ss, lhs_v.size(), 1, {
ret_v[ss] = toReal(lhs_v[ss]);
});
return ret;
};
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto toComplex(const Expression &expr) -> decltype(closure(expr))
{
return toComplex(closure(expr));
}
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto toReal(const Expression &expr) -> decltype(closure(expr))
{
return toReal(closure(expr));
}
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto adj(const Expression &expr) -> decltype(closure(expr))
{
return adj(closure(expr));
}
template<class Expression,typename std::enable_if<is_lattice_expr<Expression>::value,void>::type * = nullptr>
auto conjugate(const Expression &expr) -> decltype(closure(expr))
{
return conjugate(closure(expr));
}
NAMESPACE_END(Grid);
#endif

View File

@ -127,11 +127,6 @@ accelerator_inline void convertType(T1 & out, const iScalar<T2> & in) {
convertType(out,in._internal);
}
template<typename T1, typename std::enable_if<!isGridScalar<T1>::value, T1>::type* = nullptr>
accelerator_inline void convertType(T1 & out, const iScalar<T1> & in) {
convertType(out,in._internal);
}
template<typename T1,typename T2>
accelerator_inline void convertType(iScalar<T1> & out, const T2 & in) {
convertType(out._internal,in);

View File

@ -123,7 +123,7 @@ assert(GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) < 1.0e-5);
////////////////////////////////////////////////////////////
// Helper to fill out metadata
////////////////////////////////////////////////////////////
template<class vobj> void ScidacMetaData(Lattice<vobj> & field,
template<class vobj> void ScidacMetaData(Lattice<vobj> & field,
FieldMetaData &header,
scidacRecord & _scidacRecord,
scidacFile & _scidacFile)
@ -619,12 +619,12 @@ class IldgWriter : public ScidacWriter {
// Don't require scidac records EXCEPT checksum
// Use Grid MetaData object if present.
////////////////////////////////////////////////////////////////
template <class stats = PeriodicGaugeStatistics>
void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,int sequence,std::string LFN,std::string description)
template <class vsimd>
void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,int sequence,std::string LFN,std::string description)
{
GridBase * grid = Umu.Grid();
typedef Lattice<vLorentzColourMatrixD> GaugeField;
typedef vLorentzColourMatrixD vobj;
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
typedef iLorentzColourMatrix<vsimd> vobj;
typedef typename vobj::scalar_object sobj;
////////////////////////////////////////
@ -636,9 +636,6 @@ class IldgWriter : public ScidacWriter {
ScidacMetaData(Umu,header,_scidacRecord,_scidacFile);
stats Stats;
Stats(Umu,header);
std::string format = header.floating_point;
header.ensemble_id = description;
header.ensemble_label = description;
@ -708,10 +705,10 @@ class IldgReader : public GridLimeReader {
// Else use ILDG MetaData object if present.
// Else use SciDAC MetaData object if present.
////////////////////////////////////////////////////////////////
template <class stats = PeriodicGaugeStatistics>
void readConfiguration(Lattice<vLorentzColourMatrixD> &Umu, FieldMetaData &FieldMetaData_) {
template <class vsimd>
void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, FieldMetaData &FieldMetaData_) {
typedef Lattice<vLorentzColourMatrixD > GaugeField;
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
typedef typename GaugeField::vector_object vobj;
typedef typename vobj::scalar_object sobj;
@ -924,8 +921,7 @@ class IldgReader : public GridLimeReader {
if ( found_FieldMetaData || found_usqcdInfo ) {
FieldMetaData checker;
stats Stats;
Stats(Umu,checker);
GaugeStatistics(Umu,checker);
assert(fabs(checker.plaquette - FieldMetaData_.plaquette )<1.0e-5);
assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5);
std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl;

View File

@ -176,18 +176,29 @@ template<class vobj> inline void PrepareMetaData(Lattice<vobj> & field, FieldMet
GridMetaData(grid,header);
MachineCharacteristics(header);
}
template<class Impl>
class GaugeStatistics
inline void GaugeStatistics(Lattice<vLorentzColourMatrixF> & data,FieldMetaData &header)
{
public:
void operator()(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
{
header.link_trace=WilsonLoops<Impl>::linkTrace(data);
header.plaquette =WilsonLoops<Impl>::avgPlaquette(data);
}
};
typedef GaugeStatistics<PeriodicGimplD> PeriodicGaugeStatistics;
typedef GaugeStatistics<ConjugateGimplD> ConjugateGaugeStatistics;
// How to convert data precision etc...
header.link_trace=WilsonLoops<PeriodicGimplF>::linkTrace(data);
header.plaquette =WilsonLoops<PeriodicGimplF>::avgPlaquette(data);
}
inline void GaugeStatistics(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
{
// How to convert data precision etc...
header.link_trace=WilsonLoops<PeriodicGimplD>::linkTrace(data);
header.plaquette =WilsonLoops<PeriodicGimplD>::avgPlaquette(data);
}
template<> inline void PrepareMetaData<vLorentzColourMatrixF>(Lattice<vLorentzColourMatrixF> & field, FieldMetaData &header)
{
GridBase *grid = field.Grid();
std::string format = getFormatString<vLorentzColourMatrixF>();
header.floating_point = format;
header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac
GridMetaData(grid,header);
GaugeStatistics(field,header);
MachineCharacteristics(header);
}
template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzColourMatrixD> & field, FieldMetaData &header)
{
GridBase *grid = field.Grid();
@ -195,6 +206,7 @@ template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzCo
header.floating_point = format;
header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac
GridMetaData(grid,header);
GaugeStatistics(field,header);
MachineCharacteristics(header);
}

View File

@ -40,8 +40,6 @@ using namespace Grid;
class NerscIO : public BinaryIO {
public:
typedef Lattice<vLorentzColourMatrixD> GaugeField;
static inline void truncate(std::string file){
std::ofstream fout(file,std::ios::out);
}
@ -131,12 +129,12 @@ public:
// Now the meat: the object readers
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class GaugeStats=PeriodicGaugeStatistics>
static inline void readConfiguration(GaugeField &Umu,
template<class vsimd>
static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
FieldMetaData& header,
std::string file,
GaugeStats GaugeStatisticsCalculator=GaugeStats())
std::string file)
{
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
GridBase *grid = Umu.Grid();
uint64_t offset = readHeader(file,Umu.Grid(),header);
@ -155,23 +153,23 @@ public:
// munger is a function of <floating point, Real, data_type>
if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
if ( ieee32 || ieee32big ) {
BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3F>
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
(Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
if ( ieee64 || ieee64big ) {
BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3D>
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
(Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
} else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
if ( ieee32 || ieee32big ) {
BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixF>
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
(Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
if ( ieee64 || ieee64big ) {
BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixD>
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
(Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
@ -179,7 +177,7 @@ public:
assert(0);
}
GaugeStats Stats; Stats(Umu,clone);
GaugeStatistics(Umu,clone);
std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec
<<" header "<<std::hex<<header.checksum<<std::dec <<std::endl;
@ -205,13 +203,15 @@ public:
std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
}
template<class GaugeStats=PeriodicGaugeStatistics>
static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
template<class vsimd>
static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
std::string file,
int two_row,
int bits32)
{
typedef vLorentzColourMatrixD vobj;
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
typedef iLorentzColourMatrix<vsimd> vobj;
typedef typename vobj::scalar_object sobj;
FieldMetaData header;
@ -229,7 +229,7 @@ public:
GridMetaData(grid,header);
assert(header.nd==4);
GaugeStats Stats; Stats(Umu,header);
GaugeStatistics(Umu,header);
MachineCharacteristics(header);
uint64_t offset;
@ -238,19 +238,19 @@ public:
header.floating_point = std::string("IEEE64BIG");
header.data_type = std::string("4D_SU3_GAUGE_3x3");
GaugeSimpleUnmunger<fobj3D,sobj> munge;
if ( grid->IsBoss() ) {
truncate(file);
offset = writeHeader(header,file);
}
grid->Broadcast(0,(void *)&offset,sizeof(offset));
if ( grid->IsBoss() ) {
truncate(file);
offset = writeHeader(header,file);
}
grid->Broadcast(0,(void *)&offset,sizeof(offset));
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
nersc_csum,scidac_csuma,scidac_csumb);
header.checksum = nersc_csum;
if ( grid->IsBoss() ) {
writeHeader(header,file);
}
if ( grid->IsBoss() ) {
writeHeader(header,file);
}
std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "
<<std::hex<<header.checksum

View File

@ -154,7 +154,7 @@ public:
grid->Barrier(); timer.Stop();
std::cout << Grid::GridLogMessage << "OpenQcdIO::readConfiguration: redistribute overhead " << timer.Elapsed() << std::endl;
PeriodicGaugeStatistics Stats; Stats(Umu, clone);
GaugeStatistics(Umu, clone);
RealD plaq_diff = fabs(clone.plaquette - header.plaquette);

View File

@ -208,7 +208,7 @@ public:
FieldMetaData clone(header);
PeriodicGaugeStatistics Stats; Stats(Umu, clone);
GaugeStatistics(Umu, clone);
RealD plaq_diff = fabs(clone.plaquette - header.plaquette);

View File

@ -80,13 +80,6 @@ template<typename T> struct isSpinor {
template <typename T> using IfSpinor = Invoke<std::enable_if< isSpinor<T>::value,int> > ;
template <typename T> using IfNotSpinor = Invoke<std::enable_if<!isSpinor<T>::value,int> > ;
const int CoarseIndex = 4;
template<typename T> struct isCoarsened {
static constexpr bool value = (CoarseIndex<=T::TensorLevel);
};
template <typename T> using IfCoarsened = Invoke<std::enable_if< isCoarsened<T>::value,int> > ;
template <typename T> using IfNotCoarsened = Invoke<std::enable_if<!isCoarsened<T>::value,int> > ;
// ChrisK very keen to add extra space for Gparity doubling.
//
// Also add domain wall index, in a way where Wilson operator

View File

@ -97,30 +97,42 @@ public:
Coordinate icoor;
#ifdef GRID_SIMT
_Spinor tmp;
const int Nsimd =SiteDoubledGaugeField::Nsimd();
int s = acceleratorSIMTlane(Nsimd);
St.iCoorFromIindex(icoor,s);
int mmu = mu % Nd;
if ( SE->_around_the_world && St.parameters.twists[mmu] ) {
int permute_lane = (sl==1)
|| ((distance== 1)&&(icoor[direction]==1))
|| ((distance==-1)&&(icoor[direction]==0));
auto UU0=coalescedRead(U(0)(mu));
auto UU1=coalescedRead(U(1)(mu));
//Decide whether we do a G-parity flavor twist
//Note: this assumes (but does not check) that sl==1 || sl==2 i.e. max 2 SIMD lanes in G-parity dir
//It also assumes (but does not check) that abs(distance) == 1
int permute_lane = (sl==1)
|| ((distance== 1)&&(icoor[direction]==1))
|| ((distance==-1)&&(icoor[direction]==0));
if ( permute_lane ) {
tmp(0) = chi(1);
tmp(1) = chi(0);
} else {
tmp(0) = chi(0);
tmp(1) = chi(1);
}
permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu]; //only if we are going around the world
auto UU0=coalescedRead(U(0)(mu));
auto UU1=coalescedRead(U(1)(mu));
//Apply the links
int f_upper = permute_lane ? 1 : 0;
int f_lower = !f_upper;
mult(&phi(0),&UU0,&tmp(0));
mult(&phi(1),&UU1,&tmp(1));
mult(&phi(0),&UU0,&chi(f_upper));
mult(&phi(1),&UU1,&chi(f_lower));
} else {
auto UU0=coalescedRead(U(0)(mu));
auto UU1=coalescedRead(U(1)(mu));
mult(&phi(0),&UU0,&chi(0));
mult(&phi(1),&UU1,&chi(1));
}
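// Read together, the comments above reduce the flavor-twist decision to a single
// predicate. Restated standalone under the same assumptions (|distance| == 1,
// sl in {1,2} SIMD lanes in the G-parity direction) -- a sketch, not library code:
//
//   inline bool gparityTwistLane(int sl, int distance, int icoor_dir,
//                                bool around_the_world, bool twisted) {
//     bool edge_lane = (sl == 1)
//                   || (distance ==  1 && icoor_dir == 1)
//                   || (distance == -1 && icoor_dir == 0);
//     return edge_lane && around_the_world && twisted; // only when crossing the boundary
//   }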
#else
typedef _Spinor vobj;

View File

@ -642,7 +642,7 @@ void CayleyFermion5D<Impl>::ContractConservedCurrent( PropagatorField &q_in_1,
Current curr_type,
unsigned int mu)
{
#if (!defined(GRID_HIP))
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
@ -826,7 +826,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
}
#endif
#if (!defined(GRID_HIP))
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
int tshift = (mu == Nd-1) ? 1 : 0;
////////////////////////////////////////////////
// GENERAL CAYLEY CASE

View File

@ -92,16 +92,20 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
int lvol = _Umu.Grid()->lSites();
int DimRep = Impl::Dimension;
Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
Coordinate lcoor;
typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
{
autoView(CTv,CloverTerm,CpuRead);
autoView(CTIv,CloverTermInv,CpuWrite);
thread_for(site, lvol, {
Coordinate lcoor;
for (int site = 0; site < lvol; site++) {
grid->LocalIndexToLocalCoor(site, lcoor);
Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
peekLocalSite(Qx, CTv, lcoor);
Qxinv = Zero();
//if (csw!=0){
for (int j = 0; j < Ns; j++)
for (int k = 0; k < Ns; k++)
@ -122,21 +126,21 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
// if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl;
// }
pokeLocalSite(Qxinv, CTIv, lcoor);
});
}
}
// Separate the even and odd parts
pickCheckerboard(Even, CloverTermEven, CloverTerm);
pickCheckerboard(Odd, CloverTermOdd, CloverTerm);
pickCheckerboard(Even, CloverTermDagEven, adj(CloverTerm));
pickCheckerboard(Odd, CloverTermDagOdd, adj(CloverTerm));
pickCheckerboard(Even, CloverTermDagEven, closure(adj(CloverTerm)));
pickCheckerboard(Odd, CloverTermDagOdd, closure(adj(CloverTerm)));
pickCheckerboard(Even, CloverTermInvEven, CloverTermInv);
pickCheckerboard(Odd, CloverTermInvOdd, CloverTermInv);
pickCheckerboard(Even, CloverTermInvDagEven, adj(CloverTermInv));
pickCheckerboard(Odd, CloverTermInvDagOdd, adj(CloverTermInv));
pickCheckerboard(Even, CloverTermInvDagEven, closure(adj(CloverTermInv)));
pickCheckerboard(Odd, CloverTermInvDagOdd, closure(adj(CloverTermInv)));
}
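
The closure(adj(...)) calls force the lazy expression template into a concrete Lattice before pickCheckerboard slices it, since pickCheckerboard expects an evaluated field; schematically (f a hypothetical lattice field):

    auto expr = adj(f);           // expression template, nothing evaluated yet
    auto tmp  = closure(adj(f));  // evaluated into a concrete Lattice
    pickCheckerboard(Even, f_even, tmp);
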
template <class Impl>

View File

@ -38,6 +38,9 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
// undefine everything related to kernels
#include <simd/Fujitsu_A64FX_undef.h>
// enable A64FX body
#define WILSONKERNELSASMBODYA64FX
//#pragma message("A64FX Dslash: WilsonKernelsAsmBodyA64FX.h")
///////////////////////////////////////////////////////////
// If we are A64FX specialise the single precision routine
@ -60,89 +63,119 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
/////////////////////////////////////////////////////////////////
@ -152,89 +185,119 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
// undefine
@ -267,89 +330,119 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFie
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
/////////////////////////////////////////////////////////////////
// XYZT vectorised, dag Kernel, double
@ -358,93 +451,124 @@ WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif
// undefs
#undef WILSONKERNELSASMBODYA64FX
#include <simd/Fujitsu_A64FX_undef.h>
#endif //A64FXASM

View File

@ -25,11 +25,6 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
// GCC 10 messes up SVE instruction scheduling using -O3, but
// -O3 -fno-schedule-insns -fno-schedule-insns2 does wonders
// performance is now better than armclang 20.2
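Hence the per-function pragma that precedes every kernel specialisation in the file above; for reference, the idiom is simply:
// Keep -O3 but disable both GCC instruction-scheduling passes for SVE.
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")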
#ifdef KERNEL_DAG
#define DIR0_PROJ XP_PROJ
#define DIR1_PROJ YP_PROJ
@ -102,7 +97,7 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
} else { \
LOAD_CHI(base); \
} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
MULT_2SPIN_1(Dir); \
@ -115,11 +110,6 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
} \
RECON; \
/*
NB: issuing PREFETCH_GAUGE_L2(Dir+4); here results in a performance penalty,
though I expected it to improve performance
*/
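The PREFETCH_* macros expand to architecture-specific prefetch hints; as a loose analogy only (the real macros are A64FX assembly, and gauge_ptr here is a hypothetical address), an L2 prefetch in portable GCC style looks like:
// rw=0: prefetch for read; locality=2: try to keep the line in L2.
__builtin_prefetch(gauge_ptr, 0, 2);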
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
PREFETCH1_CHIMU(base); \
@ -136,63 +126,73 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
basep = st.GetPFInfo(nent,plocal); nent++; \
if ( local ) { \
LOAD_CHIMU(base); \
LOAD_TABLE(PERMUTE_DIR); \
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
}else if ( st.same_node[Dir] ) {LOAD_CHI(base);} \
if ( local || st.same_node[Dir] ) { \
MULT_2SPIN_1(Dir); \
MULT_2SPIN_2; \
RECON; \
} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
PREFETCH_CHIMU(base); \
PREFETCH_CHIMU_L2(basep); \
if ( local ) { \
LOAD_CHIMU(base); \
LOAD_TABLE(PERMUTE_DIR); \
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
}else if ( st.same_node[Dir] ) {LOAD_CHI(base);} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
if ( local || st.same_node[Dir] ) { \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \
PREFETCH_CHIMU_L2(basep); \
} else { PREFETCH_CHIMU(base); } \
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
PREFETCH1_CHIMU(base); \
{ ZERO_PSI; } \
ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)
#define RESULT(base,basep) SAVE_RESULT(base,basep);
#endif
////////////////////////////////////////////////////////////////////////////////
// Post comms kernel
////////////////////////////////////////////////////////////////////////////////
#ifdef EXTERIOR
#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \
nmu++; \
}
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
nmu=0; \
{ ZERO_PSI;} \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \
nmu++; \
}
#define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);}
#endif
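Together the two macro sets implement the communication/computation overlap: the interior pass (previous section) handles legs whose neighbour is local or on the same node, while this post-comms pass touches only legs whose halo data arrived over the network, counting them in nmu so that ADD_RESULT accumulates into the output only when at least one exterior leg contributed. Schematically (a sketch with hypothetical helpers, not the actual driver):
halo_exchange_begin(st);                 // post sends/receives
for (int ss = 0; ss < nsites; ss++)
  interior_legs(ss);                     // local / same-node neighbours
halo_exchange_complete(st);
for (int ss = 0; ss < nsites; ss++) {
  int nmu = exterior_legs(ss);           // only off-node neighbours
  if (nmu) add_result(ss);               // mirrors RESULT(base,basep) above
}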
{
int nmu;
int local,perm, ptype;
@ -209,6 +209,7 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
int ssn=ssU+1; if(ssn>=nmax) ssn=0;
// int sUn=lo.Reorder(ssn);
int sUn=ssn;
LOCK_GAUGE(0);
#else
int sU =ssU;
int ssn=ssU+1; if(ssn>=nmax) ssn=0;
@ -294,11 +295,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
std::cout << "----------------------------------------------------" << std::endl;
#endif
// DC ZVA test
// { uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }
ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON);
#ifdef SHOW
@ -312,11 +308,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
std::cout << "----------------------------------------------------" << std::endl;
#endif
// DC ZVA test
//{ uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }
ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON);
#ifdef SHOW
@ -330,11 +321,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
std::cout << "----------------------------------------------------" << std::endl;
#endif
// DC ZVA test
//{ uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }
ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON);
#ifdef SHOW
@ -355,7 +341,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
base = (uint64_t) &out[ss];
basep= st.GetPFInfo(nent,plocal); ent++;
basep = (uint64_t) &out[ssn];
//PREFETCH_RESULT_L1_STORE(base);
RESULT(base,basep);
#ifdef SHOW

View File

@ -1,38 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/gauge/Gauge.cc
Copyright (C) 2020
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
NAMESPACE_BEGIN(Grid);
std::vector<int> ConjugateGaugeImplBase::_conjDirs;
NAMESPACE_END(Grid);

View File

@ -154,10 +154,6 @@ public:
return Hsum.real();
}
static inline void Project(Field &U) {
ProjectSUn(U);
}
static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
SU<Nc>::HotConfiguration(pRNG, U);
}

View File

@ -59,14 +59,14 @@ public:
}
static inline GaugeLinkField
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
return PeriodicBC::CovShiftIdentityBackward(Link, mu);
return Cshift(adj(Link), mu, -1);
}
static inline GaugeLinkField
CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
return PeriodicBC::CovShiftIdentityForward(Link,mu);
return Link;
}
static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) {
return PeriodicBC::ShiftStaple(Link,mu);
return Cshift(Link, mu, 1);
}
static inline bool isPeriodicGaugeField(void) { return true; }
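For orientation, with periodic boundaries these "identity" covariant shifts reduce to pure link algebra,
\[ \mathrm{CovShiftIdentityForward}(U,\mu)(x) = U_\mu(x), \qquad \mathrm{CovShiftIdentityBackward}(U,\mu)(x) = U_\mu^\dagger(x-\hat\mu), \]
which is exactly what returning Link and Cshift(adj(Link), mu, -1) compute.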
@ -74,13 +74,7 @@ public:
// Composition with smeared link, bc's etc.. probably need multiple inheritance
// Variable precision "S" and variable Nc
class ConjugateGaugeImplBase {
protected:
static std::vector<int> _conjDirs;
};
template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes, ConjugateGaugeImplBase {
private:
template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes {
public:
INHERIT_GIMPL_TYPES(GimplTypes);
@ -90,56 +84,47 @@ public:
////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <class covariant>
static Lattice<covariant> CovShiftForward(const GaugeLinkField &Link, int mu,
const Lattice<covariant> &field)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftForward(Link, mu, field);
else
return PeriodicBC::CovShiftForward(Link, mu, field);
const Lattice<covariant> &field) {
return ConjugateBC::CovShiftForward(Link, mu, field);
}
template <class covariant>
static Lattice<covariant> CovShiftBackward(const GaugeLinkField &Link, int mu,
const Lattice<covariant> &field)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftBackward(Link, mu, field);
else
return PeriodicBC::CovShiftBackward(Link, mu, field);
const Lattice<covariant> &field) {
return ConjugateBC::CovShiftBackward(Link, mu, field);
}
static inline GaugeLinkField
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftIdentityBackward(Link, mu);
else
return PeriodicBC::CovShiftIdentityBackward(Link, mu);
CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;
Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);
GaugeLinkField tmp(grid);
tmp = adj(Link);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return Cshift(tmp, mu, -1); // moves towards positive mu
}
static inline GaugeLinkField
CovShiftIdentityForward(const GaugeLinkField &Link, int mu)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::CovShiftIdentityForward(Link,mu);
else
return PeriodicBC::CovShiftIdentityForward(Link,mu);
CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
return Link;
}
static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu)
{
assert(_conjDirs.size() == Nd);
if(_conjDirs[mu])
return ConjugateBC::ShiftStaple(Link,mu);
else
return PeriodicBC::ShiftStaple(Link,mu);
static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;
Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);
GaugeLinkField tmp(grid);
tmp = Cshift(Link, mu, 1);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return tmp;
}
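Both helpers encode charge-conjugate (C*) boundary conditions: a link transported across the lattice edge in direction mu reappears complex-conjugated, i.e. $U_\mu(x + L_\mu\hat\mu) = U_\mu^*(x)$, so only the slice at the final site (coor == Lmu) needs conjugate() applied around the Cshift; every interior site shifts exactly as in the periodic case.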
static inline void setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline std::vector<int> getDirections(void) { return _conjDirs; }
static inline bool isPeriodicGaugeField(void) { return false; }
};
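On the master side of this diff the boundary type is selected per direction at run time via _conjDirs; a typical setup might look like the sketch below (1 selects the conjugate boundary, 0 the periodic one, one entry per direction, assuming the usual ConjugateGimplD typedef):
std::vector<int> conjDirs = {1, 1, 1, 0}; // C* in x,y,z; periodic in t
ConjugateGimplD::setDirections(conjDirs); // must precede use: the shifts assert _conjDirs.size() == Nd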

View File

@ -54,10 +54,6 @@ public:
static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
U = 1.0;
}
static inline void Project(Field &U) {
return;
}
static void MomentumSpacePropagator(Field &out, RealD m)
{
@ -238,10 +234,6 @@ public:
#endif //USE_FFT_ACCELERATION
}
static inline void Project(Field &U) {
return;
}
static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U);
}

View File

@ -159,13 +159,6 @@ private:
Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U,
Resources.GetSerialRNG(),
Resources.GetParallelRNG());
} else {
// others
std::cout << GridLogError << "Unrecognized StartingType\n";
std::cout
<< GridLogError
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
exit(1);
}
Smearing.set_Field(U);

View File

@ -95,7 +95,7 @@ private:
typedef typename IntegratorType::Field Field;
typedef std::vector< HmcObservable<Field> * > ObsListType;
//pass these from the resource manager
GridSerialRNG &sRNG;
GridParallelRNG &pRNG;

View File

@ -74,7 +74,7 @@ public:
conf_file = os.str();
}
}
virtual ~BaseHmcCheckpointer(){};
void check_filename(const std::string &filename){
std::ifstream f(filename.c_str());
if(!f.good()){
@ -82,6 +82,7 @@ public:
abort();
};
}
virtual void initialize(const CheckpointerParameters &Params) = 0;
virtual void CheckpointRestore(int traj, typename Impl::Field &U,

View File

@ -45,7 +45,6 @@ private:
public:
INHERIT_GIMPL_TYPES(Implementation);
typedef GaugeStatistics<Implementation> GaugeStats;
ILDGHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }
@ -79,7 +78,7 @@ public:
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
IldgWriter _IldgWriter(grid->IsBoss());
_IldgWriter.open(config);
_IldgWriter.writeConfiguration<GaugeStats>(U, traj, config, config);
_IldgWriter.writeConfiguration(U, traj, config, config);
_IldgWriter.close();
std::cout << GridLogMessage << "Written ILDG Configuration on " << config
@ -106,7 +105,7 @@ public:
FieldMetaData header;
IldgReader _IldgReader;
_IldgReader.open(config);
_IldgReader.readConfiguration<GaugeStats>(U,header); // format from the header
_IldgReader.readConfiguration(U,header); // format from the header
_IldgReader.close();
std::cout << GridLogMessage << "Read ILDG Configuration from " << config

View File

@ -43,8 +43,7 @@ private:
public:
INHERIT_GIMPL_TYPES(Gimpl); // only for gauge configurations
typedef GaugeStatistics<Gimpl> GaugeStats;
NerscHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }
void initialize(const CheckpointerParameters &Params_) {
@ -61,7 +60,7 @@ public:
int precision32 = 1;
int tworow = 0;
NerscIO::writeRNGState(sRNG, pRNG, rng);
NerscIO::writeConfiguration<GaugeStats>(U, config, tworow, precision32);
NerscIO::writeConfiguration(U, config, tworow, precision32);
}
};
@ -75,7 +74,7 @@ public:
FieldMetaData header;
NerscIO::readRNGState(sRNG, pRNG, header, rng);
NerscIO::readConfiguration<GaugeStats>(U, header, config);
NerscIO::readConfiguration(U, header, config);
};
};

View File

@ -313,8 +313,6 @@ public:
std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
}
FieldImplementation::Project(U);
// and that we indeed got to the end of the trajectory
assert(fabs(t_U - Params.trajL) < 1.0e-6);

View File

@ -99,7 +99,7 @@ public:
virtual Prod* getPtr() = 0;
// add a getReference?
virtual ~HMCModuleBase(){};
virtual void print_parameters(){}; // default to nothing
};

View File

@ -128,6 +128,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProjTm (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
hspin(0)=fspin(0)-fspin(2);
hspin(1)=fspin(1)-fspin(3);
}
@ -137,50 +138,40 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
* 0 0 -1 0
* 0 0 0 -1
*/
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
hspin(0)=fspin(0);
hspin(1)=fspin(1);
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
hspin(0)=fspin(2);
hspin(1)=fspin(3);
}
// template<class vtype> accelerator_inline void fspProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
rfspin(0)=fspin(0);
rfspin(1)=fspin(1);
rfspin(2)=Zero();
rfspin(3)=Zero();
}
// template<class vtype> accelerator_inline void fspProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
rfspin(0)=Zero();
rfspin(1)=Zero();
rfspin(2)=fspin(2);
rfspin(3)=fspin(3);
}
template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin)
{
const int hN = N>>1;
for(int s=0;s<hN;s++){
rfspin(s)=fspin(s);
rfspin(s+hN)=Zero();
}
}
template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin)
{
const int hN = N>>1;
for(int s=0;s<hN;s++){
rfspin(s)=Zero();
rfspin(s+hN)=fspin(s+hN);
}
}
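All of these are the chiral projections: in the basis shown above $\gamma_5 = \mathrm{diag}(1,1,-1,-1)$, so spProj5p/spProj5m keep the upper/lower two spin components, i.e. they act as the projectors $P_\pm = \tfrac{1}{2}(1\pm\gamma_5)$ up to normalisation, with the factor of two in $(1\pm\gamma_5)$ restored by spRecon5p/spRecon5m (the hspin+hspin below). The coarsened overloads apply the same upper-half/lower-half split to a generic length-N vector with hN = N/2.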
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reconstruction routines to move back again to four spin
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -192,6 +183,7 @@ template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inlin
*/
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)=timesMinusI(hspin(1));
@ -199,6 +191,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)=timesI(hspin(1));
@ -206,6 +199,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)-=timesI(hspin(1));
@ -213,6 +207,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)+=timesI(hspin(1));
@ -226,6 +221,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)= hspin(1);
@ -233,6 +229,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)=-hspin(1);
@ -240,6 +237,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)+=hspin(1);
@ -247,6 +245,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)-=hspin(1);
@ -261,6 +260,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
*/
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)=timesMinusI(hspin(0));
@ -268,6 +268,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)= timesI(hspin(0));
@ -275,6 +276,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)-=timesI(hspin(0));
@ -282,6 +284,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)+=timesI(hspin(0));
@ -295,6 +298,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
*/
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)=hspin(0);
@ -302,6 +306,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0);
fspin(1)=hspin(1);
fspin(2)=-hspin(0);
@ -309,6 +314,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)+=hspin(0);
@ -316,6 +322,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0);
fspin(1)+=hspin(1);
fspin(2)-=hspin(0);
@ -329,6 +336,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
*/
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=hspin(0)+hspin(0); // add is lower latency than mul
fspin(1)=hspin(1)+hspin(1); // probably no measurable difference though
fspin(2)=Zero();
@ -336,6 +344,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5m (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)=Zero();
fspin(1)=Zero();
fspin(2)=hspin(0)+hspin(0);
@ -343,6 +352,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
fspin(0)+=hspin(0)+hspin(0);
fspin(1)+=hspin(1)+hspin(1);
}
@ -362,6 +372,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
//////////
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjXp(hspin._internal[i],fspin._internal[i]);
}
@ -415,21 +426,26 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconXp (iM
}}
}
////////
// Xm
////////
template<class rtype,class vtype> accelerator_inline void spProjXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjXm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjXm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjXm(hspin._internal[i][j],fspin._internal[i][j]);
@ -439,16 +455,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatri
template<class rtype,class vtype> accelerator_inline void spReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconXm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconXm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconXm(hspin._internal[i][j],fspin._internal[i][j]);
@ -457,37 +476,45 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatr
template<class rtype,class vtype> accelerator_inline void accumReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconXm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconXm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconXm(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
////////
// Yp
////////
template<class rtype,class vtype> accelerator_inline void spProjYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjYp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjYp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjYp(hspin._internal[i][j],fspin._internal[i][j]);
@ -497,16 +524,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatri
template<class rtype,class vtype> accelerator_inline void spReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconYp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconYp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconYp(hspin._internal[i][j],fspin._internal[i][j]);
@ -515,55 +545,66 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatr
template<class rtype,class vtype> accelerator_inline void accumReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconYp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconYp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconYp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
////////
// Ym
////////
template<class rtype,class vtype> accelerator_inline void spProjYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjYm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjYm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjYm(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void spReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconYm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,const iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconYm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconYm(hspin._internal[i][j],fspin._internal[i][j]);
@ -572,16 +613,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatr
template<class rtype,class vtype> accelerator_inline void accumReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconYm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconYm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconYm(hspin._internal[i][j],fspin._internal[i][j]);
@ -594,57 +638,66 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iM
////////
template<class rtype,class vtype> accelerator_inline void spProjZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjZp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjZp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjZp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void spReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconZp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconZp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconZp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void accumReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconZp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconZp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconZp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
@ -653,53 +706,62 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iM
////////
template<class rtype,class vtype> accelerator_inline void spProjZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjZm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjZm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjZm(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void spReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconZm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconZm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconZm(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void accumReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconZm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconZm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconZm(hspin._internal[i][j],fspin._internal[i][j]);
@ -712,35 +774,41 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iM
////////
template<class rtype,class vtype> accelerator_inline void spProjTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjTp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjTp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjTp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void spReconTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconTp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconTp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconTp(hspin._internal[i][j],fspin._internal[i][j]);
@ -749,37 +817,44 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatr
template<class rtype,class vtype> accelerator_inline void accumReconTp (iScalar<rtype> &hspin, iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconTp(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTp (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconTp(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconTp (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconTp(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
////////
// Tm
////////
template<class rtype,class vtype> accelerator_inline void spProjTm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProjTm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProjTm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProjTm(hspin._internal[i][j],fspin._internal[i][j]);
@ -789,16 +864,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatri
template<class rtype,class vtype> accelerator_inline void spReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spReconTm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spReconTm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spReconTm(hspin._internal[i][j],fspin._internal[i][j]);
@ -807,37 +885,44 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatr
template<class rtype,class vtype> accelerator_inline void accumReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumReconTm(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumReconTm(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumReconTm(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
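// Note: the spProj/spRecon/accumRecon overloads above all follow one
// recursion pattern: each peels a tensor level (iScalar -> iVector ->
// iMatrix) and recurses until the spinor index is reached; the IfNotSpinor
// guards stop the iVector overloads from consuming the spin index itself.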
////////
// 5p
////////
template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
template<class rtype,class vtype> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProj5p(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProj5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
template<class rtype,class vtype,int N> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProj5p(hspin._internal[i][j],fspin._internal[i][j]);
@ -846,16 +931,19 @@ template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> acce
template<class rtype,class vtype> accelerator_inline void spRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spRecon5p(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spRecon5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spRecon5p(hspin._internal[i][j],fspin._internal[i][j]);
@ -864,16 +952,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatr
template<class rtype,class vtype> accelerator_inline void accumRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumRecon5p(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumRecon5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumRecon5p(hspin._internal[i][j],fspin._internal[i][j]);
@ -881,18 +972,24 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iM
}
// four spinor projectors for chiral proj
template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
// template<class vtype> accelerator_inline void fspProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
template<class vtype> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProj5p(hspin._internal,fspin._internal);
}
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
// template<class vtype,int N> accelerator_inline void fspProj5p (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin)
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProj5p(hspin._internal[i],fspin._internal[i]);
}
}
template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
// template<class vtype,int N> accelerator_inline void fspProj5p (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin)
template<class vtype,int N> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProj5p(hspin._internal[i][j],fspin._internal[i][j]);
@ -904,17 +1001,17 @@ template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inli
// 5m
////////
template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
template<class rtype,class vtype> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
spProj5m(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
for(int i=0;i<N;i++) {
spProj5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
template<class rtype,class vtype,int N> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
@ -924,34 +1021,40 @@ template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> acce
template<class rtype,class vtype> accelerator_inline void spRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spRecon5m(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spRecon5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void spRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spRecon5m(hspin._internal[i][j],fspin._internal[i][j]);
}}
}
template<class rtype,class vtype> accelerator_inline void accumRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
accumRecon5m(hspin._internal,fspin._internal);
}
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
accumRecon5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
accumRecon5m(hspin._internal[i][j],fspin._internal[i][j]);
@ -960,18 +1063,24 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iM
// four spinor projectors for chiral proj
template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
// template<class vtype> accelerator_inline void fspProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
template<class vtype> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
spProj5m(hspin._internal,fspin._internal);
}
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
// template<class vtype,int N> accelerator_inline void fspProj5m (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin)
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
for(int i=0;i<N;i++) {
spProj5m(hspin._internal[i],fspin._internal[i]);
}
}
template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
// template<class vtype,int N> accelerator_inline void fspProj5m (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin)
template<class vtype,int N> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
{
//typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
spProj5m(hspin._internal[i][j],fspin._internal[i][j]);
View File
@ -51,7 +51,7 @@ public:
private:
template <class mobj, class robj>
static void BaryonSite(const mobj &D1,
static void baryon_site(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
@ -61,18 +61,8 @@ public:
const int parity,
const bool * wick_contractions,
robj &result);
template <class mobj, class robj>
static void BaryonSiteMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool * wick_contractions,
robj &result);
public:
static void WickContractions(std::string qi,
static void Wick_Contractions(std::string qi,
std::string qf,
bool* wick_contractions);
static void ContractBaryons(const PropagatorField &q1_left,
@ -85,17 +75,8 @@ public:
const bool* wick_contractions,
const int parity,
ComplexField &baryon_corr);
static void ContractBaryonsMatrix(const PropagatorField &q1_left,
const PropagatorField &q2_left,
const PropagatorField &q3_left,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
SpinMatrixField &baryon_corr);
template <class mobj, class robj>
static void ContractBaryonsSliced(const mobj &D1,
static void ContractBaryons_Sliced(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
@ -106,20 +87,9 @@ public:
const int parity,
const int nt,
robj &result);
template <class mobj, class robj>
static void ContractBaryonsSlicedMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
const int nt,
robj &result);
private:
template <class mobj, class mobj2, class robj>
static void BaryonGamma3ptGroup1Site(
static void Baryon_Gamma_3pt_Group1_Site(
const mobj &Dq1_ti,
const mobj2 &Dq2_spec,
const mobj2 &Dq3_spec,
@ -131,7 +101,7 @@ public:
robj &result);
template <class mobj, class mobj2, class robj>
static void BaryonGamma3ptGroup2Site(
static void Baryon_Gamma_3pt_Group2_Site(
const mobj2 &Dq1_spec,
const mobj &Dq2_ti,
const mobj2 &Dq3_spec,
@ -143,7 +113,7 @@ public:
robj &result);
template <class mobj, class mobj2, class robj>
static void BaryonGamma3ptGroup3Site(
static void Baryon_Gamma_3pt_Group3_Site(
const mobj2 &Dq1_spec,
const mobj2 &Dq2_spec,
const mobj &Dq3_ti,
@ -155,7 +125,7 @@ public:
robj &result);
public:
template <class mobj>
static void BaryonGamma3pt(
static void Baryon_Gamma_3pt(
const PropagatorField &q_ti,
const mobj &Dq_spec1,
const mobj &Dq_spec2,
@ -168,7 +138,7 @@ public:
SpinMatrixField &stn_corr);
private:
template <class mobj, class mobj2, class robj>
static void SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
static void Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@ -177,7 +147,7 @@ public:
const Gamma GammaB_nucl,
robj &result);
template <class mobj, class mobj2, class robj>
static void SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
static void Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@ -189,7 +159,7 @@ public:
template <class mobj, class mobj2, class robj>
static void SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
static void Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@ -198,7 +168,7 @@ public:
const Gamma GammaB_nucl,
robj &result);
template <class mobj, class mobj2, class robj>
static void SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
static void Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@ -209,7 +179,7 @@ public:
robj &result);
public:
template <class mobj>
static void SigmaToNucleonEye(const PropagatorField &qq_loop,
static void Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
const mobj &Du_spec,
const PropagatorField &qd_tf,
const PropagatorField &qs_ti,
@ -219,7 +189,7 @@ public:
const std::string op,
SpinMatrixField &stn_corr);
template <class mobj>
static void SigmaToNucleonNonEye(const PropagatorField &qq_ti,
static void Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
const PropagatorField &qq_tf,
const mobj &Du_spec,
const PropagatorField &qd_tf,
@ -247,7 +217,7 @@ const Real BaryonUtils<FImpl>::epsilon_sgn[6] = {1.,1.,1.,-1.,-1.,-1.};
//This is the old version
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::BaryonSite(const mobj &D1,
void BaryonUtils<FImpl>::baryon_site(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_i,
@ -359,132 +329,12 @@ void BaryonUtils<FImpl>::BaryonSite(const mobj &D1,
}}
}
//New version without parity projection or trace
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::BaryonSiteMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_i,
const Gamma GammaB_i,
const Gamma GammaA_f,
const Gamma GammaB_f,
const bool * wick_contraction,
robj &result)
{
auto D1_GAi = D1 * GammaA_i;
auto GAf_D1_GAi = GammaA_f * D1_GAi;
auto GBf_D1_GAi = GammaB_f * D1_GAi;
auto D2_GBi = D2 * GammaB_i;
auto GBf_D2_GBi = GammaB_f * D2_GBi;
auto GAf_D2_GBi = GammaA_f * D2_GBi;
auto GBf_D3 = GammaB_f * D3;
auto GAf_D3 = GammaA_f * D3;
for (int ie_f=0; ie_f < 6 ; ie_f++){
int a_f = epsilon[ie_f][0]; //a
int b_f = epsilon[ie_f][1]; //b
int c_f = epsilon[ie_f][2]; //c
for (int ie_i=0; ie_i < 6 ; ie_i++){
int a_i = epsilon[ie_i][0]; //a'
int b_i = epsilon[ie_i][1]; //b'
int c_i = epsilon[ie_i][2]; //c'
Real ee = epsilon_sgn[ie_f] * epsilon_sgn[ie_i];
//This is the \delta_{456}^{123} part
if (wick_contraction[0]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int rho_f=0; rho_f<Ns; rho_f++){
auto GAf_D1_GAi_rr_cc = GAf_D1_GAi()(rho_f,rho_i)(c_f,c_i);
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
for (int beta_i=0; beta_i<Ns; beta_i++){
result()(rho_f,rho_i)() += ee * GAf_D1_GAi_rr_cc
* D2_GBi ()(alpha_f,beta_i)(a_f,a_i)
* GBf_D3 ()(alpha_f,beta_i)(b_f,b_i);
}}
}}
}
//This is the \delta_{456}^{231} part
if (wick_contraction[1]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto D1_GAi_ar_ac = D1_GAi()(alpha_f,rho_i)(a_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto GBf_D2_GBi_ab_ba = GBf_D2_GBi ()(alpha_f,beta_i)(b_f,a_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() += ee * D1_GAi_ar_ac
* GBf_D2_GBi_ab_ba
* GAf_D3 ()(rho_f,beta_i)(c_f,b_i);
}}
}}
}
//This is the \delta_{456}^{312} part
if (wick_contraction[2]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto GBf_D1_GAi_ar_bc = GBf_D1_GAi()(alpha_f,rho_i)(b_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto D3_ab_ab = D3 ()(alpha_f,beta_i)(a_f,b_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() += ee * GBf_D1_GAi_ar_bc
* GAf_D2_GBi ()(rho_f,beta_i)(c_f,a_i)
* D3_ab_ab;
}}
}}
}
//This is the \delta_{456}^{132} part
if (wick_contraction[3]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int rho_f=0; rho_f<Ns; rho_f++){
auto GAf_D1_GAi_rr_cc = GAf_D1_GAi()(rho_f,rho_i)(c_f,c_i);
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
for (int beta_i=0; beta_i<Ns; beta_i++){
result()(rho_f,rho_i)() -= ee * GAf_D1_GAi_rr_cc
* GBf_D2_GBi ()(alpha_f,beta_i)(b_f,a_i)
* D3 ()(alpha_f,beta_i)(a_f,b_i);
}}
}}
}
//This is the \delta_{456}^{321} part
if (wick_contraction[4]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto GBf_D1_GAi_ar_bc = GBf_D1_GAi()(alpha_f,rho_i)(b_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto D2_GBi_ab_aa = D2_GBi()(alpha_f,beta_i)(a_f,a_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() -= ee * GBf_D1_GAi_ar_bc
* D2_GBi_ab_aa
* GAf_D3 ()(rho_f,beta_i)(c_f,b_i);
}}
}}
}
//This is the \delta_{456}^{213} part
if (wick_contraction[5]){
for (int rho_i=0; rho_i<Ns; rho_i++){
for (int alpha_f=0; alpha_f<Ns; alpha_f++){
auto D1_GAi_ar_ac = D1_GAi()(alpha_f,rho_i)(a_f,c_i);
for (int beta_i=0; beta_i<Ns; beta_i++){
auto GBf_D3_ab_bb = GBf_D3()(alpha_f,beta_i)(b_f,b_i);
for (int rho_f=0; rho_f<Ns; rho_f++){
result()(rho_f,rho_i)() -= ee * D1_GAi_ar_ac
* GAf_D2_GBi ()(rho_f,beta_i)(c_f,a_i)
* GBf_D3_ab_bb;
}}
}}
}
}}
}
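// Each delta_{...} block above accumulates one of the six Wick
// contractions, epsilon-contracted over colour; unlike BaryonSite, the
// result keeps the open spin indices (rho_f,rho_i) rather than a
// parity-projected trace.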
/* Computes which Wick contractions should be performed for a *
* baryon 2pt function given the initial- and final-state quark *
* flavours. *
* The array wick_contractions must be of length 6. */
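// Illustrative sketch (not from the source): for a proton-like contraction
// with qi="udu", qf="udu",
//   bool wick[6];
//   BaryonUtils<WilsonImplR>::WickContractions("udu","udu",wick);
// enables exactly those permutations ie whose flavours match under
// epsilon[ie].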
template<class FImpl>
void BaryonUtils<FImpl>::WickContractions(std::string qi, std::string qf, bool* wick_contractions) {
void BaryonUtils<FImpl>::Wick_Contractions(std::string qi, std::string qf, bool* wick_contractions) {
const int epsilon[6][3] = {{0,1,2},{1,2,0},{2,0,1},{0,2,1},{2,1,0},{1,0,2}};
for (int ie=0; ie < 6 ; ie++) {
wick_contractions[ie] = (qi.size() == 3 && qf.size() == 3
@ -514,6 +364,11 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,
assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
std::cout << "GammaA (left) " << (GammaA_left.g) << std::endl;
std::cout << "GammaB (left) " << (GammaB_left.g) << std::endl;
std::cout << "GammaA (right) " << (GammaA_right.g) << std::endl;
std::cout << "GammaB (right) " << (GammaB_right.g) << std::endl;
assert(parity==1 || parity == -1 && "Parity must be +1 or -1");
@ -542,62 +397,13 @@ void BaryonUtils<FImpl>::ContractBaryons(const PropagatorField &q1_left,
auto D2 = v2[ss];
auto D3 = v3[ss];
vobj result=Zero();
BaryonSite(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result);
baryon_site(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result);
vbaryon_corr[ss] = result;
} );//end loop over lattice sites
t += usecond();
std::cout << GridLogDebug << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
}
template<class FImpl>
void BaryonUtils<FImpl>::ContractBaryonsMatrix(const PropagatorField &q1_left,
const PropagatorField &q2_left,
const PropagatorField &q3_left,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
SpinMatrixField &baryon_corr)
{
assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
GridBase *grid = q1_left.Grid();
autoView(vbaryon_corr, baryon_corr,CpuWrite);
autoView( v1 , q1_left, CpuRead);
autoView( v2 , q2_left, CpuRead);
autoView( v3 , q3_left, CpuRead);
// Real bytes =0.;
// bytes += grid->oSites() * (432.*sizeof(vComplex) + 126.*sizeof(int) + 36.*sizeof(Real));
// for (int ie=0; ie < 6 ; ie++){
// if(ie==0 or ie==3){
// bytes += grid->oSites() * (4.*sizeof(int) + 4752.*sizeof(vComplex)) * wick_contractions[ie];
// }
// else{
// bytes += grid->oSites() * (64.*sizeof(int) + 5184.*sizeof(vComplex)) * wick_contractions[ie];
// }
// }
// Real t=0.;
// t =-usecond();
accelerator_for(ss, grid->oSites(), grid->Nsimd(), {
auto D1 = v1[ss];
auto D2 = v2[ss];
auto D3 = v3[ss];
sobj result=Zero();
BaryonSiteMatrix(D1,D2,D3,GammaA_left,GammaB_left,GammaA_right,GammaB_right,wick_contractions,result);
vbaryon_corr[ss] = result;
} );//end loop over lattice sites
// t += usecond();
// std::cout << GridLogDebug << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
std::cout << std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s " << std::endl;
}
@ -608,7 +414,7 @@ void BaryonUtils<FImpl>::ContractBaryonsMatrix(const PropagatorField &q1_left,
* Wick_Contractions function above */
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::ContractBaryonsSliced(const mobj &D1,
void BaryonUtils<FImpl>::ContractBaryons_Sliced(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
@ -623,33 +429,16 @@ void BaryonUtils<FImpl>::ContractBaryonsSliced(const mobj &D1,
assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
std::cout << "GammaA (left) " << (GammaA_left.g) << std::endl;
std::cout << "GammaB (left) " << (GammaB_left.g) << std::endl;
std::cout << "GammaA (right) " << (GammaA_right.g) << std::endl;
std::cout << "GammaB (right) " << (GammaB_right.g) << std::endl;
assert(parity==1 || parity == -1 && "Parity must be +1 or -1");
for (int t=0; t<nt; t++) {
BaryonSite(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result[t]);
}
}
template <class FImpl>
template <class mobj, class robj>
void BaryonUtils<FImpl>::ContractBaryonsSlicedMatrix(const mobj &D1,
const mobj &D2,
const mobj &D3,
const Gamma GammaA_left,
const Gamma GammaB_left,
const Gamma GammaA_right,
const Gamma GammaB_right,
const bool* wick_contractions,
const int nt,
robj &result)
{
assert(Ns==4 && "Baryon code only implemented for N_spin = 4");
assert(Nc==3 && "Baryon code only implemented for N_colour = 3");
for (int t=0; t<nt; t++) {
BaryonSiteMatrix(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,wick_contractions,result[t]);
baryon_site(D1[t],D2[t],D3[t],GammaA_left,GammaB_left,GammaA_right,GammaB_right,parity,wick_contractions,result[t]);
}
}
@ -665,7 +454,7 @@ void BaryonUtils<FImpl>::ContractBaryonsSlicedMatrix(const mobj &D1,
* Dq4_tf is a quark line from t_f to t_J */
template<class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::BaryonGamma3ptGroup1Site(
void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group1_Site(
const mobj &Dq1_ti,
const mobj2 &Dq2_spec,
const mobj2 &Dq3_spec,
@ -757,7 +546,7 @@ void BaryonUtils<FImpl>::BaryonGamma3ptGroup1Site(
* Dq4_tf is a quark line from t_f to t_J */
template<class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::BaryonGamma3ptGroup2Site(
void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group2_Site(
const mobj2 &Dq1_spec,
const mobj &Dq2_ti,
const mobj2 &Dq3_spec,
@ -847,7 +636,7 @@ void BaryonUtils<FImpl>::BaryonGamma3ptGroup2Site(
* Dq4_tf is a quark line from t_f to t_J */
template<class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::BaryonGamma3ptGroup3Site(
void BaryonUtils<FImpl>::Baryon_Gamma_3pt_Group3_Site(
const mobj2 &Dq1_spec,
const mobj2 &Dq2_spec,
const mobj &Dq3_ti,
@ -939,7 +728,7 @@ void BaryonUtils<FImpl>::BaryonGamma3ptGroup3Site(
* https://aportelli.github.io/Hadrons-doc/#/mcontraction */
template<class FImpl>
template <class mobj>
void BaryonUtils<FImpl>::BaryonGamma3pt(
void BaryonUtils<FImpl>::Baryon_Gamma_3pt(
const PropagatorField &q_ti,
const mobj &Dq_spec1,
const mobj &Dq_spec2,
@ -962,7 +751,7 @@ void BaryonUtils<FImpl>::BaryonGamma3pt(
auto Dq_ti = vq_ti[ss];
auto Dq_tf = vq_tf[ss];
sobj result=Zero();
BaryonGamma3ptGroup1Site(Dq_ti,Dq_spec1,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
Baryon_Gamma_3pt_Group1_Site(Dq_ti,Dq_spec1,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
vcorr[ss] += result;
});//end loop over lattice sites
} else if (group == 2) {
@ -970,7 +759,7 @@ void BaryonUtils<FImpl>::BaryonGamma3pt(
auto Dq_ti = vq_ti[ss];
auto Dq_tf = vq_tf[ss];
sobj result=Zero();
BaryonGamma3ptGroup2Site(Dq_spec1,Dq_ti,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
Baryon_Gamma_3pt_Group2_Site(Dq_spec1,Dq_ti,Dq_spec2,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
vcorr[ss] += result;
});//end loop over lattice sites
} else if (group == 3) {
@ -978,7 +767,7 @@ void BaryonUtils<FImpl>::BaryonGamma3pt(
auto Dq_ti = vq_ti[ss];
auto Dq_tf = vq_tf[ss];
sobj result=Zero();
BaryonGamma3ptGroup3Site(Dq_spec1,Dq_spec2,Dq_ti,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
Baryon_Gamma_3pt_Group3_Site(Dq_spec1,Dq_spec2,Dq_ti,Dq_tf,GammaJ,GammaBi,GammaBf,wick_contraction,result);
vcorr[ss] += result;
});//end loop over lattice sites
@ -998,7 +787,7 @@ void BaryonUtils<FImpl>::BaryonGamma3pt(
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_Eye_site(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@ -1049,7 +838,7 @@ void BaryonUtils<FImpl>::SigmaToNucleonQ1EyeSite(const mobj &Dq_loop,
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q1_NonEye_site(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@ -1108,7 +897,7 @@ void BaryonUtils<FImpl>::SigmaToNucleonQ1NonEyeSite(const mobj &Du_ti,
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_Eye_site(const mobj &Dq_loop,
const mobj2 &Du_spec,
const mobj &Dd_tf,
const mobj &Ds_ti,
@ -1159,7 +948,7 @@ void BaryonUtils<FImpl>::SigmaToNucleonQ2EyeSite(const mobj &Dq_loop,
* Ds_ti is a quark line from t_i to t_H */
template <class FImpl>
template <class mobj, class mobj2, class robj>
void BaryonUtils<FImpl>::SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Q2_NonEye_site(const mobj &Du_ti,
const mobj &Du_tf,
const mobj2 &Du_spec,
const mobj &Dd_tf,
@ -1213,7 +1002,7 @@ void BaryonUtils<FImpl>::SigmaToNucleonQ2NonEyeSite(const mobj &Du_ti,
template<class FImpl>
template <class mobj>
void BaryonUtils<FImpl>::SigmaToNucleonEye(const PropagatorField &qq_loop,
void BaryonUtils<FImpl>::Sigma_to_Nucleon_Eye(const PropagatorField &qq_loop,
const mobj &Du_spec,
const PropagatorField &qd_tf,
const PropagatorField &qs_ti,
@ -1240,9 +1029,9 @@ void BaryonUtils<FImpl>::SigmaToNucleonEye(const PropagatorField &qq_loop,
auto Ds_ti = vs_ti[ss];
sobj result=Zero();
if(op == "Q1"){
SigmaToNucleonQ1EyeSite(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
Sigma_to_Nucleon_Q1_Eye_site(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else if(op == "Q2"){
SigmaToNucleonQ2EyeSite(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
Sigma_to_Nucleon_Q2_Eye_site(Dq_loop,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else {
assert(0 && "Weak Operator not correctly specified");
}
@ -1252,7 +1041,7 @@ void BaryonUtils<FImpl>::SigmaToNucleonEye(const PropagatorField &qq_loop,
template<class FImpl>
template <class mobj>
void BaryonUtils<FImpl>::SigmaToNucleonNonEye(const PropagatorField &qq_ti,
void BaryonUtils<FImpl>::Sigma_to_Nucleon_NonEye(const PropagatorField &qq_ti,
const PropagatorField &qq_tf,
const mobj &Du_spec,
const PropagatorField &qd_tf,
@ -1282,9 +1071,9 @@ void BaryonUtils<FImpl>::SigmaToNucleonNonEye(const PropagatorField &qq_ti,
auto Ds_ti = vs_ti[ss];
sobj result=Zero();
if(op == "Q1"){
SigmaToNucleonQ1NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
Sigma_to_Nucleon_Q1_NonEye_site(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else if(op == "Q2"){
SigmaToNucleonQ2NonEyeSite(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
Sigma_to_Nucleon_Q2_NonEye_site(Dq_ti,Dq_tf,Du_spec,Dd_tf,Ds_ti,Gamma_H,GammaB_sigma,GammaB_nucl,result);
} else {
assert(0 && "Weak Operator not correctly specified");
}
View File
@ -53,24 +53,6 @@ namespace PeriodicBC {
return Cshift(tmp,mu,-1);// moves towards positive mu
}
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu)
{
return Cshift(adj(Link), mu, -1);
}
template<class gauge> Lattice<gauge>
CovShiftIdentityForward(const Lattice<gauge> &Link, int mu)
{
return Link;
}
template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{
return Cshift(Link, mu, 1);
}
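// CovShiftIdentityForward/Backward transport the identity through a single
// link and ShiftStaple displaces an assembled staple by one site; with
// periodic boundaries these reduce to the plain Cshift calls above.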
template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
auto CovShiftForward(const Lattice<gauge> &Link,
int mu,
@ -88,7 +70,6 @@ namespace PeriodicBC {
return CovShiftBackward(Link,mu,arg);
}
}
@ -158,38 +139,6 @@ namespace ConjugateBC {
// std::cout<<"Gparity::CovCshiftBackward mu="<<mu<<std::endl;
return Cshift(tmp,mu,-1);// moves towards positive mu
}
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu) {
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;
Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);
Lattice<gauge> tmp(grid);
tmp = adj(Link);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return Cshift(tmp, mu, -1); // moves towards positive mu
}
template<class gauge> Lattice<gauge>
CovShiftIdentityForward(const Lattice<gauge> &Link, int mu) {
return Link;
}
template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{
GridBase *grid = Link.Grid();
int Lmu = grid->GlobalDimensions()[mu] - 1;
Lattice<iScalar<vInteger>> coor(grid);
LatticeCoordinate(coor, mu);
Lattice<gauge> tmp(grid);
tmp = Cshift(Link, mu, 1);
tmp = where(coor == Lmu, conjugate(tmp), tmp);
return tmp;
}
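// In the ConjugateBC variants the link (or staple) crossing the global
// boundary at coor == Lmu is complex-conjugated, implementing the
// charge-conjugate (G-parity style) boundary condition.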
template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
auto CovShiftForward(const Lattice<gauge> &Link,
View File
@ -154,8 +154,8 @@ void axpby_ssp_pminus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,co
accelerator_for(sss,nloop,vobj::Nsimd(),{
uint64_t ss = sss*Ls;
decltype(coalescedRead(y_v[ss+sp])) tmp;
spProj5m(tmp,y_v(ss+sp));
tmp = a*x_v(ss+s)+b*tmp;
coalescedWrite(z_v[ss+s],tmp);
});
}
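// Net effect: z[ss+s] = a*x[ss+s] + b*P_- y[ss+sp] along the fifth
// dimension, with P_- the spProj5m projector.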
@ -188,6 +188,7 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x)
z.Checkerboard() = x.Checkerboard();
conformable(x,z);
int Ls = grid->_rdimensions[0];
Gamma G5(Gamma::Algebra::Gamma5);
autoView( x_v, x, AcceleratorRead);
autoView( z_v, z, AcceleratorWrite);
uint64_t nloop = grid->oSites()/Ls;
@ -195,13 +196,7 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x)
uint64_t ss = sss*Ls;
for(int s=0;s<Ls;s++){
int sp = Ls-1-s;
auto tmp = x_v(ss+s);
decltype(tmp) tmp_p;
decltype(tmp) tmp_m;
spProj5p(tmp_p,tmp);
spProj5m(tmp_m,tmp);
// Use of spProj5m, 5p captures the coarse space too
coalescedWrite(z_v[ss+sp],tmp_p - tmp_m);
coalescedWrite(z_v[ss+sp],G5*x_v(ss+s));
}
});
}
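// G5R5 applies gamma_5 combined with a reflection in the fifth dimension:
// z(s) = gamma_5 x(Ls-1-s), with sp = Ls-1-s as above.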
@ -213,20 +208,10 @@ void G5C(Lattice<vobj> &z, const Lattice<vobj> &x)
z.Checkerboard() = x.Checkerboard();
conformable(x, z);
autoView( x_v, x, AcceleratorRead);
autoView( z_v, z, AcceleratorWrite);
uint64_t nloop = grid->oSites();
accelerator_for(ss,nloop,vobj::Nsimd(),{
auto tmp = x_v(ss);
decltype(tmp) tmp_p;
decltype(tmp) tmp_m;
spProj5p(tmp_p,tmp);
spProj5m(tmp_m,tmp);
coalescedWrite(z_v[ss],tmp_p - tmp_m);
});
Gamma G5(Gamma::Algebra::Gamma5);
z = G5 * x;
}
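// G5C multiplies a 4d field by gamma_5; the commented-out overload below
// kept an explicit two-block (+/-) form for coarse nbasis vectors.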
/*
template<class CComplex, int nbasis>
void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex, nbasis>> &x)
{
@ -249,7 +234,6 @@ void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex,
}
});
}
*/
NAMESPACE_END(Grid);
View File
@ -449,8 +449,7 @@ public:
LatticeReal alpha(grid);
// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
xi = 2.0 *xi;
alpha = toReal(xi);
alpha = toReal(2.0 * xi);
do {
// A. Generate two uniformly distributed pseudo-random numbers R and R',
@ -735,6 +734,7 @@ public:
}
}
template <typename GaugeField>
static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
@ -799,88 +799,6 @@ public:
}
};
template<int N>
LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
LatticeComplexD ret(grid);
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
EigenU(i,j) = Us()()(i,j);
}}
ComplexD det = EigenU.determinant();
pokeLocalSite(det,ret_v,lcoor);
});
return ret;
}
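// Computes the per-site determinant by copying each NxN colour matrix
// into Eigen; an illustrative use is checking det(U) ~ 1 after
// ProjectOnGroup.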
template<int N>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
Umu = ProjectOnGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for(int i=0;i<N;i++){
auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
element = element * det;
PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
}
}
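// After ProjectOnGroup the matrix is in U(N) with |det| = 1; scaling one
// row by conjugate(det) rotates the determinant to 1, landing in SU(N).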
template<int N>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplexD, N> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
// Explicit specialisation for SU(3).
static void
ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
{
GridBase *grid=Umu.Grid();
const int x=0;
const int y=1;
const int z=2;
// Reunitarise
Umu = ProjectOnGroup(Umu);
autoView(Umu_v,Umu,CpuWrite);
thread_for(ss,grid->oSites(),{
auto cm = Umu_v[ss];
cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy
cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz
cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx
Umu_v[ss]=cm;
});
}
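// The third row is rebuilt as the conjugate cross product of the first
// two, which for orthonormal rows enforces unitarity with det = 1.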
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSU3(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;
View File
@ -26,7 +26,7 @@
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
#ifndef __NVCC__
NAMESPACE_BEGIN(Grid);
View File
@ -0,0 +1,779 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Fujitsu_A64FX_asm_double.h
Copyright (C) 2020
Author: Nils Meyer <nils.meyer@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#define LOAD_CHIMU(base) LOAD_CHIMU_INTERLEAVED_A64FXd(base)
#define PREFETCH_CHIMU_L1(A) PREFETCH_CHIMU_L1_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L1(A) PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)
#define PREFETCH_CHIMU_L2(A) PREFETCH_CHIMU_L2_INTERNAL_A64FXd(A)
#define PREFETCH_GAUGE_L2(A) PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A) PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(A)
#define PREFETCH_RESULT_L1_STORE(A) PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(A)
#define PREFETCH1_CHIMU(A) PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A) PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B) RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base) LOAD_CHI_A64FXd(base)
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ XP_PROJ_A64FXd
#define YP_PROJ YP_PROJ_A64FXd
#define ZP_PROJ ZP_PROJ_A64FXd
#define TP_PROJ TP_PROJ_A64FXd
#define XM_PROJ XM_PROJ_A64FXd
#define YM_PROJ YM_PROJ_A64FXd
#define ZM_PROJ ZM_PROJ_A64FXd
#define TM_PROJ TM_PROJ_A64FXd
#define XP_RECON XP_RECON_A64FXd
#define XM_RECON XM_RECON_A64FXd
#define XM_RECON_ACCUM XM_RECON_ACCUM_A64FXd
#define YM_RECON_ACCUM YM_RECON_ACCUM_A64FXd
#define ZM_RECON_ACCUM ZM_RECON_ACCUM_A64FXd
#define TM_RECON_ACCUM TM_RECON_ACCUM_A64FXd
#define XP_RECON_ACCUM XP_RECON_ACCUM_A64FXd
#define YP_RECON_ACCUM YP_RECON_ACCUM_A64FXd
#define ZP_RECON_ACCUM ZP_RECON_ACCUM_A64FXd
#define TP_RECON_ACCUM TP_RECON_ACCUM_A64FXd
#define PERMUTE_DIR0 0
#define PERMUTE_DIR1 1
#define PERMUTE_DIR2 2
#define PERMUTE_DIR3 3
#define PERMUTE PERMUTE_A64FXd;
#define LOAD_TABLE(Dir) if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; }
#define MAYBEPERM(Dir,perm) if (Dir != 3) { if (perm) { PERMUTE; } }
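// The macros above map the generic Wilson kernel onto A64FX SVE inline
// assembly: the *_PROJ/*_RECON pairs implement the spin projections, and
// LOAD_TABLE/MAYBEPERM select the lane permutation needed when a neighbour
// access crosses the SIMD vector in directions 0-2 (direction 3 needs no
// in-register permute here).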
// DECLARATIONS
#define DECLARATIONS_A64FXd \
const uint64_t lut[4][8] = { \
{4, 5, 6, 7, 0, 1, 2, 3}, \
{2, 3, 0, 1, 6, 7, 4, 5}, \
{1, 0, 3, 2, 5, 4, 7, 6}, \
{0, 1, 2, 4, 5, 6, 7, 8} };\
asm ( \
"fmov z31.d , 0 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
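// Each lut row is a lane-permutation pattern consumed by the table/permute
// operations above; z31 is zeroed once up front for use as a constant by
// the later macros.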
// RESULT
#define RESULT_A64FXd(base) \
{ \
asm ( \
"str z0, [%[storeptr], -6, mul vl] \n\t" \
"str z1, [%[storeptr], -5, mul vl] \n\t" \
"str z2, [%[storeptr], -4, mul vl] \n\t" \
"str z3, [%[storeptr], -3, mul vl] \n\t" \
"str z4, [%[storeptr], -2, mul vl] \n\t" \
"str z5, [%[storeptr], -1, mul vl] \n\t" \
"str z6, [%[storeptr], 0, mul vl] \n\t" \
"str z7, [%[storeptr], 1, mul vl] \n\t" \
"str z8, [%[storeptr], 2, mul vl] \n\t" \
"str z9, [%[storeptr], 3, mul vl] \n\t" \
"str z10, [%[storeptr], 4, mul vl] \n\t" \
"str z11, [%[storeptr], 5, mul vl] \n\t" \
: \
: [storeptr] "r" (base + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base) \
{ \
asm ( \
"prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base) \
{ \
asm ( \
"prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
asm ( \
"prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
"prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXd(base) \
{ \
asm ( \
"ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base) \
{ \
asm ( \
"ptrue p5.d \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (base + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXd \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.d \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXd \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.d \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (0) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_TABLE1
#define LOAD_TABLE1 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (1) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_TABLE2
#define LOAD_TABLE2 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (2) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_TABLE3
#define LOAD_TABLE3 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (3) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// PERMUTE
#define PERMUTE_A64FXd \
asm ( \
"tbl z12.d, { z12.d }, z30.d \n\t" \
"tbl z13.d, { z13.d }, z30.d \n\t" \
"tbl z14.d, { z14.d }, z30.d \n\t" \
"tbl z15.d, { z15.d }, z30.d \n\t" \
"tbl z16.d, { z16.d }, z30.d \n\t" \
"tbl z17.d, { z17.d }, z30.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_GAUGE
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
asm ( \
"ptrue p5.d \n\t" \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
"movprfx z18.d, p5/m, z31.d \n\t" \
"fcmla z18.d, p5/m, z24.d, z12.d, 0 \n\t" \
"movprfx z21.d, p5/m, z31.d \n\t" \
"fcmla z21.d, p5/m, z24.d, z15.d, 0 \n\t" \
"movprfx z19.d, p5/m, z31.d \n\t" \
"fcmla z19.d, p5/m, z25.d, z12.d, 0 \n\t" \
"movprfx z22.d, p5/m, z31.d \n\t" \
"fcmla z22.d, p5/m, z25.d, z15.d, 0 \n\t" \
"movprfx z20.d, p5/m, z31.d \n\t" \
"fcmla z20.d, p5/m, z26.d, z12.d, 0 \n\t" \
"movprfx z23.d, p5/m, z31.d \n\t" \
"fcmla z23.d, p5/m, z26.d, z15.d, 0 \n\t" \
"fcmla z18.d, p5/m, z24.d, z12.d, 90 \n\t" \
"fcmla z21.d, p5/m, z24.d, z15.d, 90 \n\t" \
"fcmla z19.d, p5/m, z25.d, z12.d, 90 \n\t" \
"fcmla z22.d, p5/m, z25.d, z15.d, 90 \n\t" \
"fcmla z20.d, p5/m, z26.d, z12.d, 90 \n\t" \
"fcmla z23.d, p5/m, z26.d, z15.d, 90 \n\t" \
"ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
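The signed offsets -6 ... +2 address the nine 64-byte vectors of one SU(3) link around baseU + 384, consumed column-wise: z24-z26 take column 0, z27-z29 column 1, and the trailing loads refill z24-z26 with column 2 for MULT_2SPIN_2, so the two macros together form UChi_i = sum_j U_ij * chi_j for both two-spinor rows. Each rot-0/rot-90 fcmla pair is one full complex multiply-accumulate; a scalar model per complex lane (names illustrative):

typedef struct { double re, im; } cplx;
/* After the #0 and #90 rotations, acc has accumulated u * chi. */
static inline void fcmla_pair(cplx *acc, cplx u, cplx chi)
{
  acc->re += u.re * chi.re;   /* fcmla #0  */
  acc->im += u.re * chi.im;   /* fcmla #0  */
  acc->re -= u.im * chi.im;   /* fcmla #90 */
  acc->im += u.im * chi.re;   /* fcmla #90 */
}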
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXd \
{ \
asm ( \
"fcmla z18.d, p5/m, z27.d, z13.d, 0 \n\t" \
"fcmla z21.d, p5/m, z27.d, z16.d, 0 \n\t" \
"fcmla z19.d, p5/m, z28.d, z13.d, 0 \n\t" \
"fcmla z22.d, p5/m, z28.d, z16.d, 0 \n\t" \
"fcmla z20.d, p5/m, z29.d, z13.d, 0 \n\t" \
"fcmla z23.d, p5/m, z29.d, z16.d, 0 \n\t" \
"fcmla z18.d, p5/m, z27.d, z13.d, 90 \n\t" \
"fcmla z21.d, p5/m, z27.d, z16.d, 90 \n\t" \
"fcmla z19.d, p5/m, z28.d, z13.d, 90 \n\t" \
"fcmla z22.d, p5/m, z28.d, z16.d, 90 \n\t" \
"fcmla z20.d, p5/m, z29.d, z13.d, 90 \n\t" \
"fcmla z23.d, p5/m, z29.d, z16.d, 90 \n\t" \
"fcmla z18.d, p5/m, z24.d, z14.d, 0 \n\t" \
"fcmla z21.d, p5/m, z24.d, z17.d, 0 \n\t" \
"fcmla z19.d, p5/m, z25.d, z14.d, 0 \n\t" \
"fcmla z22.d, p5/m, z25.d, z17.d, 0 \n\t" \
"fcmla z20.d, p5/m, z26.d, z14.d, 0 \n\t" \
"fcmla z23.d, p5/m, z26.d, z17.d, 0 \n\t" \
"fcmla z18.d, p5/m, z24.d, z14.d, 90 \n\t" \
"fcmla z21.d, p5/m, z24.d, z17.d, 90 \n\t" \
"fcmla z19.d, p5/m, z25.d, z14.d, 90 \n\t" \
"fcmla z22.d, p5/m, z25.d, z17.d, 90 \n\t" \
"fcmla z20.d, p5/m, z26.d, z14.d, 90 \n\t" \
"fcmla z23.d, p5/m, z26.d, z17.d, 90 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XP_PROJ
#define XP_PROJ_A64FXd \
{ \
asm ( \
"fcadd z12.d, p5/m, z12.d, z21.d, 90 \n\t" \
"fcadd z13.d, p5/m, z13.d, z22.d, 90 \n\t" \
"fcadd z14.d, p5/m, z14.d, z23.d, 90 \n\t" \
"fcadd z15.d, p5/m, z15.d, z18.d, 90 \n\t" \
"fcadd z16.d, p5/m, z16.d, z19.d, 90 \n\t" \
"fcadd z17.d, p5/m, z17.d, z20.d, 90 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
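fcadd adds a 90- or 270-degree-rotated operand, i.e. plus or minus i times it, per complex lane; with the LOAD_CHIMU register map (z12-z14 = psi_0, z15-z17 = psi_1, z18-z20 = psi_2, z21-z23 = psi_3) XP_PROJ therefore builds the x-forward two-spinor chi_0 = psi_0 + i psi_1's partner, concretely chi_0 = psi_0 + i psi_3 and chi_1 = psi_1 + i psi_2, in place. A lane model of the two rotations, assuming interleaved re/im storage:

typedef struct { double re, im; } cplx;
static inline cplx fcadd90 (cplx a, cplx b) { return (cplx){ a.re - b.im, a.im + b.re }; }  /* a + i*b */
static inline cplx fcadd270(cplx a, cplx b) { return (cplx){ a.re + b.im, a.im - b.re }; }  /* a - i*b */

The remaining PROJ macros differ only in sign and spin pairing; TP/TM carry no factor of i, hence plain fadd/fsub.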
// XP_RECON
#define XP_RECON_A64FXd \
asm ( \
"movprfx z6.d, p5/m, z31.d \n\t" \
"fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \
"movprfx z7.d, p5/m, z31.d \n\t" \
"fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \
"movprfx z8.d, p5/m, z31.d \n\t" \
"fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \
"movprfx z9.d, p5/m, z31.d \n\t" \
"fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \
"movprfx z10.d, p5/m, z31.d \n\t" \
"fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \
"movprfx z11.d, p5/m, z31.d \n\t" \
"fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \
"mov z0.d, p5/m, z18.d \n\t" \
"mov z1.d, p5/m, z19.d \n\t" \
"mov z2.d, p5/m, z20.d \n\t" \
"mov z3.d, p5/m, z21.d \n\t" \
"mov z4.d, p5/m, z22.d \n\t" \
"mov z5.d, p5/m, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
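XP_RECON seeds the lower spin rows from the zeroed z31 via movprfx and applies fcadd #270, giving the reconstruction r_0 = chi_0, r_1 = chi_1, r_2 = -i chi_1, r_3 = -i chi_0 (UChi now sits in z18-z23); the *_RECON_ACCUM variants below perform the same arithmetic but add into the live result registers z0-z11 instead of overwriting them.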
// XP_RECON_ACCUM
#define XP_RECON_ACCUM_A64FXd \
asm ( \
"fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YP_PROJ
#define YP_PROJ_A64FXd \
{ \
asm ( \
"fsub z12.d, p5/m, z12.d, z21.d \n\t" \
"fsub z13.d, p5/m, z13.d, z22.d \n\t" \
"fsub z14.d, p5/m, z14.d, z23.d \n\t" \
"fadd z15.d, p5/m, z15.d, z18.d \n\t" \
"fadd z16.d, p5/m, z16.d, z19.d \n\t" \
"fadd z17.d, p5/m, z17.d, z20.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ZP_PROJ
#define ZP_PROJ_A64FXd \
{ \
asm ( \
"fcadd z12.d, p5/m, z12.d, z18.d, 90 \n\t" \
"fcadd z13.d, p5/m, z13.d, z19.d, 90 \n\t" \
"fcadd z14.d, p5/m, z14.d, z20.d, 90 \n\t" \
"fcadd z15.d, p5/m, z15.d, z21.d, 270 \n\t" \
"fcadd z16.d, p5/m, z16.d, z22.d, 270 \n\t" \
"fcadd z17.d, p5/m, z17.d, z23.d, 270 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// TP_PROJ
#define TP_PROJ_A64FXd \
{ \
asm ( \
"fadd z12.d, p5/m, z12.d, z18.d \n\t" \
"fadd z13.d, p5/m, z13.d, z19.d \n\t" \
"fadd z14.d, p5/m, z14.d, z20.d \n\t" \
"fadd z15.d, p5/m, z15.d, z21.d \n\t" \
"fadd z16.d, p5/m, z16.d, z22.d \n\t" \
"fadd z17.d, p5/m, z17.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_PROJ
#define XM_PROJ_A64FXd \
{ \
asm ( \
"fcadd z12.d, p5/m, z12.d, z21.d, 270 \n\t" \
"fcadd z13.d, p5/m, z13.d, z22.d, 270 \n\t" \
"fcadd z14.d, p5/m, z14.d, z23.d, 270 \n\t" \
"fcadd z15.d, p5/m, z15.d, z18.d, 270 \n\t" \
"fcadd z16.d, p5/m, z16.d, z19.d, 270 \n\t" \
"fcadd z17.d, p5/m, z17.d, z20.d, 270 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_RECON
#define XM_RECON_A64FXd \
asm ( \
"movprfx z6.d, p5/m, z31.d \n\t" \
"fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \
"movprfx z7.d, p5/m, z31.d \n\t" \
"fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \
"movprfx z8.d, p5/m, z31.d \n\t" \
"fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \
"movprfx z9.d, p5/m, z31.d \n\t" \
"fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \
"movprfx z10.d, p5/m, z31.d \n\t" \
"fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \
"movprfx z11.d, p5/m, z31.d \n\t" \
"fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \
"mov z0.d, p5/m, z18.d \n\t" \
"mov z1.d, p5/m, z19.d \n\t" \
"mov z2.d, p5/m, z20.d \n\t" \
"mov z3.d, p5/m, z21.d \n\t" \
"mov z4.d, p5/m, z22.d \n\t" \
"mov z5.d, p5/m, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YM_PROJ
#define YM_PROJ_A64FXd \
{ \
asm ( \
"fadd z12.d, p5/m, z12.d, z21.d \n\t" \
"fadd z13.d, p5/m, z13.d, z22.d \n\t" \
"fadd z14.d, p5/m, z14.d, z23.d \n\t" \
"fsub z15.d, p5/m, z15.d, z18.d \n\t" \
"fsub z16.d, p5/m, z16.d, z19.d \n\t" \
"fsub z17.d, p5/m, z17.d, z20.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ZM_PROJ
#define ZM_PROJ_A64FXd \
{ \
asm ( \
"fcadd z12.d, p5/m, z12.d, z18.d, 270 \n\t" \
"fcadd z13.d, p5/m, z13.d, z19.d, 270 \n\t" \
"fcadd z14.d, p5/m, z14.d, z20.d, 270 \n\t" \
"fcadd z15.d, p5/m, z15.d, z21.d, 90 \n\t" \
"fcadd z16.d, p5/m, z16.d, z22.d, 90 \n\t" \
"fcadd z17.d, p5/m, z17.d, z23.d, 90 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// TM_PROJ
#define TM_PROJ_A64FXd \
{ \
asm ( \
"ptrue p5.d \n\t" \
"fsub z12.d, p5/m, z12.d, z18.d \n\t" \
"fsub z13.d, p5/m, z13.d, z19.d \n\t" \
"fsub z14.d, p5/m, z14.d, z20.d \n\t" \
"fsub z15.d, p5/m, z15.d, z21.d \n\t" \
"fsub z16.d, p5/m, z16.d, z22.d \n\t" \
"fsub z17.d, p5/m, z17.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_RECON_ACCUM
#define XM_RECON_ACCUM_A64FXd \
asm ( \
"fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \
"fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \
"fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \
"fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \
"fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \
"fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YP_RECON_ACCUM
#define YP_RECON_ACCUM_A64FXd \
asm ( \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fsub z9.d, p5/m, z9.d, z18.d \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fsub z10.d, p5/m, z10.d, z19.d \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fsub z11.d, p5/m, z11.d, z20.d \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fadd z6.d, p5/m, z6.d, z21.d \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fadd z7.d, p5/m, z7.d, z22.d \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
"fadd z8.d, p5/m, z8.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YM_RECON_ACCUM
#define YM_RECON_ACCUM_A64FXd \
asm ( \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fadd z9.d, p5/m, z9.d, z18.d \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fadd z10.d, p5/m, z10.d, z19.d \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fadd z11.d, p5/m, z11.d, z20.d \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fsub z6.d, p5/m, z6.d, z21.d \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fsub z7.d, p5/m, z7.d, z22.d \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
"fsub z8.d, p5/m, z8.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// ZP_RECON_ACCUM
#define ZP_RECON_ACCUM_A64FXd \
asm ( \
"fcadd z6.d, p5/m, z6.d, z18.d, 270 \n\t" \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fcadd z7.d, p5/m, z7.d, z19.d, 270 \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fcadd z8.d, p5/m, z8.d, z20.d, 270 \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fcadd z9.d, p5/m, z9.d, z21.d, 90 \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fcadd z10.d, p5/m, z10.d, z22.d, 90 \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fcadd z11.d, p5/m, z11.d, z23.d, 90 \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// ZM_RECON_ACCUM
#define ZM_RECON_ACCUM_A64FXd \
asm ( \
"fcadd z6.d, p5/m, z6.d, z18.d, 90 \n\t" \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fcadd z7.d, p5/m, z7.d, z19.d, 90 \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fcadd z8.d, p5/m, z8.d, z20.d, 90 \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fcadd z9.d, p5/m, z9.d, z21.d, 270 \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fcadd z10.d, p5/m, z10.d, z22.d, 270 \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fcadd z11.d, p5/m, z11.d, z23.d, 270 \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// TP_RECON_ACCUM
#define TP_RECON_ACCUM_A64FXd \
asm ( \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fadd z6.d, p5/m, z6.d, z18.d \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fadd z7.d, p5/m, z7.d, z19.d \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fadd z8.d, p5/m, z8.d, z20.d \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fadd z9.d, p5/m, z9.d, z21.d \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fadd z10.d, p5/m, z10.d, z22.d \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
"fadd z11.d, p5/m, z11.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// TM_RECON_ACCUM
#define TM_RECON_ACCUM_A64FXd \
asm ( \
"fadd z0.d, p5/m, z0.d, z18.d \n\t" \
"fsub z6.d, p5/m, z6.d, z18.d \n\t" \
"fadd z1.d, p5/m, z1.d, z19.d \n\t" \
"fsub z7.d, p5/m, z7.d, z19.d \n\t" \
"fadd z2.d, p5/m, z2.d, z20.d \n\t" \
"fsub z8.d, p5/m, z8.d, z20.d \n\t" \
"fadd z3.d, p5/m, z3.d, z21.d \n\t" \
"fsub z9.d, p5/m, z9.d, z21.d \n\t" \
"fadd z4.d, p5/m, z4.d, z22.d \n\t" \
"fsub z10.d, p5/m, z10.d, z22.d \n\t" \
"fadd z5.d, p5/m, z5.d, z23.d \n\t" \
"fsub z11.d, p5/m, z11.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// ZERO_PSI
#define ZERO_PSI_A64FXd \
asm ( \
"ptrue p5.d \n\t" \
"fmov z0.d , 0 \n\t" \
"fmov z1.d , 0 \n\t" \
"fmov z2.d , 0 \n\t" \
"fmov z3.d , 0 \n\t" \
"fmov z4.d , 0 \n\t" \
"fmov z5.d , 0 \n\t" \
"fmov z6.d , 0 \n\t" \
"fmov z7.d , 0 \n\t" \
"fmov z8.d , 0 \n\t" \
"fmov z9.d , 0 \n\t" \
"fmov z10.d , 0 \n\t" \
"fmov z11.d , 0 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base) \
{ \
asm ( \
"prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base) \
{ \
asm ( \
"prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXd \
asm ( \
"fadd z0.d, p5/m, z0.d, z12.d \n\t" \
"fadd z1.d, p5/m, z1.d, z13.d \n\t" \
"fadd z2.d, p5/m, z2.d, z14.d \n\t" \
"fadd z3.d, p5/m, z3.d, z15.d \n\t" \
"fadd z4.d, p5/m, z4.d, z16.d \n\t" \
"fadd z5.d, p5/m, z5.d, z17.d \n\t" \
"fadd z6.d, p5/m, z6.d, z18.d \n\t" \
"fadd z7.d, p5/m, z7.d, z19.d \n\t" \
"fadd z8.d, p5/m, z8.d, z20.d \n\t" \
"fadd z9.d, p5/m, z9.d, z21.d \n\t" \
"fadd z10.d, p5/m, z10.d, z22.d \n\t" \
"fadd z11.d, p5/m, z11.d, z23.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
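ADD_RESULT(base,basep) composes these pieces: reload the spinor at base into z12-z23, fold it into the accumulated result with the twelve fadds above, then store back with RESULT_A64FXd, a read-modify-write used when contributions are merged in separate passes (e.g. interior and halo sites).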

View File

@ -0,0 +1,779 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Fujitsu_A64FX_asm_single.h
Copyright (C) 2020
Author: Nils Meyer <nils.meyer@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#define LOAD_CHIMU(base) LOAD_CHIMU_INTERLEAVED_A64FXf(base)
#define PREFETCH_CHIMU_L1(A) PREFETCH_CHIMU_L1_INTERNAL_A64FXf(A)
#define PREFETCH_GAUGE_L1(A) PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)
#define PREFETCH_CHIMU_L2(A) PREFETCH_CHIMU_L2_INTERNAL_A64FXf(A)
#define PREFETCH_GAUGE_L2(A) PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)
#define PF_GAUGE(A)
#define PREFETCH_RESULT_L2_STORE(A) PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(A)
#define PREFETCH_RESULT_L1_STORE(A) PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(A)
#define PREFETCH1_CHIMU(A) PREFETCH_CHIMU_L1(A)
#define PREFETCH_CHIMU(A) PREFETCH_CHIMU_L1(A)
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXf
#define SAVE_RESULT(A,B) RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXf(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXf
#define LOAD_CHI(base) LOAD_CHI_A64FXf(base)
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)
#define XP_PROJ XP_PROJ_A64FXf
#define YP_PROJ YP_PROJ_A64FXf
#define ZP_PROJ ZP_PROJ_A64FXf
#define TP_PROJ TP_PROJ_A64FXf
#define XM_PROJ XM_PROJ_A64FXf
#define YM_PROJ YM_PROJ_A64FXf
#define ZM_PROJ ZM_PROJ_A64FXf
#define TM_PROJ TM_PROJ_A64FXf
#define XP_RECON XP_RECON_A64FXf
#define XM_RECON XM_RECON_A64FXf
#define XM_RECON_ACCUM XM_RECON_ACCUM_A64FXf
#define YM_RECON_ACCUM YM_RECON_ACCUM_A64FXf
#define ZM_RECON_ACCUM ZM_RECON_ACCUM_A64FXf
#define TM_RECON_ACCUM TM_RECON_ACCUM_A64FXf
#define XP_RECON_ACCUM XP_RECON_ACCUM_A64FXf
#define YP_RECON_ACCUM YP_RECON_ACCUM_A64FXf
#define ZP_RECON_ACCUM ZP_RECON_ACCUM_A64FXf
#define TP_RECON_ACCUM TP_RECON_ACCUM_A64FXf
#define PERMUTE_DIR0 0
#define PERMUTE_DIR1 1
#define PERMUTE_DIR2 2
#define PERMUTE_DIR3 3
#define PERMUTE PERMUTE_A64FXf;
#define LOAD_TABLE(Dir) if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; } else if (Dir == 3) { LOAD_TABLE3; }
#define MAYBEPERM(A,perm) if (perm) { PERMUTE; }
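From here the single-precision file repeats the double-precision macros lane-for-lane with .s elements: a 512-bit vector now holds sixteen floats (eight complex numbers), the permute tables become uint32_t lut[4][16], and because twice as many sites are folded into each vector an extra permute level is needed, so LOAD_TABLE gains a Dir == 3 case and MAYBEPERM permutes for every direction.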
// DECLARATIONS
#define DECLARATIONS_A64FXf \
const uint32_t lut[4][16] = { \
{8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \
{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \
asm ( \
"fmov z31.s , 0 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// RESULT
#define RESULT_A64FXf(base) \
{ \
asm ( \
"str z0, [%[storeptr], -6, mul vl] \n\t" \
"str z1, [%[storeptr], -5, mul vl] \n\t" \
"str z2, [%[storeptr], -4, mul vl] \n\t" \
"str z3, [%[storeptr], -3, mul vl] \n\t" \
"str z4, [%[storeptr], -2, mul vl] \n\t" \
"str z5, [%[storeptr], -1, mul vl] \n\t" \
"str z6, [%[storeptr], 0, mul vl] \n\t" \
"str z7, [%[storeptr], 1, mul vl] \n\t" \
"str z8, [%[storeptr], 2, mul vl] \n\t" \
"str z9, [%[storeptr], 3, mul vl] \n\t" \
"str z10, [%[storeptr], 4, mul vl] \n\t" \
"str z11, [%[storeptr], 5, mul vl] \n\t" \
: \
: [storeptr] "r" (base + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base) \
{ \
asm ( \
"prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base) \
{ \
asm ( \
"prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A) \
{ \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
asm ( \
"prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
"prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXf(base) \
{ \
asm ( \
"ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base) \
{ \
asm ( \
"ptrue p5.s \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (base + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.s \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.s \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (0) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_TABLE1
#define LOAD_TABLE1 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (1) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_TABLE2
#define LOAD_TABLE2 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (2) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_TABLE3
#define LOAD_TABLE3 \
asm ( \
"ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
: \
: [tableptr] "r" (&lut[0]),[index] "i" (3) \
: "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// PERMUTE
#define PERMUTE_A64FXf \
asm ( \
"tbl z12.s, { z12.s }, z30.s \n\t" \
"tbl z13.s, { z13.s }, z30.s \n\t" \
"tbl z14.s, { z14.s }, z30.s \n\t" \
"tbl z15.s, { z15.s }, z30.s \n\t" \
"tbl z16.s, { z16.s }, z30.s \n\t" \
"tbl z17.s, { z17.s }, z30.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// LOAD_GAUGE
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
asm ( \
"ptrue p5.s \n\t" \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
"movprfx z18.s, p5/m, z31.s \n\t" \
"fcmla z18.s, p5/m, z24.s, z12.s, 0 \n\t" \
"movprfx z21.s, p5/m, z31.s \n\t" \
"fcmla z21.s, p5/m, z24.s, z15.s, 0 \n\t" \
"movprfx z19.s, p5/m, z31.s \n\t" \
"fcmla z19.s, p5/m, z25.s, z12.s, 0 \n\t" \
"movprfx z22.s, p5/m, z31.s \n\t" \
"fcmla z22.s, p5/m, z25.s, z15.s, 0 \n\t" \
"movprfx z20.s, p5/m, z31.s \n\t" \
"fcmla z20.s, p5/m, z26.s, z12.s, 0 \n\t" \
"movprfx z23.s, p5/m, z31.s \n\t" \
"fcmla z23.s, p5/m, z26.s, z15.s, 0 \n\t" \
"fcmla z18.s, p5/m, z24.s, z12.s, 90 \n\t" \
"fcmla z21.s, p5/m, z24.s, z15.s, 90 \n\t" \
"fcmla z19.s, p5/m, z25.s, z12.s, 90 \n\t" \
"fcmla z22.s, p5/m, z25.s, z15.s, 90 \n\t" \
"fcmla z20.s, p5/m, z26.s, z12.s, 90 \n\t" \
"fcmla z23.s, p5/m, z26.s, z15.s, 90 \n\t" \
"ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXf \
{ \
asm ( \
"fcmla z18.s, p5/m, z27.s, z13.s, 0 \n\t" \
"fcmla z21.s, p5/m, z27.s, z16.s, 0 \n\t" \
"fcmla z19.s, p5/m, z28.s, z13.s, 0 \n\t" \
"fcmla z22.s, p5/m, z28.s, z16.s, 0 \n\t" \
"fcmla z20.s, p5/m, z29.s, z13.s, 0 \n\t" \
"fcmla z23.s, p5/m, z29.s, z16.s, 0 \n\t" \
"fcmla z18.s, p5/m, z27.s, z13.s, 90 \n\t" \
"fcmla z21.s, p5/m, z27.s, z16.s, 90 \n\t" \
"fcmla z19.s, p5/m, z28.s, z13.s, 90 \n\t" \
"fcmla z22.s, p5/m, z28.s, z16.s, 90 \n\t" \
"fcmla z20.s, p5/m, z29.s, z13.s, 90 \n\t" \
"fcmla z23.s, p5/m, z29.s, z16.s, 90 \n\t" \
"fcmla z18.s, p5/m, z24.s, z14.s, 0 \n\t" \
"fcmla z21.s, p5/m, z24.s, z17.s, 0 \n\t" \
"fcmla z19.s, p5/m, z25.s, z14.s, 0 \n\t" \
"fcmla z22.s, p5/m, z25.s, z17.s, 0 \n\t" \
"fcmla z20.s, p5/m, z26.s, z14.s, 0 \n\t" \
"fcmla z23.s, p5/m, z26.s, z17.s, 0 \n\t" \
"fcmla z18.s, p5/m, z24.s, z14.s, 90 \n\t" \
"fcmla z21.s, p5/m, z24.s, z17.s, 90 \n\t" \
"fcmla z19.s, p5/m, z25.s, z14.s, 90 \n\t" \
"fcmla z22.s, p5/m, z25.s, z17.s, 90 \n\t" \
"fcmla z20.s, p5/m, z26.s, z14.s, 90 \n\t" \
"fcmla z23.s, p5/m, z26.s, z17.s, 90 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XP_PROJ
#define XP_PROJ_A64FXf \
{ \
asm ( \
"fcadd z12.s, p5/m, z12.s, z21.s, 90 \n\t" \
"fcadd z13.s, p5/m, z13.s, z22.s, 90 \n\t" \
"fcadd z14.s, p5/m, z14.s, z23.s, 90 \n\t" \
"fcadd z15.s, p5/m, z15.s, z18.s, 90 \n\t" \
"fcadd z16.s, p5/m, z16.s, z19.s, 90 \n\t" \
"fcadd z17.s, p5/m, z17.s, z20.s, 90 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XP_RECON
#define XP_RECON_A64FXf \
asm ( \
"movprfx z6.s, p5/m, z31.s \n\t" \
"fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
"movprfx z7.s, p5/m, z31.s \n\t" \
"fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
"movprfx z8.s, p5/m, z31.s \n\t" \
"fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
"movprfx z9.s, p5/m, z31.s \n\t" \
"fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
"movprfx z10.s, p5/m, z31.s \n\t" \
"fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
"movprfx z11.s, p5/m, z31.s \n\t" \
"fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
"mov z0.s, p5/m, z18.s \n\t" \
"mov z1.s, p5/m, z19.s \n\t" \
"mov z2.s, p5/m, z20.s \n\t" \
"mov z3.s, p5/m, z21.s \n\t" \
"mov z4.s, p5/m, z22.s \n\t" \
"mov z5.s, p5/m, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// XP_RECON_ACCUM
#define XP_RECON_ACCUM_A64FXf \
asm ( \
"fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YP_PROJ
#define YP_PROJ_A64FXf \
{ \
asm ( \
"fsub z12.s, p5/m, z12.s, z21.s \n\t" \
"fsub z13.s, p5/m, z13.s, z22.s \n\t" \
"fsub z14.s, p5/m, z14.s, z23.s \n\t" \
"fadd z15.s, p5/m, z15.s, z18.s \n\t" \
"fadd z16.s, p5/m, z16.s, z19.s \n\t" \
"fadd z17.s, p5/m, z17.s, z20.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ZP_PROJ
#define ZP_PROJ_A64FXf \
{ \
asm ( \
"fcadd z12.s, p5/m, z12.s, z18.s, 90 \n\t" \
"fcadd z13.s, p5/m, z13.s, z19.s, 90 \n\t" \
"fcadd z14.s, p5/m, z14.s, z20.s, 90 \n\t" \
"fcadd z15.s, p5/m, z15.s, z21.s, 270 \n\t" \
"fcadd z16.s, p5/m, z16.s, z22.s, 270 \n\t" \
"fcadd z17.s, p5/m, z17.s, z23.s, 270 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// TP_PROJ
#define TP_PROJ_A64FXf \
{ \
asm ( \
"fadd z12.s, p5/m, z12.s, z18.s \n\t" \
"fadd z13.s, p5/m, z13.s, z19.s \n\t" \
"fadd z14.s, p5/m, z14.s, z20.s \n\t" \
"fadd z15.s, p5/m, z15.s, z21.s \n\t" \
"fadd z16.s, p5/m, z16.s, z22.s \n\t" \
"fadd z17.s, p5/m, z17.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_PROJ
#define XM_PROJ_A64FXf \
{ \
asm ( \
"fcadd z12.s, p5/m, z12.s, z21.s, 270 \n\t" \
"fcadd z13.s, p5/m, z13.s, z22.s, 270 \n\t" \
"fcadd z14.s, p5/m, z14.s, z23.s, 270 \n\t" \
"fcadd z15.s, p5/m, z15.s, z18.s, 270 \n\t" \
"fcadd z16.s, p5/m, z16.s, z19.s, 270 \n\t" \
"fcadd z17.s, p5/m, z17.s, z20.s, 270 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_RECON
#define XM_RECON_A64FXf \
asm ( \
"movprfx z6.s, p5/m, z31.s \n\t" \
"fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
"movprfx z7.s, p5/m, z31.s \n\t" \
"fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
"movprfx z8.s, p5/m, z31.s \n\t" \
"fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
"movprfx z9.s, p5/m, z31.s \n\t" \
"fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
"movprfx z10.s, p5/m, z31.s \n\t" \
"fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
"movprfx z11.s, p5/m, z31.s \n\t" \
"fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
"mov z0.s, p5/m, z18.s \n\t" \
"mov z1.s, p5/m, z19.s \n\t" \
"mov z2.s, p5/m, z20.s \n\t" \
"mov z3.s, p5/m, z21.s \n\t" \
"mov z4.s, p5/m, z22.s \n\t" \
"mov z5.s, p5/m, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YM_PROJ
#define YM_PROJ_A64FXf \
{ \
asm ( \
"fadd z12.s, p5/m, z12.s, z21.s \n\t" \
"fadd z13.s, p5/m, z13.s, z22.s \n\t" \
"fadd z14.s, p5/m, z14.s, z23.s \n\t" \
"fsub z15.s, p5/m, z15.s, z18.s \n\t" \
"fsub z16.s, p5/m, z16.s, z19.s \n\t" \
"fsub z17.s, p5/m, z17.s, z20.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ZM_PROJ
#define ZM_PROJ_A64FXf \
{ \
asm ( \
"fcadd z12.s, p5/m, z12.s, z18.s, 270 \n\t" \
"fcadd z13.s, p5/m, z13.s, z19.s, 270 \n\t" \
"fcadd z14.s, p5/m, z14.s, z20.s, 270 \n\t" \
"fcadd z15.s, p5/m, z15.s, z21.s, 90 \n\t" \
"fcadd z16.s, p5/m, z16.s, z22.s, 90 \n\t" \
"fcadd z17.s, p5/m, z17.s, z23.s, 90 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// TM_PROJ
#define TM_PROJ_A64FXf \
{ \
asm ( \
"ptrue p5.s \n\t" \
"fsub z12.s, p5/m, z12.s, z18.s \n\t" \
"fsub z13.s, p5/m, z13.s, z19.s \n\t" \
"fsub z14.s, p5/m, z14.s, z20.s \n\t" \
"fsub z15.s, p5/m, z15.s, z21.s \n\t" \
"fsub z16.s, p5/m, z16.s, z22.s \n\t" \
"fsub z17.s, p5/m, z17.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_RECON_ACCUM
#define XM_RECON_ACCUM_A64FXf \
asm ( \
"fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
"fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
"fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
"fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
"fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
"fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YP_RECON_ACCUM
#define YP_RECON_ACCUM_A64FXf \
asm ( \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fsub z9.s, p5/m, z9.s, z18.s \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fsub z10.s, p5/m, z10.s, z19.s \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fsub z11.s, p5/m, z11.s, z20.s \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fadd z6.s, p5/m, z6.s, z21.s \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fadd z7.s, p5/m, z7.s, z22.s \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
"fadd z8.s, p5/m, z8.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// YM_RECON_ACCUM
#define YM_RECON_ACCUM_A64FXf \
asm ( \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fadd z9.s, p5/m, z9.s, z18.s \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fadd z10.s, p5/m, z10.s, z19.s \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fadd z11.s, p5/m, z11.s, z20.s \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fsub z6.s, p5/m, z6.s, z21.s \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fsub z7.s, p5/m, z7.s, z22.s \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
"fsub z8.s, p5/m, z8.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// ZP_RECON_ACCUM
#define ZP_RECON_ACCUM_A64FXf \
asm ( \
"fcadd z6.s, p5/m, z6.s, z18.s, 270 \n\t" \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fcadd z7.s, p5/m, z7.s, z19.s, 270 \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fcadd z8.s, p5/m, z8.s, z20.s, 270 \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fcadd z9.s, p5/m, z9.s, z21.s, 90 \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fcadd z10.s, p5/m, z10.s, z22.s, 90 \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fcadd z11.s, p5/m, z11.s, z23.s, 90 \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// ZM_RECON_ACCUM
#define ZM_RECON_ACCUM_A64FXf \
asm ( \
"fcadd z6.s, p5/m, z6.s, z18.s, 90 \n\t" \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fcadd z7.s, p5/m, z7.s, z19.s, 90 \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fcadd z8.s, p5/m, z8.s, z20.s, 90 \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fcadd z9.s, p5/m, z9.s, z21.s, 270 \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fcadd z10.s, p5/m, z10.s, z22.s, 270 \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fcadd z11.s, p5/m, z11.s, z23.s, 270 \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// TP_RECON_ACCUM
#define TP_RECON_ACCUM_A64FXf \
asm ( \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fadd z6.s, p5/m, z6.s, z18.s \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fadd z7.s, p5/m, z7.s, z19.s \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fadd z8.s, p5/m, z8.s, z20.s \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fadd z9.s, p5/m, z9.s, z21.s \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fadd z10.s, p5/m, z10.s, z22.s \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
"fadd z11.s, p5/m, z11.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// TM_RECON_ACCUM
#define TM_RECON_ACCUM_A64FXf \
asm ( \
"fadd z0.s, p5/m, z0.s, z18.s \n\t" \
"fsub z6.s, p5/m, z6.s, z18.s \n\t" \
"fadd z1.s, p5/m, z1.s, z19.s \n\t" \
"fsub z7.s, p5/m, z7.s, z19.s \n\t" \
"fadd z2.s, p5/m, z2.s, z20.s \n\t" \
"fsub z8.s, p5/m, z8.s, z20.s \n\t" \
"fadd z3.s, p5/m, z3.s, z21.s \n\t" \
"fsub z9.s, p5/m, z9.s, z21.s \n\t" \
"fadd z4.s, p5/m, z4.s, z22.s \n\t" \
"fsub z10.s, p5/m, z10.s, z22.s \n\t" \
"fadd z5.s, p5/m, z5.s, z23.s \n\t" \
"fsub z11.s, p5/m, z11.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// ZERO_PSI
#define ZERO_PSI_A64FXf \
asm ( \
"ptrue p5.s \n\t" \
"fmov z0.s , 0 \n\t" \
"fmov z1.s , 0 \n\t" \
"fmov z2.s , 0 \n\t" \
"fmov z3.s , 0 \n\t" \
"fmov z4.s , 0 \n\t" \
"fmov z5.s , 0 \n\t" \
"fmov z6.s , 0 \n\t" \
"fmov z7.s , 0 \n\t" \
"fmov z8.s , 0 \n\t" \
"fmov z9.s , 0 \n\t" \
"fmov z10.s , 0 \n\t" \
"fmov z11.s , 0 \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base) \
{ \
asm ( \
"prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base) \
{ \
asm ( \
"prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// ADD_RESULT_INTERNAL
#define ADD_RESULT_INTERNAL_A64FXf \
asm ( \
"fadd z0.s, p5/m, z0.s, z12.s \n\t" \
"fadd z1.s, p5/m, z1.s, z13.s \n\t" \
"fadd z2.s, p5/m, z2.s, z14.s \n\t" \
"fadd z3.s, p5/m, z3.s, z15.s \n\t" \
"fadd z4.s, p5/m, z4.s, z16.s \n\t" \
"fadd z5.s, p5/m, z5.s, z17.s \n\t" \
"fadd z6.s, p5/m, z6.s, z18.s \n\t" \
"fadd z7.s, p5/m, z7.s, z19.s \n\t" \
"fadd z8.s, p5/m, z8.s, z20.s \n\t" \
"fadd z9.s, p5/m, z9.s, z21.s \n\t" \
"fadd z10.s, p5/m, z10.s, z22.s \n\t" \
"fadd z11.s, p5/m, z11.s, z23.s \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);

View File

@ -38,11 +38,10 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B) RESULT_A64FXd(A);
#define SAVE_RESULT(A,B) RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base) LOAD_CHI_A64FXd(base)
#define ZERO_PSI ZERO_PSI_A64FXd
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ XP_PROJ_A64FXd
#define YP_PROJ YP_PROJ_A64FXd
@ -71,7 +70,6 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(Dir,perm) if (Dir != 3) { if (perm) { PERMUTE; } }
// DECLARATIONS
#define DECLARATIONS_A64FXd \
uint64_t baseU; \
const uint64_t lut[4][8] = { \
{4, 5, 6, 7, 0, 1, 2, 3}, \
{2, 3, 0, 1, 6, 7, 4, 5}, \
@ -128,114 +126,114 @@ Author: Nils Meyer <nils.meyer@ur.de>
// RESULT
#define RESULT_A64FXd(base) \
{ \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64), result_00); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64), result_01); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64), result_02); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64), result_10); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64), result_11); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64), result_12); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64), result_20); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64), result_21); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64), result_22); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64), result_30); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64), result_31); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64), result_32); \
}
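The vnum/byte-offset rewrites running through this diff are equivalent spellings: the _vnum intrinsics count whole vectors, and with the 512-bit SVE vectors assumed throughout (VL = 64 B) that is the same as advancing the pointer in 64-byte steps, as the explicit + n * 64 forms do. A minimal equivalence sketch (pg1 as in the file; p and n illustrative):

#include <arm_sve.h>
/* Both loads fetch vector number n when VL == 64 bytes. */
static inline void load_both(svbool_t pg1, const float64_t *p, int64_t n,
                             svfloat64_t *a, svfloat64_t *b)
{
  *a = svld1_vnum(pg1, p, n);   /* base + n * VL bytes            */
  *b = svld1(pg1, p + n * 8);   /* 8 float64_t per 64-byte vector */
}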
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base) \
{ \
svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base) \
{ \
svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXd(base) \
{ \
Chi_00 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(0)); \
Chi_01 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(1)); \
Chi_02 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(2)); \
Chi_10 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(3)); \
Chi_11 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(4)); \
Chi_12 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(5)); \
Chi_00 = svld1(pg1, (float64_t*)(base + 0 * 64)); \
Chi_01 = svld1(pg1, (float64_t*)(base + 1 * 64)); \
Chi_02 = svld1(pg1, (float64_t*)(base + 2 * 64)); \
Chi_10 = svld1(pg1, (float64_t*)(base + 3 * 64)); \
Chi_11 = svld1(pg1, (float64_t*)(base + 4 * 64)); \
Chi_12 = svld1(pg1, (float64_t*)(base + 5 * 64)); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base) \
{ \
Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64)); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXd \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64)); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXd \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64)); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
@ -263,26 +261,26 @@ Author: Nils Meyer <nils.meyer@ur.de>
Chi_12 = svtbl(Chi_12, table0);
// LOAD_GAUGE
#define LOAD_GAUGE(A) \
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
@ -295,9 +293,9 @@ Author: Nils Meyer <nils.meyer@ur.de>
UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-4)); \
U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-1)); \
U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(2)); \
U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -4 * 64)); \
U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -1 * 64)); \
U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 2 * 64)); \
}
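Each UChi accumulation above is one half of a complex multiply: the rotation-0 svcmla_x contributes the real part of U, the rotation-90 call the imaginary part, so together the pair computes UChi += U * Chi on interleaved re/im lanes. A scalar model of one such pair (assuming the standard FCMLA rotation semantics; the helper name is hypothetical):

#include <complex>

// Scalar model of an svcmla_x rot-0 / rot-90 pair: together they add u * chi.
static std::complex<double> cmla_pair(std::complex<double> acc,
                                      std::complex<double> u,
                                      std::complex<double> chi) {
  acc += std::complex<double>(u.real() * chi.real(), u.real() * chi.imag());  // rotation 0
  acc += std::complex<double>(-u.imag() * chi.imag(), u.imag() * chi.real()); // rotation 90
  return acc;
}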
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXd \
@ -572,12 +570,12 @@ Author: Nils Meyer <nils.meyer@ur.de>
result_31 = svdup_f64(0.); \
result_32 = svdup_f64(0.);
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base) \
{ \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
}
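The DC ZVA side of this hunk zeroes whole cache lines at the destination so the subsequent result stores avoid a read-for-ownership, and the 256-byte stride matches the A64FX ZVA block. For reference, a hedged sketch of querying that block size at run time (assumes EL0 access to DCZID_EL0 is enabled, as it is under Linux):

#include <stddef.h>
#include <stdint.h>

// DCZID_EL0 bits [3:0] hold log2(block size in 4-byte words), so the
// zeroing block is (4 << bits) bytes; on the A64FX this evaluates to 256.
static inline size_t dc_zva_block_bytes(void) {
  uint64_t dczid;
  asm volatile("mrs %0, dczid_el0" : "=r"(dczid));
  return (size_t)4 << (dczid & 0xf);
}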
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base) \
View File
@ -38,11 +38,10 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXf
#define SAVE_RESULT(A,B) RESULT_A64FXf(A);
#define SAVE_RESULT(A,B) RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXf(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXf
#define LOAD_CHI(base) LOAD_CHI_A64FXf(base)
#define ZERO_PSI ZERO_PSI_A64FXf
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)
#define XP_PROJ XP_PROJ_A64FXf
#define YP_PROJ YP_PROJ_A64FXf
@ -71,7 +70,6 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(A,perm) if (perm) { PERMUTE; }
// DECLARATIONS
#define DECLARATIONS_A64FXf \
uint64_t baseU; \
const uint32_t lut[4][16] = { \
{8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
@ -128,114 +126,114 @@ Author: Nils Meyer <nils.meyer@ur.de>
// RESULT
#define RESULT_A64FXf(base) \
{ \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64), result_00); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64), result_01); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64), result_02); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64), result_10); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64), result_11); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64), result_12); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64), result_20); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64), result_21); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64), result_22); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64), result_30); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64), result_31); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64), result_32); \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base) \
{ \
svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base) \
{ \
svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A) \
{ \
const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \
svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXf(base) \
{ \
Chi_00 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(0)); \
Chi_01 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(1)); \
Chi_02 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(2)); \
Chi_10 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(3)); \
Chi_11 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(4)); \
Chi_12 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(5)); \
Chi_00 = svld1(pg1, (float32_t*)(base + 0 * 64)); \
Chi_01 = svld1(pg1, (float32_t*)(base + 1 * 64)); \
Chi_02 = svld1(pg1, (float32_t*)(base + 2 * 64)); \
Chi_10 = svld1(pg1, (float32_t*)(base + 3 * 64)); \
Chi_11 = svld1(pg1, (float32_t*)(base + 4 * 64)); \
Chi_12 = svld1(pg1, (float32_t*)(base + 5 * 64)); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base) \
{ \
Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64)); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64)); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64)); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
@ -263,26 +261,26 @@ Author: Nils Meyer <nils.meyer@ur.de>
Chi_12 = svtbl(Chi_12, table0);
// LOAD_GAUGE
#define LOAD_GAUGE(A) \
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
@ -295,9 +293,9 @@ Author: Nils Meyer <nils.meyer@ur.de>
UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-4)); \
U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-1)); \
U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(2)); \
U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -4 * 64)); \
U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -1 * 64)); \
U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 2 * 64)); \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXf \
@ -572,12 +570,12 @@ Author: Nils Meyer <nils.meyer@ur.de>
result_31 = svdup_f32(0.); \
result_32 = svdup_f32(0.);
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base) \
{ \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base) \
View File
@ -46,7 +46,6 @@ Author: Nils Meyer <nils.meyer@ur.de>
#undef MULT_2SPIN_2
#undef MAYBEPERM
#undef LOAD_CHI
#undef ZERO_PSI
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
View File
@ -38,20 +38,12 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_HIP
#include <hip/hip_fp16.h>
#endif
#ifdef GRID_SYCL
namespace Grid {
typedef struct { uint16_t x;} half;
typedef struct { half x; half y;} half2;
typedef struct { float x; float y;} float2;
typedef struct { double x; double y;} double2;
}
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
typedef struct { uint16_t x;} half;
#endif
namespace Grid {
typedef struct Half2_t { half x; half y; } Half2;
#define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH )
@ -164,7 +156,7 @@ accelerator_inline float half2float(half h)
f = __half2float(h);
#else
Grid_half hh;
hh.x = h.x;
f= sfw_half_to_float(hh);
#endif
return f;
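On targets without a native __half2float, the conversion routes through Grid_half and a software widening routine. A hedged sketch of such an IEEE binary16-to-binary32 widening (the real sfw_half_to_float is not shown in this hunk; this stand-in flushes subnormal inputs to zero for brevity):

#include <cstdint>
#include <cstring>

// Minimal binary16 -> binary32 widening: rebias the exponent (15 -> 127,
// i.e. add 112) and pad the mantissa from 10 to 23 bits (shift left 13).
static float half_bits_to_float(uint16_t h) {
  uint32_t sign = (uint32_t)(h >> 15) << 31;
  uint32_t exp  = (h >> 10) & 0x1f;
  uint32_t man  = h & 0x3ff;
  uint32_t bits;
  if (exp == 0)         bits = sign;                               // zero/subnormal (flushed)
  else if (exp == 0x1f) bits = sign | 0x7f800000u | (man << 13);   // inf/NaN
  else                  bits = sign | ((exp + 112) << 23) | (man << 13);
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}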
View File
@ -125,6 +125,14 @@ accelerator_inline Grid_simd<S, V> sqrt(const Grid_simd<S, V> &r) {
return SimdApply(SqrtRealFunctor<S>(), r);
}
template <class S, class V>
accelerator_inline Grid_simd<S, V> rsqrt(const Grid_simd<S, V> &r) {
return SimdApply(RSqrtRealFunctor<S>(), r);
}
template <class Scalar>
accelerator_inline Scalar rsqrt(const Scalar &r) {
  return RSqrtRealFunctor<Scalar>()(r);
}
template <class S, class V>
accelerator_inline Grid_simd<S, V> cos(const Grid_simd<S, V> &r) {
return SimdApply(CosRealFunctor<S>(), r);
}
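SimdApply maps a scalar functor over the lanes of a Grid_simd vector, so the new rsqrt wrappers imply a matching functor. A sketch of what RSqrtRealFunctor presumably looks like (an assumption: it mirrors the existing SqrtRealFunctor pattern; its definition is not part of this hunk):

// Hedged sketch: per-lane reciprocal square root, applied by SimdApply.
template <class S> struct RSqrtRealFunctor {
  accelerator S operator()(const S &a) const { return 1.0 / sqrt(a); }
};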
Grid/simd/gridverter.py Executable file (2377 lines)
File diff suppressed because it is too large

View File
@ -269,7 +269,7 @@ public:
std::vector<Vector<std::pair<int,int> > > face_table ;
Vector<int> surface_list;
stencilVector<StencilEntry> _entries; // Resident in managed memory
Vector<StencilEntry> _entries; // Resident in managed memory
std::vector<Packet> Packets;
std::vector<Merge> Mergers;
std::vector<Merge> MergersSHM;
View File
@ -95,18 +95,14 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
vtype nrm;
vtype inner;
for(int c1=0;c1<N;c1++){
// Normalises row c1
zeroit(inner);
for(int c2=0;c2<N;c2++)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = 1.0/nrm;
nrm = rsqrt(inner);
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
// Remove c1 from rows c1+1...N-1
for (int b=c1+1; b<N; ++b){
decltype(ret._internal[b][b]*ret._internal[b][b]) pr;
zeroit(pr);
@ -117,19 +113,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
ret._internal[b][c] -= pr * ret._internal[c1][c];
}
}
}
// Normalise last row
{
int c1 = N-1;
zeroit(inner);
for(int c2=0;c2<N;c2++)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = 1.0/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
}
// assuming the determinant is ok
return ret;
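For orientation, the loop above is a row-wise modified Gram-Schmidt: normalise row c1 (now via a single rsqrt instead of sqrt plus reciprocal), then subtract its projection from every later row. A scalar sketch of the same procedure on a plain complex matrix (illustrative only, using the innerProduct(a,b) = conj(a)*b convention):

#include <array>
#include <cmath>
#include <complex>

// Scalar model of ProjectOnGroup's orthonormalisation step.
template <int N>
void gram_schmidt(std::array<std::array<std::complex<double>, N>, N> &m) {
  for (int c1 = 0; c1 < N; c1++) {
    double inner = 0;
    for (int c2 = 0; c2 < N; c2++) inner += std::norm(m[c1][c2]);
    double nrm = 1.0 / std::sqrt(inner);            // the rsqrt of the hunk
    for (int c2 = 0; c2 < N; c2++) m[c1][c2] *= nrm;
    for (int b = c1 + 1; b < N; b++) {              // remove c1 from later rows
      std::complex<double> pr = 0;
      for (int c = 0; c < N; c++) pr += std::conj(m[c1][c]) * m[b][c];
      for (int c = 0; c < N; c++) m[b][c] -= pr * m[c1][c];
    }
  }
}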
View File
@ -84,6 +84,7 @@ NAMESPACE_BEGIN(Grid);
}
UNARY(sqrt);
UNARY(rsqrt);
UNARY(sin);
UNARY(cos);
UNARY(asin);
View File
@ -21,26 +21,22 @@ void acceleratorInit(void)
#define ENV_RANK_SLURM "SLURM_PROCID"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_MVAPICH "MV2_COMM_WORLD_RANK"
// We extract the local rank initialization using an environment variable
if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
printf("OPENMPI detected\n");
rank = atoi(localRankStr);
} else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
printf("MVAPICH detected\n");
rank = atoi(localRankStr);
} else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
printf("SLURM detected\n");
rank = atoi(localRankStr);
} else {
printf("MPI version is unknown - bad things may happen\n");
}
if ((localRankStr = getenv(ENV_RANK_OMPI )) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_SLURM )) != NULL) { world_rank = atoi(localRankStr);}
// We extract the local rank initialization using an environment variable
if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
if (!world_rank)
printf("OPENMPI detected\n");
rank = atoi(localRankStr);
} else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
if (!world_rank)
printf("MVAPICH detected\n");
rank = atoi(localRankStr);
} else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
if (!world_rank)
printf("SLURM detected\n");
rank = atoi(localRankStr);
} else {
if (!world_rank)
printf("MPI version is unknown - bad things may happen\n");
}
size_t totalDeviceMem=0;
for (int i = 0; i < nDevices; i++) {
View File
@ -333,7 +333,7 @@ inline void *acceleratorAllocDevice(size_t bytes)
return ptr;
};
inline void acceleratorFreeShared(void *ptr){ hipFree(ptr);};
inline void acceleratorFreeShared(void *ptr){ free(ptr);};
inline void acceleratorFreeDevice(void *ptr){ hipFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
@ -361,7 +361,7 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
//////////////////////////////////////////////
// CPU Target - No accelerator just thread instead
//////////////////////////////////////////////
#define GRID_ALLOC_ALIGN (2*1024*1024) // 2MB aligned
#if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) )
#undef GRID_SIMT
View File
@ -1,3 +1,4 @@
#include "Benchmark_IO.hpp"
#ifndef BENCH_IO_LMIN
@ -12,7 +13,6 @@
#define BENCH_IO_NPASS 10
#endif
#ifdef HAVE_LIME
using namespace Grid;
std::string filestem(const int l)
@ -196,6 +196,3 @@ int main (int argc, char ** argv)
return EXIT_SUCCESS;
}
#else
int main(int argc,char ** argv){}
#endif
View File
@ -2,12 +2,12 @@
#define Benchmark_IO_hpp_
#include <Grid/Grid.h>
#ifdef HAVE_LIME
#define MSG std::cout << GridLogMessage
#define SEP \
"-----------------------------------------------------------------------------"
#define BIGSEP \
"============================================================================="
#ifdef HAVE_LIME
namespace Grid {
View File
@ -1,5 +1,5 @@
#include "Benchmark_IO.hpp"
#ifdef HAVE_LIME
using namespace Grid;
int main (int argc, char ** argv)
@ -97,6 +97,3 @@ int main (int argc, char ** argv)
return EXIT_SUCCESS;
}
#else
int main(int argc,char ** argv){}
#endif
View File
@ -62,7 +62,7 @@ struct time_statistics{
void comms_header(){
std::cout <<GridLogMessage << " L "<<"\t"<<" Ls "<<"\t"
<<"bytes\t MB/s uni (err/min/max) \t\t MB/s bidi (err/min/max)"<<std::endl;
<<std::setw(11)<<"bytes"<<"MB/s uni (err/min/max)"<<"\t\t"<<"MB/s bidi (err/min/max)"<<std::endl;
};
Gamma::Algebra Gmu [] = {
@ -189,11 +189,11 @@ public:
// double rbytes = dbytes*0.5;
double bidibytes = dbytes;
std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
<< bytes << " \t "
<<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t"
<<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7)
<<std::right<< xbytes/timestat.mean<<" "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " "
<<xbytes/timestat.max <<" "<< xbytes/timestat.min
<< "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
<< "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
<< bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
}
@ -445,7 +445,7 @@ public:
// 1344= 3*(2*8+6)*2*8 + 8*3*2*2 + 3*4*2*8
// 1344 = Nc* (6+(Nc-1)*8)*2*Nd + Nd*Nc*2*2 + Nd*Nc*Ns*2
// double flops=(1344.0*volume)/2;
#if 1
#if 0
double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + Nd*Nc*Ns + Nd*Nc*Ns*2;
#else
double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + 2*Nd*Nc*Ns + 2*Nd*Nc*Ns*2;
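Worked through for Nc=3, Ns=4, Nd=4: the matrix-vector term Nc*(6+(Nc-1)*8)*Ns*Nd = 3*22*16 = 1056 flops is common to both branches; the first variant adds Nd*Nc*Ns + Nd*Nc*Ns*2 = 48 + 96 = 144 for 1200 flops per site, while the second adds 2*Nd*Nc*Ns + 2*Nd*Nc*Ns*2 = 96 + 192 = 288 for 1344 flops per site, the DiRAC-ITT-2020 count quoted in the comment above.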
@ -512,6 +512,7 @@ public:
NN_global=NN;
uint64_t SHM=NP/NN;
Coordinate latt4({local[0]*mpi[0],local[1]*mpi[1],local[2]*mpi[2],local[3]*mpi[3]});
///////// Welcome message ////////////
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
@ -700,7 +701,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " Summary table Ls="<<Ls <<std::endl;
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << "L \t\t Wilson \t\t DWF4 \t\t Staggered" <<std::endl;
std::cout<<GridLogMessage << "L \t\t Wilson \t\t DWF4 \t\tt Staggered" <<std::endl;
for(int l=0;l<L_list.size();l++){
std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]<<" \t\t "<<dwf4[l] << " \t\t "<< staggered[l]<<std::endl;
}
@ -731,9 +732,9 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " Per Node Summary table Ls="<<Ls <<std::endl;
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " L \t\t Wilson\t\t DWF4\t\t Staggered " <<std::endl;
std::cout<<GridLogMessage << " L \t\t Wilson\t\t DWF4 " <<std::endl;
for(int l=0;l<L_list.size();l++){
std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]/NN<<" \t "<<dwf4[l]/NN<< " \t "<<staggered[l]/NN<<std::endl;
std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]/NN<<" \t "<<dwf4[l]/NN<<std::endl;
}
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
View File
@ -24,7 +24,7 @@ typedef typename GparityDomainWallFermionD::FermionField GparityLatticeFermionD;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
#ifdef ENABLE_GPARITY
int Ls=16;
for(int i=0;i<argc;i++)
if(std::string(argv[i]) == "-Ls"){
@ -184,7 +184,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
DwD.Report();
}
#endif
Grid_finalize();
}
View File
@ -123,24 +123,6 @@ case ${ac_LAPACK} in
AC_DEFINE([USE_LAPACK],[1],[use LAPACK]);;
esac
############### fermions
AC_ARG_ENABLE([fermion-reps],
[AC_HELP_STRING([--fermion-reps=yes|no], [enable extra fermion representation support])],
[ac_FERMION_REPS=${enable_fermion_reps}], [ac_FERMION_REPS=yes])
AM_CONDITIONAL(BUILD_FERMION_REPS, [ test "${ac_FERMION_REPS}X" == "yesX" ])
AC_ARG_ENABLE([gparity],
[AC_HELP_STRING([--enable-gparity=yes|no], [enable G-parity support])],
[ac_GPARITY=${enable_gparity}], [ac_GPARITY=yes])
AM_CONDITIONAL(BUILD_GPARITY, [ test "${ac_GPARITY}X" == "yesX" ])
case ${ac_FERMION_REPS} in
yes) AC_DEFINE([ENABLE_FERMION_REPS],[1],[non QCD fermion reps]);;
esac
case ${ac_GPARITY} in
yes) AC_DEFINE([ENABLE_GPARITY],[1],[fermion actions with GPARITY BCs]);;
esac
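Illustrative usage of the two switches above, both of which default to yes: configuring with --enable-gparity=no --enable-fermion-reps=no drops the G-parity and higher-representation fermion instantiations from the build.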
############### Nc
AC_ARG_ENABLE([Nc],
[AC_HELP_STRING([--enable-Nc=2|3|4], [enable number of colours])],
@ -509,28 +491,6 @@ AM_CFLAGS="$SIMD_FLAGS $AM_CFLAGS"
###### PRECISION ALWAYS DOUBLE
AC_DEFINE([GRID_DEFAULT_PRECISION_DOUBLE],[1],[GRID_DEFAULT_PRECISION is DOUBLE] )
#########################################################
###################### GRID ALLOCATOR ALIGNMENT ##
#########################################################
AC_ARG_ENABLE([alloc-align],[AC_HELP_STRING([--enable-alloc-align=2MB|4k],
[Alignment in bytes of GRID Allocator ])],[ac_ALLOC_ALIGN=${enable_alloc_align}],[ac_ALLOC_ALIGN=2MB])
case ${ac_ALLOC_ALIGN} in
4k)
AC_DEFINE([GRID_ALLOC_ALIGN],[(4096)],[GRID_ALLOC_ALIGN]);;
2MB)
AC_DEFINE([GRID_ALLOC_ALIGN],[(2*1024*1024)],[GRID_ALLOC_ALIGN]);;
*);;
esac
AC_ARG_ENABLE([alloc-cache],[AC_HELP_STRING([--enable-alloc-cache ],
[Cache a pool of recent "frees" to reuse])],[ac_ALLOC_CACHE=${enable_alloc_cache}],[ac_ALLOC_CACHE=yes])
case ${ac_ALLOC_CACHE} in
yes)
AC_DEFINE([ALLOCATION_CACHE],[1],[ALLOCATION_CACHE]);;
*);;
esac
#########################################################
###################### set GPU device to rank in node ##
#########################################################
@ -538,7 +498,7 @@ AC_ARG_ENABLE([setdevice],[AC_HELP_STRING([--enable-setdevice | --disable-setdev
[Set GPU to rank in node with cudaSetDevice or similar])],[ac_SETDEVICE=${enable_SETDEVICE}],[ac_SETDEVICE=no])
case ${ac_SETDEVICE} in
yes);;
no)
*)
AC_DEFINE([GRID_DEFAULT_GPU],[1],[GRID_DEFAULT_GPU] )
;;
esac
@ -568,7 +528,7 @@ case ${ac_SHM} in
shmnone | no)
AC_DEFINE([GRID_MPI3_SHM_NONE],[1],[GRID_MPI3_SHM_NONE] )
;;
AC_DEFINE([GRID_SHM_DISABLE],[1],[USE MPI for intranode comms]);;
nvlink)
AC_DEFINE([GRID_MPI3_SHM_NVLINK],[1],[GRID_MPI3_SHM_NVLINK] )
@ -590,15 +550,6 @@ AC_ARG_ENABLE([shmpath],[AC_HELP_STRING([--enable-shmpath=path],
[ac_SHMPATH=/var/lib/hugetlbfs/global/pagesize-2MB/])
AC_DEFINE_UNQUOTED([GRID_SHM_PATH],["$ac_SHMPATH"],[Path to a hugetlbfs filesystem for MMAPing])
############### force MPI in SMP
AC_ARG_ENABLE([shm-force-mpi],[AC_HELP_STRING([--enable-shm-force-mpi],
[Force MPI within shared memory])],[ac_SHM_FORCE_MPI=${enable_shm_force_mpi}],[ac_SHM_FORCE_MPI=no])
case ${ac_SHM_FORCE_MPI} in
yes)
AC_DEFINE([GRID_SHM_FORCE_MPI],[1],[GRID_SHM_FORCE_MPI] )
;;
*) ;;
esac
############### communication type selection
AC_ARG_ENABLE([comms-threads],[AC_HELP_STRING([--enable-comms-threads | --disable-comms-threads],
View File
@ -6,27 +6,13 @@ home=`pwd`
cd $home/Grid
HFILES=`find . -type f -name '*.h' -not -name '*Hdf5*' -not -path '*/gamma-gen/*' -not -path '*/Old/*' -not -path '*/Eigen/*'`
HFILES="$HFILES"
CCFILES=`find . -name '*.cc' -not -path '*/instantiation/*/*' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*SharedMemory*.cc' -not -name '*Hdf5*'`
ZWILS_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/ZWilsonImpl*' `
WILS_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/WilsonImpl*' `
STAG_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/Staggered*' `
GP_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/Gparity*' `
ADJ_FERMION_FILES=` find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/WilsonAdj*' `
TWOIND_FERMION_FILES=`find . -name '*.cc' -path '*/instantiation/*' -path '*/instantiation/WilsonTwoIndex*'`
CCFILES=`find . -name '*.cc' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*SharedMemory*.cc' -not -name '*Hdf5*'`
HPPFILES=`find . -type f -name '*.hpp'`
echo HFILES=$HFILES $HPPFILES > Make.inc
echo >> Make.inc
echo CCFILES=$CCFILES >> Make.inc
echo ZWILS_FERMION_FILES=$ZWILS_FERMION_FILES >> Make.inc
echo WILS_FERMION_FILES=$WILS_FERMION_FILES >> Make.inc
echo STAG_FERMION_FILES=$STAG_FERMION_FILES >> Make.inc
echo GP_FERMION_FILES=$GP_FERMION_FILES >> Make.inc
echo ADJ_FERMION_FILES=$ADJ_FERMION_FILES >> Make.inc
echo TWOIND_FERMION_FILES=$TWOIND_FERMION_FILES >> Make.inc
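The net effect on Grid/Make.inc is one variable per instantiation family (WILS_FERMION_FILES, GP_FERMION_FILES, and so on), each listing the generated .cc files under qcd/action/fermion/instantiation/ for that implementation, so whole families can be included or excluded at Make time via the conditionals in Makefile.am.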
# tests Make.inc
cd $home/tests
@ -40,10 +26,11 @@ for subdir in $dirs; do
echo "tests-local: ${TESTLIST} " > Make.inc
echo ${PREF}_PROGRAMS = ${TESTLIST} >> Make.inc
echo >> Make.inc
HADLINK=`[ $subdir = './hadrons' ] && echo '-lHadrons '`
for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a' >> Make.inc
echo ${BNAME}_LDADD=${HADLINK}-lGrid >> Make.inc
echo >> Make.inc
done
if [ $subdir != '.' ]; then
@ -62,7 +49,7 @@ echo >> Make.inc
for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a' >> Make.inc
echo ${BNAME}_LDADD=-lGrid>> Make.inc
echo >> Make.inc
done
cd ..
@ -78,7 +65,7 @@ echo >> Make.inc
for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a'>> Make.inc
echo ${BNAME}_LDADD=-lGrid>> Make.inc
echo >> Make.inc
done
cd ..
View File
@ -69,11 +69,11 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "* Generators for SU(Nc)" << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
SU3::printGenerators();
std::cout << "Dimension of adjoint representation: "<< SU3Adjoint::Dimension << std::endl;
SU3Adjoint::printGenerators();
SU3::testGenerators();
SU3Adjoint::testGenerators();
SU<Nc>::printGenerators();
std::cout << "Dimension of adjoint representation: "<< SU<Nc>Adjoint::Dimension << std::endl;
SU<Nc>Adjoint::printGenerators();
SU<Nc>::testGenerators();
SU<Nc>Adjoint::testGenerators();
std::cout<<GridLogMessage<<"*********************************************"<<std::endl;
std::cout<<GridLogMessage<<"* Generators for SU(4)"<<std::endl;
@ -87,22 +87,22 @@ int main(int argc, char** argv) {
// Projectors
GridParallelRNG gridRNG(grid);
gridRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
SU3Adjoint::LatticeAdjMatrix Gauss(grid);
SU3::LatticeAlgebraVector ha(grid);
SU3::LatticeAlgebraVector hb(grid);
SU_Adjoint<Nc>::LatticeAdjMatrix Gauss(grid);
SU<Nc>::LatticeAlgebraVector ha(grid);
SU<Nc>::LatticeAlgebraVector hb(grid);
random(gridRNG,Gauss);
std::cout << GridLogMessage << "Start projectOnAlgebra" << std::endl;
SU3Adjoint::projectOnAlgebra(ha, Gauss);
SU_Adjoint<Nc>::projectOnAlgebra(ha, Gauss);
std::cout << GridLogMessage << "end projectOnAlgebra" << std::endl;
std::cout << GridLogMessage << "Start projector" << std::endl;
SU3Adjoint::projector(hb, Gauss);
SU_Adjoint<Nc>::projector(hb, Gauss);
std::cout << GridLogMessage << "end projector" << std::endl;
std::cout << GridLogMessage << "ReStart projector" << std::endl;
SU3Adjoint::projector(hb, Gauss);
SU_Adjoint<Nc>::projector(hb, Gauss);
std::cout << GridLogMessage << "end projector" << std::endl;
SU3::LatticeAlgebraVector diff = ha -hb;
SU<Nc>::LatticeAlgebraVector diff = ha -hb;
std::cout << GridLogMessage << "Difference: " << norm2(diff) << std::endl;
@ -114,8 +114,8 @@ int main(int argc, char** argv) {
LatticeGaugeField U(grid), V(grid);
SU3::HotConfiguration<LatticeGaugeField>(gridRNG, U);
SU3::HotConfiguration<LatticeGaugeField>(gridRNG, V);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, U);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, V);
// Adjoint representation
// Test group structure
@ -123,8 +123,8 @@ int main(int argc, char** argv) {
LatticeGaugeField UV(grid);
UV = Zero();
for (int mu = 0; mu < Nd; mu++) {
SU3::LatticeMatrix Umu = peekLorentz(U,mu);
SU3::LatticeMatrix Vmu = peekLorentz(V,mu);
SU<Nc>::LatticeMatrix Umu = peekLorentz(U,mu);
SU<Nc>::LatticeMatrix Vmu = peekLorentz(V,mu);
pokeLorentz(UV,Umu*Vmu, mu);
}
@ -151,16 +151,16 @@ int main(int argc, char** argv) {
// Check correspondence of algebra and group transformations
// Create a random vector
SU3::LatticeAlgebraVector h_adj(grid);
SU<Nc>::LatticeAlgebraVector h_adj(grid);
typename AdjointRep<Nc>::LatticeMatrix Ar(grid);
random(gridRNG,h_adj);
h_adj = real(h_adj);
SU_Adjoint<Nc>::AdjointLieAlgebraMatrix(h_adj,Ar);
// Re-extract h_adj
SU3::LatticeAlgebraVector h_adj2(grid);
SU<Nc>::LatticeAlgebraVector h_adj2(grid);
SU_Adjoint<Nc>::projectOnAlgebra(h_adj2, Ar);
SU3::LatticeAlgebraVector h_diff = h_adj - h_adj2;
SU<Nc>::LatticeAlgebraVector h_diff = h_adj - h_adj2;
std::cout << GridLogMessage << "Projections structure check vector difference (Adjoint representation) : " << norm2(h_diff) << std::endl;
// Exponentiate
@ -183,14 +183,14 @@ int main(int argc, char** argv) {
// Construct the fundamental matrix in the group
SU3::LatticeMatrix Af(grid);
SU3::FundamentalLieAlgebraMatrix(h_adj,Af);
SU3::LatticeMatrix Ufund(grid);
SU<Nc>::LatticeMatrix Af(grid);
SU<Nc>::FundamentalLieAlgebraMatrix(h_adj,Af);
SU<Nc>::LatticeMatrix Ufund(grid);
Ufund = expMat(Af, 1.0, 16);
// Check unitarity
SU3::LatticeMatrix uno_f(grid);
SU<Nc>::LatticeMatrix uno_f(grid);
uno_f = 1.0;
SU3::LatticeMatrix UnitCheck(grid);
SU<Nc>::LatticeMatrix UnitCheck(grid);
UnitCheck = Ufund * adj(Ufund) - uno_f;
std::cout << GridLogMessage << "unitarity check 1: " << norm2(UnitCheck)
<< std::endl;
@ -260,20 +260,20 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "Test for the Two Index Symmetric projectors"
<< std::endl;
// Projectors
SU3TwoIndexSymm::LatticeTwoIndexMatrix Gauss2(grid);
SU_TwoIndex<Nc, Symmetric>::LatticeTwoIndexMatrix Gauss2(grid);
random(gridRNG,Gauss2);
std::cout << GridLogMessage << "Start projectOnAlgebra" << std::endl;
SU3TwoIndexSymm::projectOnAlgebra(ha, Gauss2);
SU_TwoIndex<Nc, Symmetric>::projectOnAlgebra(ha, Gauss2);
std::cout << GridLogMessage << "end projectOnAlgebra" << std::endl;
std::cout << GridLogMessage << "Start projector" << std::endl;
SU3TwoIndexSymm::projector(hb, Gauss2);
SU_TwoIndex<Nc, Symmetric>::projector(hb, Gauss2);
std::cout << GridLogMessage << "end projector" << std::endl;
std::cout << GridLogMessage << "ReStart projector" << std::endl;
SU3TwoIndexSymm::projector(hb, Gauss2);
SU_TwoIndex<Nc, Symmetric>::projector(hb, Gauss2);
std::cout << GridLogMessage << "end projector" << std::endl;
SU3::LatticeAlgebraVector diff2 = ha - hb;
SU<Nc>::LatticeAlgebraVector diff2 = ha - hb;
std::cout << GridLogMessage << "Difference: " << norm2(diff) << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
@ -284,20 +284,20 @@ int main(int argc, char** argv) {
std::cout << GridLogMessage << "Test for the Two index anti-Symmetric projectors"
<< std::endl;
// Projectors
SU3TwoIndexAntiSymm::LatticeTwoIndexMatrix Gauss2a(grid);
SU<Nc>TwoIndexAntiSymm::LatticeTwoIndexMatrix Gauss2a(grid);
random(gridRNG,Gauss2a);
std::cout << GridLogMessage << "Start projectOnAlgebra" << std::endl;
SU3TwoIndexAntiSymm::projectOnAlgebra(ha, Gauss2a);
SU_TwoIndex<Nc, AntiSymmetric>::projectOnAlgebra(ha, Gauss2a);
std::cout << GridLogMessage << "end projectOnAlgebra" << std::endl;
std::cout << GridLogMessage << "Start projector" << std::endl;
SU3TwoIndexAntiSymm::projector(hb, Gauss2a);
SU_TwoIndex<Nc, AntiSymmetric>::projector(hb, Gauss2a);
std::cout << GridLogMessage << "end projector" << std::endl;
std::cout << GridLogMessage << "ReStart projector" << std::endl;
SU3TwoIndexAntiSymm::projector(hb, Gauss2a);
SU_TwoIndex<Nc, AntiSymmetric>::projector(hb, Gauss2a);
std::cout << GridLogMessage << "end projector" << std::endl;
SU3::LatticeAlgebraVector diff2a = ha - hb;
SU<Nc>::LatticeAlgebraVector diff2a = ha - hb;
std::cout << GridLogMessage << "Difference: " << norm2(diff2a) << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
@ -311,14 +311,14 @@ int main(int argc, char** argv) {
// Test group structure
// (U_f * V_f)_r = U_r * V_r
LatticeGaugeField U2(grid), V2(grid);
SU3::HotConfiguration<LatticeGaugeField>(gridRNG, U2);
SU3::HotConfiguration<LatticeGaugeField>(gridRNG, V2);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, U2);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, V2);
LatticeGaugeField UV2(grid);
UV2 = Zero();
for (int mu = 0; mu < Nd; mu++) {
SU3::LatticeMatrix Umu2 = peekLorentz(U2,mu);
SU3::LatticeMatrix Vmu2 = peekLorentz(V2,mu);
SU<Nc>::LatticeMatrix Umu2 = peekLorentz(U2,mu);
SU<Nc>::LatticeMatrix Vmu2 = peekLorentz(V2,mu);
pokeLorentz(UV2,Umu2*Vmu2, mu);
}
@ -345,16 +345,16 @@ int main(int argc, char** argv) {
// Check correspondence of algebra and group transformations
// Create a random vector
SU3::LatticeAlgebraVector h_sym(grid);
SU<Nc>::LatticeAlgebraVector h_sym(grid);
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix Ar_sym(grid);
random(gridRNG,h_sym);
h_sym = real(h_sym);
SU_TwoIndex<Nc,Symmetric>::TwoIndexLieAlgebraMatrix(h_sym,Ar_sym);
// Re-extract h_sym
SU3::LatticeAlgebraVector h_sym2(grid);
SU<Nc>::LatticeAlgebraVector h_sym2(grid);
SU_TwoIndex< Nc, Symmetric>::projectOnAlgebra(h_sym2, Ar_sym);
SU3::LatticeAlgebraVector h_diff_sym = h_sym - h_sym2;
SU<Nc>::LatticeAlgebraVector h_diff_sym = h_sym - h_sym2;
std::cout << GridLogMessage << "Projections structure check vector difference (Two Index Symmetric): " << norm2(h_diff_sym) << std::endl;
@ -379,11 +379,11 @@ int main(int argc, char** argv) {
// Construct the fundamental matrix in the group
SU3::LatticeMatrix Af_sym(grid);
SU3::FundamentalLieAlgebraMatrix(h_sym,Af_sym);
SU3::LatticeMatrix Ufund2(grid);
SU<Nc>::LatticeMatrix Af_sym(grid);
SU<Nc>::FundamentalLieAlgebraMatrix(h_sym,Af_sym);
SU<Nc>::LatticeMatrix Ufund2(grid);
Ufund2 = expMat(Af_sym, 1.0, 16);
SU3::LatticeMatrix UnitCheck2(grid);
SU<Nc>::LatticeMatrix UnitCheck2(grid);
UnitCheck2 = Ufund2 * adj(Ufund2) - uno_f;
std::cout << GridLogMessage << "unitarity check 1: " << norm2(UnitCheck2)
<< std::endl;
@ -421,14 +421,14 @@ int main(int argc, char** argv) {
// Test group structure
// (U_f * V_f)_r = U_r * V_r
LatticeGaugeField U2A(grid), V2A(grid);
SU3::HotConfiguration<LatticeGaugeField>(gridRNG, U2A);
SU3::HotConfiguration<LatticeGaugeField>(gridRNG, V2A);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, U2A);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, V2A);
LatticeGaugeField UV2A(grid);
UV2A = Zero();
for (int mu = 0; mu < Nd; mu++) {
SU3::LatticeMatrix Umu2A = peekLorentz(U2,mu);
SU3::LatticeMatrix Vmu2A = peekLorentz(V2,mu);
SU<Nc>::LatticeMatrix Umu2A = peekLorentz(U2,mu);
SU<Nc>::LatticeMatrix Vmu2A = peekLorentz(V2,mu);
pokeLorentz(UV2A,Umu2A*Vmu2A, mu);
}
@ -455,16 +455,16 @@ int main(int argc, char** argv) {
// Check correspondence of algebra and group transformations
// Create a random vector
SU3::LatticeAlgebraVector h_Asym(grid);
SU<Nc>::LatticeAlgebraVector h_Asym(grid);
typename TwoIndexRep< Nc, AntiSymmetric>::LatticeMatrix Ar_Asym(grid);
random(gridRNG,h_Asym);
h_Asym = real(h_Asym);
SU_TwoIndex< Nc, AntiSymmetric>::TwoIndexLieAlgebraMatrix(h_Asym,Ar_Asym);
// Re-extract h_sym
SU3::LatticeAlgebraVector h_Asym2(grid);
SU<Nc>::LatticeAlgebraVector h_Asym2(grid);
SU_TwoIndex< Nc, AntiSymmetric>::projectOnAlgebra(h_Asym2, Ar_Asym);
SU3::LatticeAlgebraVector h_diff_Asym = h_Asym - h_Asym2;
SU<Nc>::LatticeAlgebraVector h_diff_Asym = h_Asym - h_Asym2;
std::cout << GridLogMessage << "Projections structure check vector difference (Two Index anti-Symmetric): " << norm2(h_diff_Asym) << std::endl;
@ -489,11 +489,11 @@ int main(int argc, char** argv) {
// Construct the fundamental matrix in the group
SU3::LatticeMatrix Af_Asym(grid);
SU3::FundamentalLieAlgebraMatrix(h_Asym,Af_Asym);
SU3::LatticeMatrix Ufund2A(grid);
SU<Nc>::LatticeMatrix Af_Asym(grid);
SU<Nc>::FundamentalLieAlgebraMatrix(h_Asym,Af_Asym);
SU<Nc>::LatticeMatrix Ufund2A(grid);
Ufund2A = expMat(Af_Asym, 1.0, 16);
SU3::LatticeMatrix UnitCheck2A(grid);
SU<Nc>::LatticeMatrix UnitCheck2A(grid);
UnitCheck2A = Ufund2A * adj(Ufund2A) - uno_f;
std::cout << GridLogMessage << "unitarity check 1: " << norm2(UnitCheck2A)
<< std::endl;
View File
@ -1,145 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_quenched_update.cc
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::vector<int> latt({8,8,8,8});
GridCartesian * grid = SpaceTimeGrid::makeFourDimGrid(latt,
GridDefaultSimd(Nd,vComplexD::Nsimd()),
GridDefaultMpi());
GridCartesian * gridF = SpaceTimeGrid::makeFourDimGrid(latt,
GridDefaultSimd(Nd,vComplexF::Nsimd()),
GridDefaultMpi());
///////////////////////////////
// Configuration of known size
///////////////////////////////
LatticeColourMatrixD ident(grid);
LatticeColourMatrixD U(grid);
LatticeColourMatrixD UU(grid);
LatticeColourMatrixD tmp(grid);
LatticeColourMatrixD org(grid);
LatticeColourMatrixF UF(gridF);
LatticeGaugeField Umu(grid);
ident =1.0;
// RNG set up for test
std::vector<int> pseeds({1,2,3,4,5}); // once I caught a fish alive
std::vector<int> sseeds({6,7,8,9,10});// then i let it go again
GridParallelRNG pRNG(grid); pRNG.SeedFixedIntegers(pseeds);
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(sseeds);
SU<Nc>::HotConfiguration(pRNG,Umu);
U = PeekIndex<LorentzIndex>(Umu,0);
org=U;
tmp= U*adj(U) - ident ;
RealD Def1 = norm2( tmp );
std::cout << " Defect1 "<<Def1<<std::endl;
tmp = U - org;
std::cout << "Diff1 "<<norm2(tmp)<<std::endl;
precisionChange(UF,U);
precisionChange(U,UF);
tmp= U*adj(U) - ident ;
RealD Def2 = norm2( tmp );
std::cout << " Defect2 "<<Def2<<std::endl;
tmp = U - org;
std::cout << "Diff2 "<<norm2(tmp)<<std::endl;
U = ProjectOnGroup(U);
tmp= U*adj(U) - ident ;
RealD Def3 = norm2( tmp);
std::cout << " Defect3 "<<Def3<<std::endl;
tmp = U - org;
std::cout << "Diff3 "<<norm2(tmp)<<std::endl;
LatticeComplexD detU(grid);
LatticeComplexD detUU(grid);
detU= Determinant(U) ;
detU=detU-1.0;
std::cout << "Determinant defect before screw up " << norm2(detU)<<std::endl;
std::cout << " Screwing up determinant " << std::endl;
RealD theta = 0.2;
ComplexD phase(cos(theta),sin(theta));
for(int i=0;i<Nc;i++){
auto element = PeekIndex<ColourIndex>(U,Nc-1,i);
element = element * phase;
PokeIndex<ColourIndex>(U,element,Nc-1,i);
}
U=U*0.1;
UU=U;
detU= Determinant(U) ;
detU=detU-1.0;
std::cout << "Determinant defect before projection " <<norm2(detU)<<std::endl;
tmp = U*adj(U) - ident;
std::cout << "Unitarity check before projection " << norm2(tmp)<<std::endl;
ProjectSU3(U);
detU= Determinant(U) ;
detU= detU -1.0;
std::cout << "Determinant ProjectSU3 defect " <<norm2(detU)<<std::endl;
tmp = U*adj(U) - ident;
std::cout << "Unitarity check after projection " << norm2(tmp)<<std::endl;
ProjectSUn(UU);
detUU= Determinant(UU);
detUU= detUU -1.0;
std::cout << "Determinant ProjectSUn defect " <<norm2(detUU)<<std::endl;
tmp = UU*adj(UU) - ident;
std::cout << "Unitarity check after projection " << norm2(tmp)<<std::endl;
Grid_finalize();
}
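The same defect bookkeeping is repeated several times above; a hypothetical helper (not part of the test, just the pattern factored out as a sketch) would look like:

void reportDefects(LatticeColourMatrixD &U, const LatticeColourMatrixD &ident,
                   const std::string &tag)
{
  LatticeColourMatrixD tmp(U.Grid());   // unitarity defect U U^dag - 1
  LatticeComplexD det(U.Grid());        // determinant defect det(U) - 1
  tmp = U*adj(U) - ident;
  det = Determinant(U);
  det = det - 1.0;
  std::cout << tag << " unitarity " << norm2(tmp)
            << " determinant " << norm2(det) << std::endl;
}

Called as reportDefects(U, ident, "after ProjectSU3") at each stage, it reproduces the printouts above.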

View File

@@ -1,106 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_quenched_update.cc
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::vector<int> latt({8,8,8,8});
GridCartesian * grid = SpaceTimeGrid::makeFourDimGrid(latt,
GridDefaultSimd(Nd,vComplexD::Nsimd()),
GridDefaultMpi());
GridCartesian * gridF = SpaceTimeGrid::makeFourDimGrid(latt,
GridDefaultSimd(Nd,vComplexF::Nsimd()),
GridDefaultMpi());
///////////////////////////////
// Configuration of known size
///////////////////////////////
LatticeColourMatrixD ident(grid);
LatticeColourMatrixD U(grid);
LatticeColourMatrixD tmp(grid);
LatticeColourMatrixD org(grid);
LatticeColourMatrixF UF(gridF);
LatticeGaugeField Umu(grid);
ident =1.0;
// RNG set up for test
std::vector<int> pseeds({1,2,3,4,5}); // once I caught a fish alive
std::vector<int> sseeds({6,7,8,9,10});// then i let it go again
GridParallelRNG pRNG(grid); pRNG.SeedFixedIntegers(pseeds);
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(sseeds);
SU<Nc>::HotConfiguration(pRNG,Umu);
U = PeekIndex<LorentzIndex>(Umu,0);
org=U;
tmp= U*adj(U) - ident ;
RealD Def1 = norm2( tmp );
std::cout << " Defect1 "<<Def1<<std::endl;
tmp = U - org;
std::cout << "Diff1 "<<norm2(tmp)<<std::endl;
precisionChange(UF,U);
precisionChange(U,UF);
tmp= U*adj(U) - ident ;
RealD Def2 = norm2( tmp );
std::cout << " Defect2 "<<Def2<<std::endl;
tmp = U - org;
std::cout << "Diff2 "<<norm2(tmp)<<std::endl;
U = ProjectOnGroup(U);
tmp= U*adj(U) - ident ;
RealD Def3 = norm2( tmp);
std::cout << " Defect3 "<<Def3<<std::endl;
tmp = U - org;
std::cout << "Diff3 "<<norm2(tmp)<<std::endl;
Grid_finalize();
}
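A rough expectation for Defect2 above, assuming uncorrelated single-precision rounding in the double -> float -> double round trip: each matrix entry is perturbed at O(eps_f) with eps_f ~ 1e-7, and on this 8^4 lattice (V = 4096 sites, Nc^2 = 9 complex = 18 real numbers per site) the squared-and-summed defect is of order V * 18 * eps_f^2 ~ 4096 * 18 * 1e-14 ~ 7e-10, whereas Defect1 and Defect3 (pure double arithmetic, and after ProjectOnGroup) should be many orders smaller.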

View File

@@ -108,18 +108,8 @@ int main (int argc, char ** argv)
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu(UGrid);
if( argc > 1 && argv[1][0] != '-' )
{
std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
FieldMetaData header;
NerscIO::readConfiguration(Umu, header, argv[1]);
}
else
{
std::cout<<GridLogMessage <<"Using cold configuration"<<std::endl;
SU<Nc>::ColdConfiguration(Umu);
// SU<Nc>::HotConfiguration(RNG4,Umu);
}
SU<Nc>::ColdConfiguration(Umu);
// SU<Nc>::HotConfiguration(RNG4,Umu);
RealD mass=0.3;
RealD M5 =1.0;

View File

@@ -81,10 +81,6 @@ int main(int argc, char **argv) {
// that have a complex construction
// standard
RealD beta = 5.6 ;
const int nu = 3;
std::vector<int> twists(Nd,0);
twists[nu] = 1;
ConjugateGimplD::setDirections(twists);
ConjugateWilsonGaugeActionR Waction(beta);
const int Ls = 8;
@@ -97,6 +93,9 @@ int main(int argc, char **argv) {
// temporarily need a gauge field
LatticeGaugeField U(GridPtr);
const int nu = 3;
std::vector<int> twists(Nd,0);
twists[nu] = 1;
FermionAction::ImplParams params;
params.twists = twists;
Real mass=0.04;

View File

@@ -79,10 +79,6 @@ int main(int argc, char **argv) {
// that have a complex construction
// standard
RealD beta = 2.6 ;
const int nu = 3;
std::vector<int> twists(Nd,0);
twists[nu] = 1;
ConjugateGimplD::setDirections(twists);
ConjugateIwasakiGaugeActionR Waction(beta);

View File

@@ -80,9 +80,6 @@ int main(int argc, char **argv) {
// that have a complex construction
// standard
RealD beta = 5.6 ;
std::vector<int> twists(Nd,0);
twists[3] = 1;
ConjugateGimplD::setDirections(twists);
ConjugateWilsonGaugeActionR Waction(beta);

View File

@@ -1,475 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/solver/Test_coarse_even_odd.cc
Copyright (C) 2015-2020
Author: Daniel Richtmann <daniel.richtmann@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
#ifndef NBASIS
#define NBASIS 40
#endif
// NOTE: The tests in this file are written in analogy to
// - tests/core/Test_wilson_even_odd.cc
// - tests/core/Test_wilson_clover.cc
std::vector<int> readFromCommandlineIvec(int* argc,
char*** argv,
std::string&& option,
const std::vector<int>& defaultValue) {
std::string arg;
std::vector<int> ret(defaultValue);
if(GridCmdOptionExists(*argv, *argv + *argc, option)) {
arg = GridCmdOptionPayload(*argv, *argv + *argc, option);
GridCmdOptionIntVector(arg, ret);
}
return ret;
}
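The helper above only overrides its default when the option is present; for example, assuming Grid's usual dotted integer-vector syntax (the exact separator is whatever GridCmdOptionIntVector accepts), passing --blocksize 4.4.4.2 together with --grid 16.16.16.16 would give a 4^3 x 8 coarse lattice below.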
int main(int argc, char** argv) {
Grid_init(&argc, &argv);
/////////////////////////////////////////////////////////////////////////////
// Read from command line //
/////////////////////////////////////////////////////////////////////////////
const int nbasis = NBASIS; static_assert((nbasis & 0x1) == 0, "");
const int nb = nbasis/2;
Coordinate blockSize = readFromCommandlineIvec(&argc, &argv, "--blocksize", {2, 2, 2, 2});
std::cout << GridLogMessage << "Compiled with nbasis = " << nbasis << " -> nb = " << nb << std::endl;
/////////////////////////////////////////////////////////////////////////////
// General setup //
/////////////////////////////////////////////////////////////////////////////
Coordinate clatt = GridDefaultLatt();
for(int d=0; d<clatt.size(); d++) clatt[d] = clatt[d] / blockSize[d];
GridCartesian* Grid_f = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
GridCartesian* Grid_c = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* RBGrid_f = SpaceTimeGrid::makeFourDimRedBlackGrid(Grid_f);
GridRedBlackCartesian* RBGrid_c = SpaceTimeGrid::makeFourDimRedBlackGrid(Grid_c);
std::cout << GridLogMessage << "Grid_f:" << std::endl; Grid_f->show_decomposition();
std::cout << GridLogMessage << "Grid_c:" << std::endl; Grid_c->show_decomposition();
std::cout << GridLogMessage << "RBGrid_f:" << std::endl; RBGrid_f->show_decomposition();
std::cout << GridLogMessage << "RBGrid_c:" << std::endl; RBGrid_c->show_decomposition();
GridParallelRNG pRNG_f(Grid_f);
GridParallelRNG pRNG_c(Grid_c);
std::vector<int> seeds({1, 2, 3, 4});
pRNG_f.SeedFixedIntegers(seeds);
pRNG_c.SeedFixedIntegers(seeds);
/////////////////////////////////////////////////////////////////////////////
// Setup of Dirac Matrix and Operator //
/////////////////////////////////////////////////////////////////////////////
LatticeGaugeField Umu(Grid_f); SU3::HotConfiguration(pRNG_f, Umu);
RealD checkTolerance = (getPrecision<LatticeFermion>::value == 1) ? 1e-7 : 1e-15;
RealD mass = -0.30;
RealD csw = 1.9192;
WilsonCloverFermionR Dwc(Umu, *Grid_f, *RBGrid_f, mass, csw, csw);
MdagMLinearOperator<WilsonCloverFermionR, LatticeFermion> MdagMOp_Dwc(Dwc);
/////////////////////////////////////////////////////////////////////////////
// Type definitions //
/////////////////////////////////////////////////////////////////////////////
typedef Aggregation<vSpinColourVector, vTComplex, nbasis> Aggregates;
typedef CoarsenedMatrix<vSpinColourVector, vTComplex, nbasis> CoarseDiracMatrix;
typedef CoarseDiracMatrix::CoarseVector CoarseVector;
/////////////////////////////////////////////////////////////////////////////
// Setup of Aggregation //
/////////////////////////////////////////////////////////////////////////////
Aggregates Aggs(Grid_c, Grid_f, 0);
{
LatticeFermion tmp(Aggs.subspace[0].Grid());
for(int n = 0; n < nb; n++) {
gaussian(pRNG_f, Aggs.subspace[n]);
G5C(tmp, Aggs.subspace[n]);
axpby(Aggs.subspace[n + nb], 0.5, -0.5, Aggs.subspace[n], tmp);
axpby(Aggs.subspace[n], 0.5, 0.5, Aggs.subspace[n], tmp);
}
}
/////////////////////////////////////////////////////////////////////////////
// Setup of CoarsenedMatrix and Operator //
/////////////////////////////////////////////////////////////////////////////
const int hermitian = 0;
CoarseDiracMatrix Dc(*Grid_c, *RBGrid_c, hermitian);
Dc.CoarsenOperator(Grid_f, MdagMOp_Dwc, Aggs);
MdagMLinearOperator<CoarseDiracMatrix, CoarseVector> MdagMOp_Dc(Dc);
/////////////////////////////////////////////////////////////////////////////
// Setup vectors used in all tests //
/////////////////////////////////////////////////////////////////////////////
CoarseVector src(Grid_c); random(pRNG_c, src);
CoarseVector diff(Grid_c); diff = Zero();
/////////////////////////////////////////////////////////////////////////////
// Start of tests //
/////////////////////////////////////////////////////////////////////////////
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test Dhop + Mdiag = Munprec" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector phi(Grid_c); phi = Zero();
CoarseVector chi(Grid_c); chi = Zero();
CoarseVector res(Grid_c); res = Zero();
CoarseVector ref(Grid_c); ref = Zero();
Dc.Mdiag(src, phi); std::cout << GridLogMessage << "Applied Mdiag" << std::endl;
Dc.Dhop(src, chi, DaggerNo); std::cout << GridLogMessage << "Applied Dhop" << std::endl;
Dc.M(src, ref); std::cout << GridLogMessage << "Applied M" << std::endl;
res = phi + chi;
diff = ref - res;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(ref);
std::cout << GridLogMessage << "norm2(Munprec), norm2(Dhop + Mdiag), abs. deviation, rel. deviation: "
<< norm2(ref) << " " << norm2(res) << " " << absDev << " " << relDev << " -> check "
<< ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test Meo + Moe = Dhop" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector src_e(RBGrid_c); src_e = Zero();
CoarseVector src_o(RBGrid_c); src_o = Zero();
CoarseVector res_e(RBGrid_c); res_e = Zero();
CoarseVector res_o(RBGrid_c); res_o = Zero();
CoarseVector res(Grid_c); res = Zero();
CoarseVector ref(Grid_c); ref = Zero();
pickCheckerboard(Even, src_e, src);
pickCheckerboard(Odd, src_o, src);
Dc.Meooe(src_e, res_o); std::cout << GridLogMessage << "Applied Meo" << std::endl;
Dc.Meooe(src_o, res_e); std::cout << GridLogMessage << "Applied Moe" << std::endl;
Dc.Dhop(src, ref, DaggerNo); std::cout << GridLogMessage << "Applied Dhop" << std::endl;
setCheckerboard(res, res_o);
setCheckerboard(res, res_e);
diff = ref - res;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(ref);
std::cout << GridLogMessage << "norm2(Dhop), norm2(Meo + Moe), abs. deviation, rel. deviation: "
<< norm2(ref) << " " << norm2(res) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test |(Im(v^dag M^dag M v)| = 0" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector tmp(Grid_c); tmp = Zero();
CoarseVector phi(Grid_c); phi = Zero();
Dc.M(src, tmp); std::cout << GridLogMessage << "Applied M" << std::endl;
Dc.Mdag(tmp, phi); std::cout << GridLogMessage << "Applied Mdag" << std::endl;
std::cout << GridLogMessage << "src = " << norm2(src) << " tmp = " << norm2(tmp) << " phi = " << norm2(phi) << std::endl;
ComplexD dot = innerProduct(src, phi);
auto relDev = abs(imag(dot)) / abs(real(dot));
std::cout << GridLogMessage << "Re(v^dag M^dag M v), Im(v^dag M^dag M v), rel.deviation: "
<< real(dot) << " " << imag(dot) << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test |(Im(v^dag Mooee^dag Mooee v)| = 0 (full grid)" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector tmp(Grid_c); tmp = Zero();
CoarseVector phi(Grid_c); phi = Zero();
Dc.Mooee(src, tmp); std::cout << GridLogMessage << "Applied Mooee" << std::endl;
Dc.MooeeDag(tmp, phi); std::cout << GridLogMessage << "Applied MooeeDag" << std::endl;
ComplexD dot = innerProduct(src, phi);
auto relDev = abs(imag(dot)) / abs(real(dot));
std::cout << GridLogMessage << "Re(v^dag Mooee^dag Mooee v), Im(v^dag Mooee^dag Mooee v), rel.deviation: "
<< real(dot) << " " << imag(dot) << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test MooeeInv Mooee = 1 (full grid)" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector tmp(Grid_c); tmp = Zero();
CoarseVector phi(Grid_c); phi = Zero();
Dc.Mooee(src, tmp); std::cout << GridLogMessage << "Applied Mooee" << std::endl;
Dc.MooeeInv(tmp, phi); std::cout << GridLogMessage << "Applied MooeeInv" << std::endl;
diff = src - phi;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(src);
std::cout << GridLogMessage << "norm2(src), norm2(MooeeInv Mooee src), abs. deviation, rel. deviation: "
<< norm2(src) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeooeDagger is the dagger of Meooe by requiring" << std::endl;
std::cout << GridLogMessage << "= < phi | Meooe | chi > * = < chi | Meooe^dag| phi>" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
// clang-format off
CoarseVector phi(Grid_c); random(pRNG_c, phi);
CoarseVector chi(Grid_c); random(pRNG_c, chi);
CoarseVector chi_e(RBGrid_c); chi_e = Zero();
CoarseVector chi_o(RBGrid_c); chi_o = Zero();
CoarseVector dchi_e(RBGrid_c); dchi_e = Zero();
CoarseVector dchi_o(RBGrid_c); dchi_o = Zero();
CoarseVector phi_e(RBGrid_c); phi_e = Zero();
CoarseVector phi_o(RBGrid_c); phi_o = Zero();
CoarseVector dphi_e(RBGrid_c); dphi_e = Zero();
CoarseVector dphi_o(RBGrid_c); dphi_o = Zero();
// clang-format on
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
Dc.Meooe(chi_e, dchi_o); std::cout << GridLogMessage << "Applied Meo" << std::endl;
Dc.Meooe(chi_o, dchi_e); std::cout << GridLogMessage << "Applied Moe" << std::endl;
Dc.MeooeDag(phi_e, dphi_o); std::cout << GridLogMessage << "Applied MeoDag" << std::endl;
Dc.MeooeDag(phi_o, dphi_e); std::cout << GridLogMessage << "Applied MoeDag" << std::endl;
ComplexD phiDchi_e = innerProduct(phi_e, dchi_e);
ComplexD phiDchi_o = innerProduct(phi_o, dchi_o);
ComplexD chiDphi_e = innerProduct(chi_e, dphi_e);
ComplexD chiDphi_o = innerProduct(chi_o, dphi_o);
std::cout << GridLogDebug << "norm dchi_e = " << norm2(dchi_e) << " norm dchi_o = " << norm2(dchi_o) << " norm dphi_e = " << norm2(dphi_e)
<< " norm dphi_o = " << norm2(dphi_e) << std::endl;
std::cout << GridLogMessage << "e " << phiDchi_e << " " << chiDphi_e << std::endl;
std::cout << GridLogMessage << "o " << phiDchi_o << " " << chiDphi_o << std::endl;
std::cout << GridLogMessage << "phiDchi_e - conj(chiDphi_o) " << phiDchi_e - conj(chiDphi_o) << std::endl;
std::cout << GridLogMessage << "phiDchi_o - conj(chiDphi_e) " << phiDchi_o - conj(chiDphi_e) << std::endl;
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test MooeeInv Mooee = 1 (checkerboards separately)" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector chi(Grid_c); random(pRNG_c, chi);
CoarseVector tmp(Grid_c); tmp = Zero();
CoarseVector phi(Grid_c); phi = Zero();
CoarseVector chi_e(RBGrid_c); chi_e = Zero();
CoarseVector chi_o(RBGrid_c); chi_o = Zero();
CoarseVector phi_e(RBGrid_c); phi_e = Zero();
CoarseVector phi_o(RBGrid_c); phi_o = Zero();
CoarseVector tmp_e(RBGrid_c); tmp_e = Zero();
CoarseVector tmp_o(RBGrid_c); tmp_o = Zero();
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, tmp_e, tmp);
pickCheckerboard(Odd, tmp_o, tmp);
Dc.Mooee(chi_e, tmp_e); std::cout << GridLogMessage << "Applied Mee" << std::endl;
Dc.MooeeInv(tmp_e, phi_e); std::cout << GridLogMessage << "Applied MeeInv" << std::endl;
Dc.Mooee(chi_o, tmp_o); std::cout << GridLogMessage << "Applied Moo" << std::endl;
Dc.MooeeInv(tmp_o, phi_o); std::cout << GridLogMessage << "Applied MooInv" << std::endl;
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
diff = chi - phi;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(chi);
std::cout << GridLogMessage << "norm2(chi), norm2(MeeInv Mee chi), abs. deviation, rel. deviation: "
<< norm2(chi) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test MooeeDag MooeeInvDag = 1 (checkerboards separately)" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector chi(Grid_c); random(pRNG_c, chi);
CoarseVector tmp(Grid_c); tmp = Zero();
CoarseVector phi(Grid_c); phi = Zero();
CoarseVector chi_e(RBGrid_c); chi_e = Zero();
CoarseVector chi_o(RBGrid_c); chi_o = Zero();
CoarseVector phi_e(RBGrid_c); phi_e = Zero();
CoarseVector phi_o(RBGrid_c); phi_o = Zero();
CoarseVector tmp_e(RBGrid_c); tmp_e = Zero();
CoarseVector tmp_o(RBGrid_c); tmp_o = Zero();
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, tmp_e, tmp);
pickCheckerboard(Odd, tmp_o, tmp);
Dc.MooeeDag(chi_e, tmp_e); std::cout << GridLogMessage << "Applied MeeDag" << std::endl;
Dc.MooeeInvDag(tmp_e, phi_e); std::cout << GridLogMessage << "Applied MeeInvDag" << std::endl;
Dc.MooeeDag(chi_o, tmp_o); std::cout << GridLogMessage << "Applied MooDag" << std::endl;
Dc.MooeeInvDag(tmp_o, phi_o); std::cout << GridLogMessage << "Applied MooInvDag" << std::endl;
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
diff = chi - phi;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(chi);
std::cout << GridLogMessage << "norm2(chi), norm2(MeeDag MeeInvDag chi), abs. deviation, rel. deviation: "
<< norm2(chi) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test Meo + Moe + Moo + Mee = Munprec" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector chi(Grid_c); chi = Zero();
CoarseVector phi(Grid_c); phi = Zero();
CoarseVector ref(Grid_c); ref = Zero();
CoarseVector src_e(RBGrid_c); src_e = Zero();
CoarseVector src_o(RBGrid_c); src_o = Zero();
CoarseVector phi_e(RBGrid_c); phi_e = Zero();
CoarseVector phi_o(RBGrid_c); phi_o = Zero();
CoarseVector chi_e(RBGrid_c); chi_e = Zero();
CoarseVector chi_o(RBGrid_c); chi_o = Zero();
pickCheckerboard(Even, src_e, src);
pickCheckerboard(Odd, src_o, src);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
// M phi = (Mooee src_e + Meooe src_o , Mooee src_o + Meooe src_e)
Dc.M(src, ref); // Reference result from the unpreconditioned operator
// EO matrix
Dc.Mooee(src_e, chi_e); std::cout << GridLogMessage << "Applied Mee" << std::endl;
Dc.Mooee(src_o, chi_o); std::cout << GridLogMessage << "Applied Moo" << std::endl;
Dc.Meooe(src_o, phi_e); std::cout << GridLogMessage << "Applied Moe" << std::endl;
Dc.Meooe(src_e, phi_o); std::cout << GridLogMessage << "Applied Meo" << std::endl;
phi_o += chi_o;
phi_e += chi_e;
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
std::cout << GridLogDebug << "norm phi_e = " << norm2(phi_e) << " norm phi_o = " << norm2(phi_o) << " norm phi = " << norm2(phi) << std::endl;
diff = ref - phi;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(ref);
std::cout << GridLogMessage << "norm2(Dunprec), norm2(Deoprec), abs. deviation, rel. deviation: "
<< norm2(ref) << " " << norm2(phi) << " " << absDev << " " << relDev
<< " -> check " << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
assert(relDev <= checkTolerance);
}
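The identity tested above is just the checkerboard block decomposition of the coarse operator: writing src = (src_e, src_o),

    M src = ( Mee src_e + Meo src_o ,  Moe src_e + Moo src_o ),

so accumulating the Mooee and Meooe applications per checkerboard and recombining with setCheckerboard must reproduce the unpreconditioned M exactly.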
{
std::cout << GridLogMessage << "===========================================================================" << std::endl;
std::cout << GridLogMessage << "= Test MpcDagMpc is hermitian" << std::endl;
std::cout << GridLogMessage << "===========================================================================" << std::endl;
CoarseVector phi(Grid_c); random(pRNG_c, phi);
CoarseVector chi(Grid_c); random(pRNG_c, chi);
CoarseVector chi_e(RBGrid_c); chi_e = Zero();
CoarseVector chi_o(RBGrid_c); chi_o = Zero();
CoarseVector dchi_e(RBGrid_c); dchi_e = Zero();
CoarseVector dchi_o(RBGrid_c); dchi_o = Zero();
CoarseVector phi_e(RBGrid_c); phi_e = Zero();
CoarseVector phi_o(RBGrid_c); phi_o = Zero();
CoarseVector dphi_e(RBGrid_c); dphi_e = Zero();
CoarseVector dphi_o(RBGrid_c); dphi_o = Zero();
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
SchurDiagMooeeOperator<CoarseDiracMatrix,CoarseVector> HermOpEO(Dc);
HermOpEO.MpcDagMpc(chi_e, dchi_e); std::cout << GridLogMessage << "Applied MpcDagMpc to chi_e" << std::endl;
HermOpEO.MpcDagMpc(chi_o, dchi_o); std::cout << GridLogMessage << "Applied MpcDagMpc to chi_o" << std::endl;
HermOpEO.MpcDagMpc(phi_e, dphi_e); std::cout << GridLogMessage << "Applied MpcDagMpc to phi_e" << std::endl;
HermOpEO.MpcDagMpc(phi_o, dphi_o); std::cout << GridLogMessage << "Applied MpcDagMpc to phi_o" << std::endl;
ComplexD phiDchi_e = innerProduct(phi_e, dchi_e);
ComplexD phiDchi_o = innerProduct(phi_o, dchi_o);
ComplexD chiDphi_e = innerProduct(chi_e, dphi_e);
ComplexD chiDphi_o = innerProduct(chi_o, dphi_o);
std::cout << GridLogMessage << "e " << phiDchi_e << " " << chiDphi_e << std::endl;
std::cout << GridLogMessage << "o " << phiDchi_o << " " << chiDphi_o << std::endl;
std::cout << GridLogMessage << "phiDchi_e - conj(chiDphi_e) " << phiDchi_e - conj(chiDphi_e) << std::endl;
std::cout << GridLogMessage << "phiDchi_o - conj(chiDphi_o) " << phiDchi_o - conj(chiDphi_o) << std::endl;
}
Grid_finalize();
}
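For reference, the SchurDiagMooeeOperator used in the final test applies the even-odd Schur complement, Mpc = Moo - Moe Mee^{-1} Meo, restricted to a single checkerboard (and likewise MpcDag); MpcDagMpc = Mpc^dag Mpc is therefore Hermitian by construction for any coarse operator, and the inner-product symmetry checked above is exactly that statement.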

View File

@@ -222,16 +222,9 @@ int main (int argc, char ** argv)
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *CoarseCoarse5d = SpaceTimeGrid::makeFiveDimGrid(1,CoarseCoarse4d);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
GridRedBlackCartesian *CoarseCoarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(CoarseCoarse4d);
GridRedBlackCartesian *CoarseCoarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,CoarseCoarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
@@ -289,7 +282,8 @@ int main (int argc, char ** argv)
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
Level1Op LDOp(*Coarse5d,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
//////////////////////////////////////////////////
// Deflate the coarse space. Recursive multigrid?
@@ -317,11 +311,12 @@ int main (int argc, char ** argv)
}
}
Level2Op L2Op(*CoarseCoarse5d,*CoarseCoarse5dRB,1); // Hermitian matrix
Level2Op L2Op(*CoarseCoarse5d,1); // Hermitian matrix
typedef Level2Op::CoarseVector CoarseCoarseVector;
HermitianLinearOperator<Level1Op,CoarseVector> L1LinOp(LDOp);
L2Op.CoarsenOperator(Coarse5d,L1LinOp,CoarseAggregates);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Running CoarseCoarse grid Lanczos "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;

View File

@@ -1,397 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_hdcr.cc
Copyright (C) 2015
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
using namespace std;
using namespace Grid;
/* Params
* Grid:
* block1(4)
* block2(4)
*
* Subspace
* * Fine : Subspace(nbasis,hi,lo,order,first,step) -- 32, 60,0.02,500,100,100
* * Coarse: Subspace(nbasis,hi,lo,order,first,step) -- 32, 18,0.02,500,100,100
* Smoother:
* * Fine: Cheby(hi, lo, order) -- 60,0.5,10
* * Coarse: Cheby(hi, lo, order) -- 12,0.1,4
* Lanczos:
* CoarseCoarse IRL( Nk, Nm, Nstop, poly(lo,hi,order)) 24,36,24,0.002,4.0,61
*/
RealD InverseApproximation(RealD x){
return 1.0/x;
}
template<class Field> class SolverWrapper : public LinearFunction<Field> {
private:
CheckerBoardedSparseMatrixBase<Field> & _Matrix;
SchurRedBlackBase<Field> & _Solver;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations trick
/////////////////////////////////////////////////////
SolverWrapper(CheckerBoardedSparseMatrixBase<Field> &Matrix,
SchurRedBlackBase<Field> &Solver)
: _Matrix(Matrix), _Solver(Solver) {};
void operator() (const Field &in, Field &out){
_Solver(_Matrix,in,out); // Mdag M out = Mdag in
}
};
template<class Field,class Matrix> class ChebyshevSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & _SmootherMatrix;
FineOperator & _SmootherOperator;
Chebyshev<Field> Cheby;
ChebyshevSmoother(RealD _lo,RealD _hi,int _ord, FineOperator &SmootherOperator,Matrix &SmootherMatrix) :
_SmootherOperator(SmootherOperator),
_SmootherMatrix(SmootherMatrix),
Cheby(_lo,_hi,_ord,InverseApproximation)
{};
void operator() (const Field &in, Field &out)
{
Field tmp(in.Grid());
MdagMLinearOperator<Matrix,Field> MdagMOp(_SmootherMatrix);
_SmootherOperator.AdjOp(in,tmp);
Cheby(MdagMOp,tmp,out);
}
};
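The smoother above rests on one line of algebra: since M^{-1} = (M^dag M)^{-1} M^dag, applying a Chebyshev polynomial p with p(x) ~ 1/x on [_lo,_hi] to M^dag in gives

    out = p(M^dag M) M^dag in ~ (M^dag M)^{-1} M^dag in = M^{-1} in,

a fixed-cost normal-equations approximation to the inverse, accurate on the part of the spectrum inside [_lo,_hi]; the (lo, hi, order) triples recorded in the Params comment at the top of the file are exactly these knobs.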
template<class Field,class Matrix> class MirsSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & SmootherMatrix;
FineOperator & SmootherOperator;
RealD tol;
RealD shift;
int maxit;
MirsSmoother(RealD _shift,RealD _tol,int _maxit,FineOperator &_SmootherOperator,Matrix &_SmootherMatrix) :
shift(_shift),tol(_tol),maxit(_maxit),
SmootherOperator(_SmootherOperator),
SmootherMatrix(_SmootherMatrix)
{};
void operator() (const Field &in, Field &out)
{
ZeroGuesser<Field> Guess;
ConjugateGradient<Field> CG(tol,maxit,false);
Field src(in.Grid());
ShiftedMdagMLinearOperator<SparseMatrixBase<Field>,Field> MdagMOp(SmootherMatrix,shift);
SmootherOperator.AdjOp(in,src);
Guess(src,out);
CG(MdagMOp,src,out);
}
};
template<class Fobj,class CComplex,int nbasis, class Matrix, class Guesser, class CoarseSolver>
class MultiGridPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef CoarsenedMatrix<Fobj,CComplex,nbasis> CoarseOperator;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
Aggregates & _Aggregates;
CoarseOperator & _CoarseOperator;
Matrix & _FineMatrix;
FineOperator & _FineOperator;
Guesser & _Guess;
FineSmoother & _Smoother;
CoarseSolver & _CoarseSolve;
int level; void Level(int lv) {level = lv; };
#define GridLogLevel std::cout << GridLogMessage <<std::string(level,'\t')<< " Level "<<level <<" "
MultiGridPreconditioner(Aggregates &Agg, CoarseOperator &Coarse,
FineOperator &Fine,Matrix &FineMatrix,
FineSmoother &Smoother,
Guesser &Guess_,
CoarseSolver &CoarseSolve_)
: _Aggregates(Agg),
_CoarseOperator(Coarse),
_FineOperator(Fine),
_FineMatrix(FineMatrix),
_Smoother(Smoother),
_Guess(Guess_),
_CoarseSolve(CoarseSolve_),
level(1) { }
virtual void operator()(const FineField &in, FineField & out)
{
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
FineField vec1(in.Grid());
FineField vec2(in.Grid());
double t;
// Fine Smoother
t=-usecond();
_Smoother(in,out);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// Fine to Coarse
t=-usecond();
_Aggregates.ProjectToSubspace (Csrc,vec1);
t+=usecond();
GridLogLevel << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction
t=-usecond();
_CoarseSolve(Csrc,Csol);
t+=usecond();
GridLogLevel << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine
t=-usecond();
_Aggregates.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
GridLogLevel << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// Fine Smoother
t=-usecond();
_Smoother(vec1,vec2);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
}
};
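Unrolled, the operator() above is the standard two-level cycle with pre- and post-smoothing: with smoother S, fine operator A, restriction R (ProjectToSubspace), prolongation P (PromoteFromSubspace) and coarse solve A_c^{-1},

    out  = S in                          // pre-smooth
    out += P A_c^{-1} R (in - A out)     // coarse-grid correction
    out += S (in - A out)                // post-smooth the new residual

which is what the three timed sections report.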
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=16;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
///////////////////////////////////////////////////
// Construct a coarsened grid; utility for this?
///////////////////////////////////////////////////
std::vector<int> block ({2,2,2,2});
std::vector<int> blockc ({2,2,2,2});
const int nbasis= 32;
const int nbasisc= 32;
auto clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/block[d];
}
auto cclatt = clatt;
for(int d=0;d<clatt.size();d++){
cclatt[d] = clatt[d]/blockc[d];
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
// GridCartesian *CoarseCoarse5d = SpaceTimeGrid::makeFiveDimGrid(1,CoarseCoarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); gaussian(RNG5,src);// src=src+g5*src;
LatticeFermion result(FGrid);
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("./ckpoint_lat.4000");
//std::string file("./ckpoint_lat.1000");
NerscIO::readConfiguration(Umu,header,file);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
RealD mass=0.001;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> CoarseOperator;
typedef CoarseOperator::CoarseVector CoarseVector;
typedef CoarseOperator::siteVector siteVector;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);
LatticeFermion B(FGrid);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.002,1000,800,100,0.0);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.02,1000,800,100,0.0);
Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.05,500,200,150,0.0);//
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.01,1000,100,100,0.0); // Slightly faster
for(int n=0;n<nb;n++){
std::cout << GridLogMessage << " G5R5 "<<n<<std::endl;
G5R5(Aggregates.subspace[n+nb],Aggregates.subspace[n]);
std::cout << GridLogMessage << " Projection "<<n<<std::endl;
A = Aggregates.subspace[n];
B = Aggregates.subspace[n+nb];
std::cout << GridLogMessage << " Copy "<<n<<std::endl;
Aggregates.subspace[n] = A+B; // 1+G5 // eigen value of G5R5 is +1
std::cout << GridLogMessage << " P+ "<<n<<std::endl;
Aggregates.subspace[n+nb]= A-B; // 1-G5 // eigen value of G5R5 is -1
std::cout << GridLogMessage << " P- "<<n<<std::endl;
}
}
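The doubling above exploits (G5R5)^2 = 1: with B = G5R5 A, the combinations A + B and A - B are (up to a factor of 2) the projections (1 +/- G5R5) A onto the +1 and -1 eigenspaces, so the 2*nb basis vectors carry definite G5R5 chirality and the coarsened operator inherits a corresponding block structure.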
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building coarse representation of Indef operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> Level1Op;
typedef CoarsenedMatrix<siteVector,iScalar<vTComplex>,nbasisc> Level2Op;
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
std::cout << " Making 5D coarse RB grid " <<std::endl;
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
std::cout << " Made 5D coarse RB grid " <<std::endl;
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
//////////////////////////////////////////////////
// Deflate the coarse space. Recursive multigrid?
//////////////////////////////////////////////////
typedef Aggregation<siteVector,iScalar<vTComplex>,nbasisc> CoarseSubspace;
// CoarseSubspace CoarseAggregates(CoarseCoarse5d,Coarse5d,0);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Build deflation space in coarse operator "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<CoarseOperator,CoarseVector> PosdefLdop(LDOp);
typedef Level2Op::CoarseVector CoarseCoarseVector;
CoarseVector c_src(Coarse5d); c_src=1.0;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building 3 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector> , SolverWrapper<CoarseVector> > TwoLevelMG;
typedef MultiGridPreconditioner<siteVector,iScalar<vTComplex>,nbasisc,Level1Op, DeflatedGuesser<CoarseCoarseVector>, NormalEquations<CoarseCoarseVector> > CoarseMG;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector>, LinearFunction<CoarseVector> > ThreeLevelMG;
ChebyshevSmoother<LatticeFermion,DomainWallFermionR> FineSmoother(0.5,60.0,12,HermIndefOp,Ddwf);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling 2 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
ZeroGuesser<CoarseVector> CoarseZeroGuesser;
ConjugateGradient<CoarseVector> CoarseCG(0.005,1000);
// SchurDiagMooeeOperator<CoarseOperator,CoarseVector> CoarseMpcDagMpc(LDOp);
SchurRedBlackDiagMooeeSolve<CoarseVector> CoarseRBCG(CoarseCG);
SolverWrapper<CoarseVector> CoarseSolver(LDOp,CoarseRBCG);
// NormalEquations<CoarseVector> CoarseCGNE(LDOp,CoarseCG,CoarseZeroGuesser);
TwoLevelMG TwoLevelPrecon(Aggregates, LDOp,
HermIndefOp,Ddwf,
FineSmoother,
CoarseZeroGuesser,
CoarseSolver);
TwoLevelPrecon.Level(1);
PrecGeneralisedConjugateResidual<LatticeFermion> l1PGCR(1.0e-8,20,HermIndefOp,TwoLevelPrecon,16,16);
l1PGCR.Level(1);
l1PGCR(src,result);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
ConjugateGradient<LatticeFermion> pCG(1.0e-8,60000);
result=Zero();
// pCG(HermDefOp,src,result);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling red black CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
LatticeFermion src_o(FrbGrid);
LatticeFermion result_o(FrbGrid);
pickCheckerboard(Odd,src_o,src);
result_o=Zero();
SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
// pCG(HermOpEO,src_o,result_o);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Fine PowerMethod "<< std::endl;
PowerMethod<LatticeFermion> PM; PM(HermDefOp,src);
std::cout<<GridLogMessage << " Coarse PowerMethod "<< std::endl;
PowerMethod<CoarseVector> cPM; cPM(PosdefLdop,c_src);
// std::cout<<GridLogMessage << " CoarseCoarse PowerMethod "<< std::endl;
// PowerMethod<CoarseCoarseVector> ccPM; ccPM(IRLHermOpL2,cc_src);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Grid_finalize();
}
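The PowerMethod calls at the end estimate the largest eigenvalue of the positive-definite fine and coarse operators by repeated application; those estimates are what justify the Chebyshev upper bounds chosen earlier (60.0 on the fine grid), so a change of mass or gauge configuration would show up there first.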

View File

@@ -1,477 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_hdcr.cc
Copyright (C) 2015
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
using namespace std;
using namespace Grid;
/* Params
* Grid:
* block1(4)
* block2(4)
*
* Subspace
* * Fine : Subspace(nbasis,hi,lo,order,first,step) -- 32, 60,0.02,500,100,100
* * Coarse: Subspace(nbasis,hi,lo,order,first,step) -- 32, 18,0.02,500,100,100
* Smoother:
* * Fine: Cheby(hi, lo, order) -- 60,0.5,10
* * Coarse: Cheby(hi, lo, order) -- 12,0.1,4
* Lanczos:
* CoarseCoarse IRL( Nk, Nm, Nstop, poly(lo,hi,order)) 24,36,24,0.002,4.0,61
*/
RealD InverseApproximation(RealD x){
return 1.0/x;
}
template<class Field,class Matrix> class ChebyshevSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & _SmootherMatrix;
FineOperator & _SmootherOperator;
Chebyshev<Field> Cheby;
ChebyshevSmoother(RealD _lo,RealD _hi,int _ord, FineOperator &SmootherOperator,Matrix &SmootherMatrix) :
_SmootherOperator(SmootherOperator),
_SmootherMatrix(SmootherMatrix),
Cheby(_lo,_hi,_ord,InverseApproximation)
{};
void operator() (const Field &in, Field &out)
{
Field tmp(in.Grid());
MdagMLinearOperator<Matrix,Field> MdagMOp(_SmootherMatrix);
_SmootherOperator.AdjOp(in,tmp);
Cheby(MdagMOp,tmp,out);
}
};
template<class Field,class Matrix> class MirsSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & SmootherMatrix;
FineOperator & SmootherOperator;
RealD tol;
RealD shift;
int maxit;
MirsSmoother(RealD _shift,RealD _tol,int _maxit,FineOperator &_SmootherOperator,Matrix &_SmootherMatrix) :
shift(_shift),tol(_tol),maxit(_maxit),
SmootherOperator(_SmootherOperator),
SmootherMatrix(_SmootherMatrix)
{};
void operator() (const Field &in, Field &out)
{
ZeroGuesser<Field> Guess;
ConjugateGradient<Field> CG(tol,maxit,false);
Field src(in.Grid());
ShiftedMdagMLinearOperator<SparseMatrixBase<Field>,Field> MdagMOp(SmootherMatrix,shift);
SmootherOperator.AdjOp(in,src);
Guess(src,out);
CG(MdagMOp,src,out);
}
};
template<class Fobj,class CComplex,int nbasis, class Matrix, class Guesser, class CoarseSolver>
class MultiGridPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef CoarsenedMatrix<Fobj,CComplex,nbasis> CoarseOperator;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
Aggregates & _Aggregates;
CoarseOperator & _CoarseOperator;
Matrix & _FineMatrix;
FineOperator & _FineOperator;
Guesser & _Guess;
FineSmoother & _Smoother;
CoarseSolver & _CoarseSolve;
int level; void Level(int lv) {level = lv; };
#define GridLogLevel std::cout << GridLogMessage <<std::string(level,'\t')<< " Level "<<level <<" "
MultiGridPreconditioner(Aggregates &Agg, CoarseOperator &Coarse,
FineOperator &Fine,Matrix &FineMatrix,
FineSmoother &Smoother,
Guesser &Guess_,
CoarseSolver &CoarseSolve_)
: _Aggregates(Agg),
_CoarseOperator(Coarse),
_FineOperator(Fine),
_FineMatrix(FineMatrix),
_Smoother(Smoother),
_Guess(Guess_),
_CoarseSolve(CoarseSolve_),
level(1) { }
virtual void operator()(const FineField &in, FineField & out)
{
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
FineField vec1(in.Grid());
FineField vec2(in.Grid());
double t;
// Fine Smoother
t=-usecond();
_Smoother(in,out);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// Fine to Coarse
t=-usecond();
_Aggregates.ProjectToSubspace (Csrc,vec1);
t+=usecond();
GridLogLevel << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction
t=-usecond();
_CoarseSolve(Csrc,Csol);
t+=usecond();
GridLogLevel << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine
t=-usecond();
_Aggregates.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
GridLogLevel << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// Fine Smoother
t=-usecond();
_Smoother(vec1,vec2);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
}
};
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=24;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
///////////////////////////////////////////////////
// Construct a coarsened grid; utility for this?
///////////////////////////////////////////////////
std::vector<int> block ({2,2,2,2});
std::vector<int> blockc ({2,2,2,2});
const int nbasis= 40;
const int nbasisc= 40;
auto clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/block[d];
}
auto cclatt = clatt;
for(int d=0;d<clatt.size();d++){
cclatt[d] = clatt[d]/blockc[d];
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
// GridCartesian *CoarseCoarse5d = SpaceTimeGrid::makeFiveDimGrid(1,CoarseCoarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); gaussian(RNG5,src);// src=src+g5*src;
LatticeFermion result(FGrid);
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
// std::string file("./ckpoint_lat.4000");
// std::string file("./ckpoint_lat.1000");
// NerscIO::readConfiguration(Umu,header,file);
SU<Nc>::HotConfiguration(RNG4,Umu);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
RealD mass=0.00078;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> CoarseOperator;
typedef CoarseOperator::CoarseVector CoarseVector;
typedef CoarseOperator::siteVector siteVector;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);
LatticeFermion B(FGrid);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.002,1000,800,100,0.0);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.02,1000,800,100,0.0);
Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.01,400,50,50,0.0); // Slightly faster
for(int n=0;n<nb;n++){
std::cout << GridLogMessage << " G5R5 "<<n<<std::endl;
G5R5(Aggregates.subspace[n+nb],Aggregates.subspace[n]);
std::cout << GridLogMessage << " Projection "<<n<<std::endl;
A = Aggregates.subspace[n];
B = Aggregates.subspace[n+nb];
std::cout << GridLogMessage << " Copy "<<n<<std::endl;
Aggregates.subspace[n] = A+B; // 1+G5 // eigen value of G5R5 is +1
std::cout << GridLogMessage << " P+ "<<n<<std::endl;
Aggregates.subspace[n+nb]= A-B; // 1-G5 // eigen value of G5R5 is -1
std::cout << GridLogMessage << " P- "<<n<<std::endl;
}
}
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building coarse representation of Indef operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> Level1Op;
typedef CoarsenedMatrix<siteVector,iScalar<vTComplex>,nbasisc> Level2Op;
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
std::cout << " Making 5D coarse RB grid " <<std::endl;
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
std::cout << " Made 5D coarse RB grid " <<std::endl;
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1);
std::cout << " LDOp.CoarsenOperator " <<std::endl;
LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
std::cout << " Coarsened Operator " <<std::endl;
//////////////////////////////////////////////////
// Deflate the coarse space. Recursive multigrid?
//////////////////////////////////////////////////
typedef Aggregation<siteVector,iScalar<vTComplex>,nbasisc> CoarseSubspace;
// CoarseSubspace CoarseAggregates(CoarseCoarse5d,Coarse5d,0);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Build deflation space in coarse operator "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<CoarseOperator,CoarseVector> PosdefLdop(LDOp);
/*
{
int nb=nbasisc/2;
CoarseAggregates.CreateSubspaceChebyshev(CRNG,PosdefLdop,nb,15.0,0.02,1000,800,100,0.0);
for(int n=0;n<nb;n++){
autoView( subspace , CoarseAggregates.subspace[n],CpuWrite);
autoView( subspace_g5, CoarseAggregates.subspace[n+nb],CpuWrite);
for(int nn=0;nn<nb;nn++){
for(int site=0;site<Coarse5d->oSites();site++){
subspace_g5[site](nn) = subspace[site](nn);
subspace_g5[site](nn+nb)=-subspace[site](nn+nb);
}
}
}
}
*/
typedef Level2Op::CoarseVector CoarseCoarseVector;
/*
Level2Op L2Op(*CoarseCoarse5d,1); // Hermitian matrix
HermitianLinearOperator<Level1Op,CoarseVector> L1LinOp(LDOp);
L2Op.CoarsenOperator(Coarse5d,L1LinOp,CoarseAggregates);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Running CoarseCoarse grid Lanczos "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<Level2Op,CoarseCoarseVector> IRLHermOpL2(L2Op);
CoarseCoarseVector cc_src(CoarseCoarse5d); cc_src=1.0;
*/
/*
Chebyshev<CoarseCoarseVector> IRLChebyL2(0.001,15.0,301);
FunctionHermOp<CoarseCoarseVector> IRLOpChebyL2(IRLChebyL2,IRLHermOpL2);
PlainHermOp<CoarseCoarseVector> IRLOpL2 (IRLHermOpL2);
int cNk=24;
int cNm=36;
int cNstop=24;
ImplicitlyRestartedLanczos<CoarseCoarseVector> IRLL2(IRLOpChebyL2,IRLOpL2,cNstop,cNk,cNm,1.0e-3,20);
int cNconv;
std::vector<RealD> eval2(cNm);
std::vector<CoarseCoarseVector> evec2(cNm,CoarseCoarse5d);
IRLL2.calc(eval2,evec2,cc_src,cNconv);
ConjugateGradient<CoarseCoarseVector> CoarseCoarseCG(0.1,1000);
DeflatedGuesser<CoarseCoarseVector> DeflCoarseCoarseGuesser(evec2,eval2);
NormalEquations<CoarseCoarseVector> DeflCoarseCoarseCGNE(L2Op,CoarseCoarseCG,DeflCoarseCoarseGuesser);
*/
/*
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Running Coarse grid Lanczos "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<Level1Op,CoarseVector> IRLHermOp(LDOp);
// Chebyshev<CoarseVector> IRLCheby(0.001,15.0,301);
Chebyshev<CoarseVector> IRLCheby(0.03,12.0,101);
FunctionHermOp<CoarseVector> IRLOpCheby(IRLCheby,IRLHermOp);
PlainHermOp<CoarseVector> IRLOp (IRLHermOp);
int Nk=64;
int Nm=128;
int Nstop=Nk;
ImplicitlyRestartedLanczos<CoarseVector> IRL(IRLOpCheby,IRLOp,Nstop,Nk,Nm,1.0e-3,20);
int Nconv;
std::vector<RealD> eval(Nm);
std::vector<CoarseVector> evec(Nm,Coarse5d);
IRL.calc(eval,evec,c_src,Nconv);
*/
CoarseVector c_src(Coarse5d); c_src=1.0;
// DeflatedGuesser<CoarseVector> DeflCoarseGuesser(evec,eval);
// NormalEquations<CoarseVector> DeflCoarseCGNE(LDOp,CoarseCG,DeflCoarseGuesser);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building 3 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
// typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,DeflatedGuesser<CoarseVector> , NormalEquations<CoarseVector> > TwoLevelMG;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector> , NormalEquations<CoarseVector> > TwoLevelMG;
typedef MultiGridPreconditioner<siteVector,iScalar<vTComplex>,nbasisc,Level1Op, DeflatedGuesser<CoarseCoarseVector>, NormalEquations<CoarseCoarseVector> > CoarseMG;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector>, LinearFunction<CoarseVector> > ThreeLevelMG;
ChebyshevSmoother<LatticeFermion,DomainWallFermionR> FineSmoother(0.25,60.0,12,HermIndefOp,Ddwf);
/*
// MultiGrid preconditioner acting on the coarse space <-> coarsecoarse space
ChebyshevSmoother<CoarseVector, Level1Op > CoarseSmoother(0.1,15.0,3,L1LinOp,LDOp);
// MirsSmoother<CoarseVector, Level1Op > CoarseCGSmoother(0.1,0.1,4,L1LinOp,LDOp);
// MirsSmoother<LatticeFermion,DomainWallFermionR> FineCGSmoother(0.0,0.01,8,HermIndefOp,Ddwf);
CoarseMG Level2Precon (CoarseAggregates, L2Op,
L1LinOp,LDOp,
CoarseSmoother,
DeflCoarseCoarseGuesser,
DeflCoarseCoarseCGNE);
Level2Precon.Level(2);
// PGCR applied to solve the coarse-space problem
PrecGeneralisedConjugateResidual<CoarseVector> l2PGCR(0.1, 100, L1LinOp,Level2Precon,16,16);
l2PGCR.Level(2);
// Wrap the 2nd level solver in a MultiGrid preconditioner acting on the fine space
ZeroGuesser<CoarseVector> CoarseZeroGuesser;
ThreeLevelMG ThreeLevelPrecon(Aggregates, LDOp,
HermIndefOp,Ddwf,
FineSmoother,
CoarseZeroGuesser,
l2PGCR);
ThreeLevelPrecon.Level(1);
// Apply the fine-coarse-coarsecoarse 2-deep MG preconditioner in an outer PGCR on the fine grid
PrecGeneralisedConjugateResidual<LatticeFermion> l1PGCR(1.0e-8,1000,HermIndefOp,ThreeLevelPrecon,16,16);
l1PGCR.Level(1);
*/
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling 2 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
ZeroGuesser<CoarseVector> CoarseZeroGuesser;
ConjugateGradient<CoarseVector> CoarseCG(0.01,1000);
NormalEquations<CoarseVector> CoarseCGNE(LDOp,CoarseCG,CoarseZeroGuesser);
TwoLevelMG TwoLevelPrecon(Aggregates, LDOp,
HermIndefOp,Ddwf,
FineSmoother,
CoarseZeroGuesser,
CoarseCGNE);
TwoLevelPrecon.Level(1);
PrecGeneralisedConjugateResidual<LatticeFermion> l1PGCR(1.0e-8,20,HermIndefOp,TwoLevelPrecon,16,16);
l1PGCR.Level(1);
l1PGCR(src,result);
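// Editorial aside, not from the source: the multigrid cycle above is a
// non-stationary preconditioner (its action depends on the input through the
// inner solves), so a flexible outer Krylov method such as PGCR is the
// natural wrapper here, rather than plain preconditioned CG, which assumes a
// fixed preconditioner.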
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
ConjugateGradient<LatticeFermion> pCG(1.0e-8,60000);
result=Zero();
// pCG(HermDefOp,src,result);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling red black CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
LatticeFermion src_o(FrbGrid);
LatticeFermion result_o(FrbGrid);
pickCheckerboard(Odd,src_o,src);
result_o=Zero();
SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
// pCG(HermOpEO,src_o,result_o);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Fine PowerMethod "<< std::endl;
PowerMethod<LatticeFermion> PM; PM(HermDefOp,src);
std::cout<<GridLogMessage << " Coarse PowerMethod "<< std::endl;
PowerMethod<CoarseVector> cPM; cPM(PosdefLdop,c_src);
// std::cout<<GridLogMessage << " CoarseCoarse PowerMethod "<< std::endl;
// PowerMethod<CoarseCoarseVector> ccPM; ccPM(IRLHermOpL2,cc_src);
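// Editorial sketch of what PowerMethod estimates (standalone and
// hypothetical, not part of this test): repeated application of a Hermitian
// positive operator to a normalised vector converges to its largest
// eigenvalue, the quantity used to bound Chebyshev ranges such as [lo,60].
//
// #include <cmath>
// #include <cstdio>
// int main() {
//   double A[2][2] = {{4.0, 1.0}, {1.0, 3.0}}; // stand-in for MdagM
//   double v[2] = {1.0, 0.0};
//   double lambda = 0.0;
//   for (int it = 0; it < 100; it++) {
//     double w[2] = { A[0][0]*v[0] + A[0][1]*v[1],
//                     A[1][0]*v[0] + A[1][1]*v[1] };
//     lambda = std::sqrt(w[0]*w[0] + w[1]*w[1]);  // |Av| for unit v
//     v[0] = w[0]/lambda; v[1] = w[1]/lambda;
//   }
//   std::printf("largest eigenvalue estimate %f\n", lambda); // -> 4.618...
// }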
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Grid_finalize();
}

View File

@ -262,8 +262,6 @@ int main (int argc, char ** argv)
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
@ -330,7 +328,7 @@ int main (int argc, char ** argv)
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
Level1Op LDOp(*Coarse5d,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Running Coarse grid Lanczos "<< std::endl;
@ -354,9 +352,7 @@ int main (int argc, char ** argv)
// ConjugateGradient<CoarseVector> CoarseCG(0.01,1000);
ConjugateGradient<CoarseVector> CoarseCG(0.01,2000);// 14.7s
eval.resize(0);
evec.resize(0,Coarse5d);
ConjugateGradient<CoarseVector> CoarseCG(0.02,1000);// 14.7s
DeflatedGuesser<CoarseVector> DeflCoarseGuesser(evec,eval);
NormalEquations<CoarseVector> DeflCoarseCGNE(LDOp,CoarseCG,DeflCoarseGuesser);

View File

@ -1,397 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_hdcr.cc
Copyright (C) 2015
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
using namespace std;
using namespace Grid;
/* Params
* Grid:
* block1(4)
* block2(4)
*
* Subspace
* * Fine : Subspace(nbasis,hi,lo,order,first,step) -- 32, 60,0.02,500,100,100
* * Coarse: Subspace(nbasis,hi,lo,order,first,step) -- 32, 18,0.02,500,100,100
* Smoother:
* * Fine: Cheby(hi, lo, order) -- 60,0.5,10
* * Coarse: Cheby(hi, lo, order) -- 12,0.1,4
* Lanczos:
* CoarseCoarse IRL( Nk, Nm, Nstop, poly(lo,hi,order)) 24,36,24,0.002,4.0,61
*/
RealD InverseApproximation(RealD x){
return 1.0/x;
}
template<class Field> class SolverWrapper : public LinearFunction<Field> {
private:
CheckerBoardedSparseMatrixBase<Field> & _Matrix;
SchurRedBlackBase<Field> & _Solver;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations trick
/////////////////////////////////////////////////////
SolverWrapper(CheckerBoardedSparseMatrixBase<Field> &Matrix,
SchurRedBlackBase<Field> &Solver)
: _Matrix(Matrix), _Solver(Solver) {};
void operator() (const Field &in, Field &out){
_Solver(_Matrix,in,out); // Mdag M out = Mdag in
}
};
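// Editorial sketch, not from the source code itself: per the comment above,
// the wrapped solve targets the normal equation
//   M^\dagger M \, x = M^\dagger b ,
// whose solution coincides with that of M x = b for invertible M;
// SolverWrapper merely adapts a SchurRedBlackBase solver to the
// LinearFunction interface expected by the preconditioner.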
template<class Field,class Matrix> class ChebyshevSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & _SmootherMatrix;
FineOperator & _SmootherOperator;
Chebyshev<Field> Cheby;
ChebyshevSmoother(RealD _lo,RealD _hi,int _ord, FineOperator &SmootherOperator,Matrix &SmootherMatrix) :
_SmootherOperator(SmootherOperator),
_SmootherMatrix(SmootherMatrix),
Cheby(_lo,_hi,_ord,InverseApproximation)
{};
void operator() (const Field &in, Field &out)
{
Field tmp(in.Grid());
MdagMLinearOperator<Matrix,Field> MdagMOp(_SmootherMatrix);
_SmootherOperator.AdjOp(in,tmp);
Cheby(MdagMOp,tmp,out);
}
};
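// Editorial sketch (standalone and hypothetical, not part of this test) of
// what Cheby(MdagMOp,tmp,out) computes: a Chebyshev approximation to
// InverseApproximation(x)=1/x on [lo,hi], evaluated here on scalars; Grid's
// Chebyshev<Field> runs the same Clenshaw recurrence with x replaced by the
// operator MdagM.
//
// #include <cmath>
// #include <cstdio>
// #include <vector>
// double cheby_inverse(double x, double lo, double hi, int order) {
//   const double pi = std::acos(-1.0);
//   std::vector<double> c(order);            // coefficients of 1/y on [lo,hi]
//   for (int j = 0; j < order; j++) {
//     double s = 0.0;
//     for (int k = 0; k < order; k++) {
//       double th = pi * (k + 0.5) / order;
//       double y  = 0.5*(hi+lo) + 0.5*(hi-lo)*std::cos(th);
//       s += (1.0/y) * std::cos(j*th);
//     }
//     c[j] = 2.0*s/order;
//   }
//   double t = (2.0*x - lo - hi)/(hi - lo);  // map [lo,hi] -> [-1,1]
//   double b1 = 0.0, b2 = 0.0;               // Clenshaw evaluation
//   for (int j = order-1; j >= 1; j--) {
//     double b0 = 2.0*t*b1 - b2 + c[j];
//     b2 = b1; b1 = b0;
//   }
//   return t*b1 - b2 + 0.5*c[0];
// }
// int main() {                               // e.g. smoother range lo=0.5,hi=60
//   for (double x : {0.5, 1.0, 10.0, 60.0})
//     std::printf("x=%5.1f 1/x=%8.5f cheby=%8.5f\n", x, 1.0/x,
//                 cheby_inverse(x, 0.5, 60.0, 10));
// }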
template<class Field,class Matrix> class MirsSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & SmootherMatrix;
FineOperator & SmootherOperator;
RealD tol;
RealD shift;
int maxit;
MirsSmoother(RealD _shift,RealD _tol,int _maxit,FineOperator &_SmootherOperator,Matrix &_SmootherMatrix) :
shift(_shift),tol(_tol),maxit(_maxit),
SmootherOperator(_SmootherOperator),
SmootherMatrix(_SmootherMatrix)
{};
void operator() (const Field &in, Field &out)
{
ZeroGuesser<Field> Guess;
ConjugateGradient<Field> CG(tol,maxit,false);
Field src(in.Grid());
ShiftedMdagMLinearOperator<SparseMatrixBase<Field>,Field> MdagMOp(SmootherMatrix,shift);
SmootherOperator.AdjOp(in,src);
Guess(src,out);
CG(MdagMOp,src,out);
}
};
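// Editorial sketch, not from the source: the CG above targets the shifted
// normal equation
//   ( M^\dagger M + \sigma ) x = M^\dagger b ,   \sigma = shift ,
// run to a loose tolerance; the shift regulates the low modes so that a few
// CG iterations act as a smoother rather than a solver.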
template<class Fobj,class CComplex,int nbasis, class Matrix, class Guesser, class CoarseSolver>
class MultiGridPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef CoarsenedMatrix<Fobj,CComplex,nbasis> CoarseOperator;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
Aggregates & _Aggregates;
CoarseOperator & _CoarseOperator;
Matrix & _FineMatrix;
FineOperator & _FineOperator;
Guesser & _Guess;
FineSmoother & _Smoother;
CoarseSolver & _CoarseSolve;
int level; void Level(int lv) {level = lv; };
#define GridLogLevel std::cout << GridLogMessage <<std::string(level,'\t')<< " Level "<<level <<" "
MultiGridPreconditioner(Aggregates &Agg, CoarseOperator &Coarse,
FineOperator &Fine,Matrix &FineMatrix,
FineSmoother &Smoother,
Guesser &Guess_,
CoarseSolver &CoarseSolve_)
: _Aggregates(Agg),
_CoarseOperator(Coarse),
_FineOperator(Fine),
_FineMatrix(FineMatrix),
_Smoother(Smoother),
_Guess(Guess_),
_CoarseSolve(CoarseSolve_),
level(1) { }
virtual void operator()(const FineField &in, FineField & out)
{
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
FineField vec1(in.Grid());
FineField vec2(in.Grid());
double t;
// Fine Smoother
t=-usecond();
_Smoother(in,out);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// Fine to Coarse
t=-usecond();
_Aggregates.ProjectToSubspace (Csrc,vec1);
t+=usecond();
GridLogLevel << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction
t=-usecond();
_CoarseSolve(Csrc,Csol);
t+=usecond();
GridLogLevel << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine
t=-usecond();
_Aggregates.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
GridLogLevel << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// Fine Smoother
t=-usecond();
_Smoother(vec1,vec2);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
}
};
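// Editorial sketch, not from the source: with smoother S, restriction
// R = ProjectToSubspace, prolongation P = PromoteFromSubspace and coarse
// solve A_c^{-1}, the operator() above realises the two-grid cycle whose
// error propagator is
//   E = (1 - S A) (1 - P A_c^{-1} R A) (1 - S A) ,
// i.e. pre- and post-smoothing wrapped around a coarse-grid correction.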
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=24;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
///////////////////////////////////////////////////
// Construct a coarsened grid; utility for this?
///////////////////////////////////////////////////
std::vector<int> block ({2,2,2,2});
//std::vector<int> block ({2,2,2,2});
const int nbasis= 40;
const int nbasisc= 40;
auto clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/block[d];
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
// GridCartesian *CoarseCoarse5d = SpaceTimeGrid::makeFiveDimGrid(1,CoarseCoarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); gaussian(RNG5,src);// src=src+g5*src;
LatticeFermion result(FGrid);
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
//std::string file("./ckpoint_lat.4000");
std::string file("./ckpoint_lat.1000");
NerscIO::readConfiguration(Umu,header,file);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
RealD mass=0.00078;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> CoarseOperator;
typedef CoarseOperator::CoarseVector CoarseVector;
typedef CoarseOperator::siteVector siteVector;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);
LatticeFermion B(FGrid);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.002,1000,800,100,0.0);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.02,1000,800,100,0.0);
Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.01,1000,100,100,0.0); // Slightly faster
for(int n=0;n<nb;n++){
std::cout << GridLogMessage << " G5R5 "<<n<<std::endl;
G5R5(Aggregates.subspace[n+nb],Aggregates.subspace[n]);
std::cout << GridLogMessage << " Projection "<<n<<std::endl;
A = Aggregates.subspace[n];
B = Aggregates.subspace[n+nb];
std::cout << GridLogMessage << " Copy "<<n<<std::endl;
Aggregates.subspace[n] = A+B; // 1+G5 // eigenvalue of G5R5 is +1
std::cout << GridLogMessage << " P+ "<<n<<std::endl;
Aggregates.subspace[n+nb]= A-B; // 1-G5 // eigenvalue of G5R5 is -1
std::cout << GridLogMessage << " P- "<<n<<std::endl;
}
}
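// Editorial sketch, not from the source: the loop above doubles nb vectors
// psi_n into the chiral combinations (up to normalisation)
//   P_+ psi_n = (1 + \Gamma_5 R_5) psi_n ,   P_- psi_n = (1 - \Gamma_5 R_5) psi_n ,
// so that the G5R5-hermiticity of the fine operator survives coarsening.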
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building coarse representation of Indef operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> Level1Op;
typedef CoarsenedMatrix<siteVector,iScalar<vTComplex>,nbasisc> Level2Op;
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
//////////////////////////////////////////////////
// Deflate the course space. Recursive multigrid?
//////////////////////////////////////////////////
typedef Aggregation<siteVector,iScalar<vTComplex>,nbasisc> CoarseSubspace;
// CoarseSubspace CoarseAggregates(CoarseCoarse5d,Coarse5d,0);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Build deflation space in coarse operator "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<CoarseOperator,CoarseVector> PosdefLdop(LDOp);
typedef Level2Op::CoarseVector CoarseCoarseVector;
CoarseVector c_src(Coarse5d); c_src=1.0;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building 3 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector> , SolverWrapper<CoarseVector> > TwoLevelMG;
typedef MultiGridPreconditioner<siteVector,iScalar<vTComplex>,nbasisc,Level1Op, DeflatedGuesser<CoarseCoarseVector>, NormalEquations<CoarseCoarseVector> > CoarseMG;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector>, LinearFunction<CoarseVector> > ThreeLevelMG;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling 2 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::vector<RealD> tols({0.015});
std::vector<int> ords({12});
std::vector<RealD> los({0.8});
for(int l=0;l<los.size();l++){
for(int o=0;o<ords.size();o++){
for(int t=0;t<tols.size();t++){
result=Zero();
std::cout << GridLogMessage <<" tol " << tols[t] << " cheby order " <<ords[o]<< " lo "<<los[l] <<std::endl;
ChebyshevSmoother<LatticeFermion,DomainWallFermionR> FineSmoother(los[l],60.0,ords[o],HermIndefOp,Ddwf);
ZeroGuesser<CoarseVector> CoarseZeroGuesser;
ConjugateGradient<CoarseVector> CoarseCG(tols[t],10000);
SchurRedBlackDiagMooeeSolve<CoarseVector> CoarseRBCG(CoarseCG);
SolverWrapper<CoarseVector> CoarseSolver(LDOp,CoarseRBCG);
TwoLevelMG TwoLevelPrecon(Aggregates, LDOp,
HermIndefOp,Ddwf,
FineSmoother,
CoarseZeroGuesser,
CoarseSolver);
TwoLevelPrecon.Level(1);
PrecGeneralisedConjugateResidual<LatticeFermion> l1PGCR(1.0e-8,20,HermIndefOp,TwoLevelPrecon,16,16);
l1PGCR.Level(1);
l1PGCR(src,result);
}}}
ConjugateGradient<LatticeFermion> pCG(1.0e-8,60000);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling red black CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
LatticeFermion src_o(FrbGrid);
LatticeFermion result_o(FrbGrid);
pickCheckerboard(Odd,src_o,src);
result_o=Zero();
SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
pCG(HermOpEO,src_o,result_o);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
pCG(HermDefOp,src,result);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Fine PowerMethod "<< std::endl;
PowerMethod<LatticeFermion> PM; PM(HermDefOp,src);
std::cout<<GridLogMessage << " Coarse PowerMethod "<< std::endl;
PowerMethod<CoarseVector> cPM; cPM(PosdefLdop,c_src);
// std::cout<<GridLogMessage << " CoarseCoarse PowerMethod "<< std::endl;
// PowerMethod<CoarseCoarseVector> ccPM; ccPM(IRLHermOpL2,cc_src);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Grid_finalize();
}

View File

@ -1,473 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_hdcr.cc
Copyright (C) 2015
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidual.h>
using namespace std;
using namespace Grid;
/* Params
* Grid:
* block1(4)
* block2(4)
*
* Subspace
* * Fine : Subspace(nbasis,hi,lo,order,first,step) -- 32, 60,0.02,500,100,100
* * Coarse: Subspace(nbasis,hi,lo,order,first,step) -- 32, 18,0.02,500,100,100
* Smoother:
* * Fine: Cheby(hi, lo, order) -- 60,0.5,10
* * Coarse: Cheby(hi, lo, order) -- 12,0.1,4
* Lanczos:
* CoarseCoarse IRL( Nk, Nm, Nstop, poly(lo,hi,order)) 24,36,24,0.002,4.0,61
*/
RealD InverseApproximation(RealD x){
return 1.0/x;
}
template<class Field,class Matrix> class ChebyshevSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & _SmootherMatrix;
FineOperator & _SmootherOperator;
Chebyshev<Field> Cheby;
ChebyshevSmoother(RealD _lo,RealD _hi,int _ord, FineOperator &SmootherOperator,Matrix &SmootherMatrix) :
_SmootherOperator(SmootherOperator),
_SmootherMatrix(SmootherMatrix),
Cheby(_lo,_hi,_ord,InverseApproximation)
{};
void operator() (const Field &in, Field &out)
{
Field tmp(in.Grid());
MdagMLinearOperator<Matrix,Field> MdagMOp(_SmootherMatrix);
_SmootherOperator.AdjOp(in,tmp);
Cheby(MdagMOp,tmp,out);
}
};
template<class Field,class Matrix> class MirsSmoother : public LinearFunction<Field>
{
public:
typedef LinearOperatorBase<Field> FineOperator;
Matrix & SmootherMatrix;
FineOperator & SmootherOperator;
RealD tol;
RealD shift;
int maxit;
MirsSmoother(RealD _shift,RealD _tol,int _maxit,FineOperator &_SmootherOperator,Matrix &_SmootherMatrix) :
shift(_shift),tol(_tol),maxit(_maxit),
SmootherOperator(_SmootherOperator),
SmootherMatrix(_SmootherMatrix)
{};
void operator() (const Field &in, Field &out)
{
ZeroGuesser<Field> Guess;
ConjugateGradient<Field> CG(tol,maxit,false);
Field src(in.Grid());
ShiftedMdagMLinearOperator<SparseMatrixBase<Field>,Field> MdagMOp(SmootherMatrix,shift);
SmootherOperator.AdjOp(in,src);
Guess(src,out);
CG(MdagMOp,src,out);
}
};
template<class Fobj,class CComplex,int nbasis, class Matrix, class Guesser, class CoarseSolver>
class MultiGridPreconditioner : public LinearFunction< Lattice<Fobj> > {
public:
typedef Aggregation<Fobj,CComplex,nbasis> Aggregates;
typedef CoarsenedMatrix<Fobj,CComplex,nbasis> CoarseOperator;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseVector CoarseVector;
typedef typename Aggregation<Fobj,CComplex,nbasis>::CoarseMatrix CoarseMatrix;
typedef typename Aggregation<Fobj,CComplex,nbasis>::FineField FineField;
typedef LinearOperatorBase<FineField> FineOperator;
typedef LinearFunction <FineField> FineSmoother;
Aggregates & _Aggregates;
CoarseOperator & _CoarseOperator;
Matrix & _FineMatrix;
FineOperator & _FineOperator;
Guesser & _Guess;
FineSmoother & _Smoother;
CoarseSolver & _CoarseSolve;
int level; void Level(int lv) {level = lv; };
#define GridLogLevel std::cout << GridLogMessage <<std::string(level,'\t')<< " Level "<<level <<" "
MultiGridPreconditioner(Aggregates &Agg, CoarseOperator &Coarse,
FineOperator &Fine,Matrix &FineMatrix,
FineSmoother &Smoother,
Guesser &Guess_,
CoarseSolver &CoarseSolve_)
: _Aggregates(Agg),
_CoarseOperator(Coarse),
_FineOperator(Fine),
_FineMatrix(FineMatrix),
_Smoother(Smoother),
_Guess(Guess_),
_CoarseSolve(CoarseSolve_),
level(1) { }
virtual void operator()(const FineField &in, FineField & out)
{
CoarseVector Csrc(_CoarseOperator.Grid());
CoarseVector Csol(_CoarseOperator.Grid());
FineField vec1(in.Grid());
FineField vec2(in.Grid());
double t;
// Fine Smoother
t=-usecond();
_Smoother(in,out);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
// Update the residual
_FineOperator.Op(out,vec1); sub(vec1, in ,vec1);
// Fine to Coarse
t=-usecond();
_Aggregates.ProjectToSubspace (Csrc,vec1);
t+=usecond();
GridLogLevel << "Project to coarse took "<< t/1000.0<< "ms" <<std::endl;
// Coarse correction
t=-usecond();
_CoarseSolve(Csrc,Csol);
t+=usecond();
GridLogLevel << "Coarse solve took "<< t/1000.0<< "ms" <<std::endl;
// Coarse to Fine
t=-usecond();
_Aggregates.PromoteFromSubspace(Csol,vec1);
add(out,out,vec1);
t+=usecond();
GridLogLevel << "Promote to this level took "<< t/1000.0<< "ms" <<std::endl;
// Residual
_FineOperator.Op(out,vec1); sub(vec1 ,in , vec1);
// Fine Smoother
t=-usecond();
_Smoother(vec1,vec2);
t+=usecond();
GridLogLevel << "Smoother took "<< t/1000.0<< "ms" <<std::endl;
add( out,out,vec2);
}
};
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=24;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
///////////////////////////////////////////////////
// Construct a coarsened grid; utility for this?
///////////////////////////////////////////////////
std::vector<int> block ({2,2,2,2});
std::vector<int> blockc ({2,2,2,2});
const int nbasis= 40;
const int nbasisc= 40;
auto clatt = GridDefaultLatt();
for(int d=0;d<clatt.size();d++){
clatt[d] = clatt[d]/block[d];
}
auto cclatt = clatt;
for(int d=0;d<clatt.size();d++){
cclatt[d] = clatt[d]/blockc[d];
}
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
// GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
// GridCartesian *CoarseCoarse5d = SpaceTimeGrid::makeFiveDimGrid(1,CoarseCoarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
LatticeFermion src(FGrid); gaussian(RNG5,src);// src=src+g5*src;
LatticeFermion result(FGrid);
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
// std::string file("./ckpoint_lat.4000");
std::string file("./ckpoint_lat.1000");
NerscIO::readConfiguration(Umu,header,file);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
RealD mass=0.00078;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> CoarseOperator;
typedef CoarseOperator::CoarseVector CoarseVector;
typedef CoarseOperator::siteVector siteVector;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid,0);
assert ( (nbasis & 0x1)==0);
{
int nb=nbasis/2;
LatticeFermion A(FGrid);
LatticeFermion B(FGrid);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.002,1000,800,100,0.0);
// Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.02,1000,800,100,0.0);
Aggregates.CreateSubspaceChebyshev(RNG5,HermDefOp,nb,60.0,0.01,1000,100,100,0.0); // Slightly faster
for(int n=0;n<nb;n++){
std::cout << GridLogMessage << " G5R5 "<<n<<std::endl;
G5R5(Aggregates.subspace[n+nb],Aggregates.subspace[n]);
std::cout << GridLogMessage << " Projection "<<n<<std::endl;
A = Aggregates.subspace[n];
B = Aggregates.subspace[n+nb];
std::cout << GridLogMessage << " Copy "<<n<<std::endl;
Aggregates.subspace[n] = A+B; // 1+G5 // eigenvalue of G5R5 is +1
std::cout << GridLogMessage << " P+ "<<n<<std::endl;
Aggregates.subspace[n+nb]= A-B; // 1-G5 // eigenvalue of G5R5 is -1
std::cout << GridLogMessage << " P- "<<n<<std::endl;
}
}
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building coarse representation of Indef operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> Level1Op;
typedef CoarsenedMatrix<siteVector,iScalar<vTComplex>,nbasisc> Level2Op;
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
std::cout << " Making 5D coarse RB grid " <<std::endl;
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
std::cout << " Made 5D coarse RB grid " <<std::endl;
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
//////////////////////////////////////////////////
// Deflate the coarse space. Recursive multigrid?
//////////////////////////////////////////////////
typedef Aggregation<siteVector,iScalar<vTComplex>,nbasisc> CoarseSubspace;
// CoarseSubspace CoarseAggregates(CoarseCoarse5d,Coarse5d,0);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Build deflation space in coarse operator "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<CoarseOperator,CoarseVector> PosdefLdop(LDOp);
/*
{
int nb=nbasisc/2;
CoarseAggregates.CreateSubspaceChebyshev(CRNG,PosdefLdop,nb,15.0,0.02,1000,800,100,0.0);
for(int n=0;n<nb;n++){
autoView( subspace , CoarseAggregates.subspace[n],CpuWrite);
autoView( subspace_g5, CoarseAggregates.subspace[n+nb],CpuWrite);
for(int nn=0;nn<nb;nn++){
for(int site=0;site<Coarse5d->oSites();site++){
subspace_g5[site](nn) = subspace[site](nn);
subspace_g5[site](nn+nb)=-subspace[site](nn+nb);
}
}
}
}
*/
typedef Level2Op::CoarseVector CoarseCoarseVector;
/*
Level2Op L2Op(*CoarseCoarse5d,1); // Hermitian matrix
HermitianLinearOperator<Level1Op,CoarseVector> L1LinOp(LDOp);
L2Op.CoarsenOperator(Coarse5d,L1LinOp,CoarseAggregates);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Running CoarseCoarse grid Lanczos "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<Level2Op,CoarseCoarseVector> IRLHermOpL2(L2Op);
CoarseCoarseVector cc_src(CoarseCoarse5d); cc_src=1.0;
*/
/*
Chebyshev<CoarseCoarseVector> IRLChebyL2(0.001,15.0,301);
FunctionHermOp<CoarseCoarseVector> IRLOpChebyL2(IRLChebyL2,IRLHermOpL2);
PlainHermOp<CoarseCoarseVector> IRLOpL2 (IRLHermOpL2);
int cNk=24;
int cNm=36;
int cNstop=24;
ImplicitlyRestartedLanczos<CoarseCoarseVector> IRLL2(IRLOpChebyL2,IRLOpL2,cNstop,cNk,cNm,1.0e-3,20);
int cNconv;
std::vector<RealD> eval2(cNm);
std::vector<CoarseCoarseVector> evec2(cNm,CoarseCoarse5d);
IRLL2.calc(eval2,evec2,cc_src,cNconv);
ConjugateGradient<CoarseCoarseVector> CoarseCoarseCG(0.1,1000);
DeflatedGuesser<CoarseCoarseVector> DeflCoarseCoarseGuesser(evec2,eval2);
NormalEquations<CoarseCoarseVector> DeflCoarseCoarseCGNE(L2Op,CoarseCoarseCG,DeflCoarseCoarseGuesser);
*/
/*
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Running Coarse grid Lanczos "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<Level1Op,CoarseVector> IRLHermOp(LDOp);
// Chebyshev<CoarseVector> IRLCheby(0.001,15.0,301);
Chebyshev<CoarseVector> IRLCheby(0.03,12.0,101);
FunctionHermOp<CoarseVector> IRLOpCheby(IRLCheby,IRLHermOp);
PlainHermOp<CoarseVector> IRLOp (IRLHermOp);
int Nk=64;
int Nm=128;
int Nstop=Nk;
ImplicitlyRestartedLanczos<CoarseVector> IRL(IRLOpCheby,IRLOp,Nstop,Nk,Nm,1.0e-3,20);
int Nconv;
std::vector<RealD> eval(Nm);
std::vector<CoarseVector> evec(Nm,Coarse5d);
IRL.calc(eval,evec,c_src,Nconv);
*/
CoarseVector c_src(Coarse5d); c_src=1.0;
// DeflatedGuesser<CoarseVector> DeflCoarseGuesser(evec,eval);
// NormalEquations<CoarseVector> DeflCoarseCGNE(LDOp,CoarseCG,DeflCoarseGuesser);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building 3 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
// typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,DeflatedGuesser<CoarseVector> , NormalEquations<CoarseVector> > TwoLevelMG;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector> , NormalEquations<CoarseVector> > TwoLevelMG;
typedef MultiGridPreconditioner<siteVector,iScalar<vTComplex>,nbasisc,Level1Op, DeflatedGuesser<CoarseCoarseVector>, NormalEquations<CoarseCoarseVector> > CoarseMG;
typedef MultiGridPreconditioner<vSpinColourVector, vTComplex,nbasis, DomainWallFermionR,ZeroGuesser<CoarseVector>, LinearFunction<CoarseVector> > ThreeLevelMG;
ChebyshevSmoother<LatticeFermion,DomainWallFermionR> FineSmoother(0.25,60.0,12,HermIndefOp,Ddwf);
/*
// MultiGrid preconditioner acting on the coarse space <-> coarsecoarse space
ChebyshevSmoother<CoarseVector, Level1Op > CoarseSmoother(0.1,15.0,3,L1LinOp,LDOp);
// MirsSmoother<CoarseVector, Level1Op > CoarseCGSmoother(0.1,0.1,4,L1LinOp,LDOp);
// MirsSmoother<LatticeFermion,DomainWallFermionR> FineCGSmoother(0.0,0.01,8,HermIndefOp,Ddwf);
CoarseMG Level2Precon (CoarseAggregates, L2Op,
L1LinOp,LDOp,
CoarseSmoother,
DeflCoarseCoarseGuesser,
DeflCoarseCoarseCGNE);
Level2Precon.Level(2);
// PGCR applied to solve the coarse-space problem
PrecGeneralisedConjugateResidual<CoarseVector> l2PGCR(0.1, 100, L1LinOp,Level2Precon,16,16);
l2PGCR.Level(2);
// Wrap the 2nd level solver in a MultiGrid preconditioner acting on the fine space
ZeroGuesser<CoarseVector> CoarseZeroGuesser;
ThreeLevelMG ThreeLevelPrecon(Aggregates, LDOp,
HermIndefOp,Ddwf,
FineSmoother,
CoarseZeroGuesser,
l2PGCR);
ThreeLevelPrecon.Level(1);
// Apply the fine-coarse-coarsecoarse 2-deep MG preconditioner in an outer PGCR on the fine grid
PrecGeneralisedConjugateResidual<LatticeFermion> l1PGCR(1.0e-8,1000,HermIndefOp,ThreeLevelPrecon,16,16);
l1PGCR.Level(1);
*/
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling 2 level Multigrid "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
ZeroGuesser<CoarseVector> CoarseZeroGuesser;
ConjugateGradient<CoarseVector> CoarseCG(0.01,1000);
NormalEquations<CoarseVector> CoarseCGNE(LDOp,CoarseCG,CoarseZeroGuesser);
TwoLevelMG TwoLevelPrecon(Aggregates, LDOp,
HermIndefOp,Ddwf,
FineSmoother,
CoarseZeroGuesser,
CoarseCGNE);
TwoLevelPrecon.Level(1);
PrecGeneralisedConjugateResidual<LatticeFermion> l1PGCR(1.0e-8,20,HermIndefOp,TwoLevelPrecon,16,16);
l1PGCR.Level(1);
l1PGCR(src,result);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
ConjugateGradient<LatticeFermion> pCG(1.0e-8,60000);
result=Zero();
// pCG(HermDefOp,src,result);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling red black CG "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
result=Zero();
LatticeFermion src_o(FrbGrid);
LatticeFermion result_o(FrbGrid);
pickCheckerboard(Odd,src_o,src);
result_o=Zero();
SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
pCG(HermOpEO,src_o,result_o);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << " Fine PowerMethod "<< std::endl;
PowerMethod<LatticeFermion> PM; PM(HermDefOp,src);
std::cout<<GridLogMessage << " Coarse PowerMethod "<< std::endl;
PowerMethod<CoarseVector> cPM; cPM(PosdefLdop,c_src);
// std::cout<<GridLogMessage << " CoarseCoarse PowerMethod "<< std::endl;
// PowerMethod<CoarseCoarseVector> ccPM; ccPM(IRLHermOpL2,cc_src);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Done "<< std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Grid_finalize();
}

View File

@ -370,11 +370,6 @@ int main (int argc, char ** argv)
GridCartesian *CoarseCoarse4d = SpaceTimeGrid::makeFourDimGrid(cclatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian *CoarseCoarse5d = SpaceTimeGrid::makeFiveDimGrid(1,CoarseCoarse4d);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
GridRedBlackCartesian *CoarseCoarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(CoarseCoarse4d);
GridRedBlackCartesian *CoarseCoarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,CoarseCoarse4d);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::vector<int> cseeds({5,6,7,8});
@ -439,8 +434,8 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "Building coarse representation of Indef operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
Level1Op LDOp(*Coarse5d,*Coarse5dRB,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
Level1Op LDOpPV(*Coarse5d,*Coarse5dRB,1); LDOpPV.CoarsenOperator(FGrid,HermIndefOpPV,Aggregates);
Level1Op LDOp(*Coarse5d,1); LDOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);
Level1Op LDOpPV(*Coarse5d,1); LDOpPV.CoarsenOperator(FGrid,HermIndefOpPV,Aggregates);
std::cout<<GridLogMessage << "**************************************************"<< std::endl;

View File

@ -274,8 +274,6 @@ int main (int argc, char ** argv)
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(Ls,Coarse4d);
GridRedBlackCartesian * Coarse4dRB = SpaceTimeGrid::makeFourDimRedBlackGrid(Coarse4d);
GridRedBlackCartesian * Coarse5dRB = SpaceTimeGrid::makeFiveDimRedBlackGrid(1,Coarse4d);
std::vector<int> seeds({1,2,3,4});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds);
@ -337,7 +335,7 @@ int main (int argc, char ** argv)
NonHermitianLinearOperator<DomainWallFermionR,LatticeFermion> LinOpDwf(Ddwf);
Level1Op LDOp (*Coarse5d,*Coarse5dRB,0);
Level1Op LDOp (*Coarse5d,0);
std::cout<<GridLogMessage << " Callinig Coarsen the operator " <<std::endl;
LDOp.CoarsenOperator(FGrid,LinOpDwf,Aggregates5D);

File diff suppressed because it is too large

File diff suppressed because it is too large