Mirror of https://github.com/paboyle/Grid.git (synced 2025-11-03 21:44:33 +00:00)

Compare commits: DiRAC-ITT- ... feature/sy (86 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 229709a980 |  |
|  | 9295eeadfe |  |
|  | 36f471e333 |  |
|  | ca4eadd4ab |  |
|  | d954595922 |  |
|  | 1ac13ec3a7 |  |
|  | 55de69a569 |  |
|  | eda9ab487b |  |
|  | cd99edcc5f |  |
|  | 69f1f04f74 |  |
|  | 11a5fd09d6 |  |
|  | ff1fa98808 |  |
|  | b0339bc5a4 |  |
|  | 3c23a947cc |  |
|  | 56111bb823 |  |
|  | 99445673f6 |  |
|  | 97a59643f7 |  |
|  | 579595f547 |  |
|  | 281ac5fc12 |  |
|  | d8fa903b02 |  |
|  | eaff0f3aeb |  |
|  | e8e20c01b2 |  |
|  | a4afc3ea2a |  |
|  | 3fe75bc7cb |  |
|  | 45d49d8648 |  |
|  | 6013183361 |  |
|  | 4b882e8056 |  |
|  | 3f9ae6e7e7 |  |
|  | 909acd55cd |  |
|  | 4dd9e39e0d |  |
|  | 7adb253e25 |  |
|  | 873519e960 |  |
|  | 9aec4a3c26 |  |
|  | 70510d151b |  |
|  | 9e7bacb5a4 |  |
|  | 2ef1fa66a8 |  |
|  | cf76741ec6 |  |
|  | 497e7c1c40 |  |
|  | 888eacd3b8 |  |
|  | 321f0f51b5 |  |
|  | 30ad9578a2 |  |
|  | 9dce101586 |  |
|  | 97e264d0ff |  |
|  | 683a5e5bf5 |  |
|  | d4861a362c |  |
|  | 5ff3eae027 |  |
|  | 147dc15d26 |  |
|  | c61ea72949 |  |
|  | 86e8b9fe38 |  |
|  | 612e468889 |  |
|  | 4ea8d128c2 |  |
|  | e49b7f2f88 |  |
|  | aace3d47b9 |  |
|  | d5049949a4 |  |
|  | f1c7480e3c |  |
|  | 5adae5d6ff |  |
|  | a8412ace05 |  |
|  | 9fd1c2ad4b |  |
|  | 4cf3575353 |  |
|  | 804a810d68 |  |
|  | 8fcb392e24 |  |
|  | dd8d70eeff |  |
|  | aa8aba6543 |  |
|  | 13df14f96e |  |
|  | b3881d2636 |  |
|  | d060341168 |  |
|  | c772bcd514 |  |
|  | 467deee46f |  |
|  | 80fd6ab407 |  |
|  | 5534921bee |  |
|  | 5cffa05c7e |  |
|  | d50a2164d7 |  |
|  | 32ff766dbd |  |
|  | 01652d8cfe |  |
|  | 4d2dc7ba03 |  |
|  | 51d1beb1f3 |  |
|  | 249e2db87d |  |
|  | cf3535d16e |  |
|  | d61ee817f4 |  |
|  | 2a75516330 |  |
|  | b2087f14c4 |  |
|  | dd1ba266b2 |  |
|  | 1292d59563 |  |
|  | 9877ed9bf8 |  |
|  | f0dc0f3621 |  |
|  | 63b0a19f37 |  |
@@ -37,7 +37,9 @@ directory
#endif

 //disables and intel compiler specific warning (in json.hpp)
#ifdef __ICC
#pragma warning disable 488
#endif

#ifdef __NVCC__
 //disables nvcc specific warning in json.hpp
@@ -21,6 +21,7 @@ if BUILD_HDF5
  extra_headers+=serialisation/Hdf5Type.h
endif

all: version-cache Version.h

version-cache:

@@ -53,6 +54,17 @@ Version.h: version-cache
include Make.inc
include Eigen.inc

#extra_sources+=$(ZWILS_FERMION_FILES)
extra_sources+=$(WILS_FERMION_FILES)
extra_sources+=$(STAG_FERMION_FILES)
if BUILD_GPARITY
  extra_sources+=$(GP_FERMION_FILES)
endif
if BUILD_FERMION_REPS
  extra_sources+=$(ADJ_FERMION_FILES)
  extra_sources+=$(TWOIND_FERMION_FILES)
endif

lib_LIBRARIES = libGrid.a

CCFILES += $(extra_sources)
@@ -31,6 +31,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_ALGORITHM_COARSENED_MATRIX_H
#define  GRID_ALGORITHM_COARSENED_MATRIX_H

#include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No)

NAMESPACE_BEGIN(Grid);

@@ -59,12 +60,14 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner,
class Geometry {
public:
  int npoint;
  int base;
  std::vector<int> directions   ;
  std::vector<int> displacements;
  std::vector<int> points_dagger;

  Geometry(int _d)  {

    int base = (_d==5) ? 1:0;
    base = (_d==5) ? 1:0;

    // make coarse grid stencil for 4d , not 5d
    if ( _d==5 ) _d=4;
@@ -72,16 +75,51 @@ public:
    npoint = 2*_d+1;
    directions.resize(npoint);
    displacements.resize(npoint);
    points_dagger.resize(npoint);
    for(int d=0;d<_d;d++){
      directions[d   ] = d+base;
      directions[d+_d] = d+base;
      displacements[d  ] = +1;
      displacements[d+_d]= -1;
      points_dagger[d   ] = d+_d;
      points_dagger[d+_d] = d;
    }
    directions   [2*_d]=0;
    displacements[2*_d]=0;
    points_dagger[2*_d]=2*_d;
  }

  int point(int dir, int disp) {
    assert(disp == -1 || disp == 0 || disp == 1);
    assert(base+0 <= dir && dir < base+4);

    // directions faster index = new indexing
    // 4d (base = 0):
    // point 0  1  2  3  4  5  6  7  8
    // dir   0  1  2  3  0  1  2  3  0
    // disp +1 +1 +1 +1 -1 -1 -1 -1  0
    // 5d (base = 1):
    // point 0  1  2  3  4  5  6  7  8
    // dir   1  2  3  4  1  2  3  4  0
    // disp +1 +1 +1 +1 -1 -1 -1 -1  0

    // displacements faster index = old indexing
    // 4d (base = 0):
    // point 0  1  2  3  4  5  6  7  8
    // dir   0  0  1  1  2  2  3  3  0
    // disp +1 -1 +1 -1 +1 -1 +1 -1  0
    // 5d (base = 1):
    // point 0  1  2  3  4  5  6  7  8
    // dir   1  1  2  2  3  3  4  4  0
    // disp +1 -1 +1 -1 +1 -1 +1 -1  0

    if(dir == 0 and disp == 0)
      return 8;
    else // New indexing
      return (1 - disp) / 2 * 4 + dir - base;
    // else // Old indexing
    //   return (4 * (dir - base) + 1 - disp) / 2;
  }
};
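Illustrative aside (not part of the diff): a minimal standalone check of the "new indexing" formula quoted in the comment tables above. The `stencil_point` helper and the file itself are hypothetical; only the formula and the expected table come from the hunk.

```cpp
#include <cassert>
#include <cstdio>

// Standalone restatement of the "new indexing" formula quoted above:
// point = (1 - disp)/2 * 4 + dir - base, with point 8 reserved for (dir,disp) = (0,0).
static int stencil_point(int dir, int disp, int base) {
  if (dir == 0 && disp == 0) return 8;       // self-coupling stencil point
  assert(disp == +1 || disp == -1);
  return (1 - disp) / 2 * 4 + dir - base;    // +1 hops -> points 0..3, -1 hops -> points 4..7
}

int main() {
  const int base = 0;                        // 4d row of the comment table (base = 1 for 5d)
  const int disps[2] = { +1, -1 };
  for (int i = 0; i < 2; i++)
    for (int dir = base; dir < base + 4; dir++)
      std::printf("dir=%d disp=%+d -> point %d\n",
                  dir, disps[i], stencil_point(dir, disps[i], base));
  // Expected, matching the table: (0,+1)..(3,+1) -> 0..3 and (0,-1)..(3,-1) -> 4..7.
  return 0;
}
```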
  
template<class Fobj,class CComplex,int nbasis>
@@ -258,7 +296,7 @@ public:
// Fine Object == (per site) type of fine field
// nbasis      == number of deflation vectors
template<class Fobj,class CComplex,int nbasis>
class CoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > >  {
class CoarsenedMatrix : public CheckerBoardedSparseMatrixBase<Lattice<iVector<CComplex,nbasis > > >  {
public:

  typedef iVector<CComplex,nbasis >           siteVector;
@@ -268,33 +306,59 @@ public:
  typedef iMatrix<CComplex,nbasis >  Cobj;
  typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field
  typedef Lattice<Fobj >        FineField;
  typedef CoarseVector FermionField;

  // enrich interface, use default implementation as in FermionOperator ///////
  void Dminus(CoarseVector const& in, CoarseVector& out) { out = in; }
  void DminusDag(CoarseVector const& in, CoarseVector& out) { out = in; }
  void ImportPhysicalFermionSource(CoarseVector const& input, CoarseVector& imported) { imported = input; }
  void ImportUnphysicalFermion(CoarseVector const& input, CoarseVector& imported) { imported = input; }
  void ExportPhysicalFermionSolution(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };
  void ExportPhysicalFermionSource(CoarseVector const& solution, CoarseVector& exported) { exported = solution; };

  ////////////////////
  // Data members
  ////////////////////
  Geometry         geom;
  GridBase *       _grid;
  GridBase*        _cbgrid;
  int hermitian;

  CartesianStencil<siteVector,siteVector,int> Stencil;
  CartesianStencil<siteVector,siteVector,int> StencilEven;
  CartesianStencil<siteVector,siteVector,int> StencilOdd;

  std::vector<CoarseMatrix> A;

  std::vector<CoarseMatrix> Aeven;
  std::vector<CoarseMatrix> Aodd;

  CoarseMatrix AselfInv;
  CoarseMatrix AselfInvEven;
  CoarseMatrix AselfInvOdd;

  Vector<RealD> dag_factor;

  ///////////////////////
  // Interface
  ///////////////////////
  GridBase * Grid(void)         { return _grid; };   // this is all the linalg routines need to know
  GridBase * RedBlackGrid()     { return _cbgrid; };

  int ConstEE() { return 0; }

  void M (const CoarseVector &in, CoarseVector &out)
  {
    conformable(_grid,in.Grid());
    conformable(in.Grid(),out.Grid());
    out.Checkerboard() = in.Checkerboard();

    SimpleCompressor<siteVector> compressor;

    Stencil.HaloExchange(in,compressor);
    autoView( in_v , in, AcceleratorRead);
    autoView( out_v , out, AcceleratorWrite);
    autoView( Stencil_v  , Stencil, AcceleratorRead);
    auto& geom_v = geom;
    typedef LatticeView<Cobj> Aview;

    Vector<Aview> AcceleratorViewContainer;
@@ -316,14 +380,14 @@ public:
      int ptype;
      StencilEntry *SE;

      for(int point=0;point<geom.npoint;point++){
      for(int point=0;point<geom_v.npoint;point++){

        SE=Stencil.GetEntry(ptype,point,ss);
        SE=Stencil_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
          nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();
@@ -344,12 +408,72 @@ public:
      return M(in,out);
    } else {
      // corresponds to Galerkin coarsening
      CoarseVector tmp(Grid());
      G5C(tmp, in);
      M(tmp, out);
      G5C(out, out);
      return MdagNonHermitian(in, out);
    }
  };

  void MdagNonHermitian(const CoarseVector &in, CoarseVector &out)
  {
    conformable(_grid,in.Grid());
    conformable(in.Grid(),out.Grid());
    out.Checkerboard() = in.Checkerboard();

    SimpleCompressor<siteVector> compressor;

    Stencil.HaloExchange(in,compressor);
    autoView( in_v , in, AcceleratorRead);
    autoView( out_v , out, AcceleratorWrite);
    autoView( Stencil_v  , Stencil, AcceleratorRead);
    auto& geom_v = geom;
    typedef LatticeView<Cobj> Aview;

    Vector<Aview> AcceleratorViewContainer;

    for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer.push_back(A[p].View(AcceleratorRead));
    Aview *Aview_p = & AcceleratorViewContainer[0];

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;
    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;

    int osites=Grid()->oSites();

    Vector<int> points(geom.npoint, 0);
    for(int p=0; p<geom.npoint; p++)
      points[p] = geom.points_dagger[p];

    RealD* dag_factor_p = &dag_factor[0];

    accelerator_for(sss, Grid()->oSites()*nbasis, Nsimd, {
      int ss = sss/nbasis;
      int b  = sss%nbasis;
      calcComplex res = Zero();
      calcVector nbr;
      int ptype;
      StencilEntry *SE;

      for(int p=0;p<geom_v.npoint;p++){
        int point = points[p];

        SE=Stencil_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

        for(int bb=0;bb<nbasis;bb++) {
          res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
        }
      }
      coalescedWrite(out_v[ss](b),res);
      });

    for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer[p].ViewClose();
  }

  void MdirComms(const CoarseVector &in)
  {
    SimpleCompressor<siteVector> compressor;
@@ -359,6 +483,7 @@ public:
  {
    conformable(_grid,in.Grid());
    conformable(_grid,out.Grid());
    out.Checkerboard() = in.Checkerboard();

    typedef LatticeView<Cobj> Aview;
    Vector<Aview> AcceleratorViewContainer;

@@ -367,6 +492,7 @@ public:

    autoView( out_v , out, AcceleratorWrite);
    autoView( in_v  , in, AcceleratorRead);
    autoView( Stencil_v  , Stencil, AcceleratorRead);

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;

@@ -380,12 +506,12 @@ public:
      int ptype;
      StencilEntry *SE;

      SE=Stencil.GetEntry(ptype,point,ss);
      SE=Stencil_v.GetEntry(ptype,point,ss);

      if(SE->_is_local) {
        nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
      } else {
        nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]);
        nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]);
      }
      acceleratorSynchronise();
@@ -413,34 +539,7 @@ public:

    this->MdirComms(in);

    int ndim = in.Grid()->Nd();

    //////////////
    // 4D action like wilson
    // 0+ => 0
    // 0- => 1
    // 1+ => 2
    // 1- => 3
    // etc..
    //////////////
    // 5D action like DWF
    // 1+ => 0
    // 1- => 1
    // 2+ => 2
    // 2- => 3
    // etc..
    auto point = [dir, disp, ndim](){
      if(dir == 0 and disp == 0)
        return 8;
      else if ( ndim==4 ) {
        return (4 * dir + 1 - disp) / 2;
      } else {
        return (4 * (dir-1) + 1 - disp) / 2;
      }
    }();

    MdirCalc(in,out,point);

    MdirCalc(in,out,geom.point(dir,disp));
  };

  void Mdiag(const CoarseVector &in, CoarseVector &out)
@@ -449,23 +548,296 @@ public:
    MdirCalc(in, out, point); // No comms
  };

 CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) 	:
  void Mooee(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerNo, InverseNo);
  }

  void MooeeInv(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerNo, InverseYes);
  }

  void MooeeDag(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerYes, InverseNo);
  }

  void MooeeInvDag(const CoarseVector &in, CoarseVector &out) {
    MooeeInternal(in, out, DaggerYes, InverseYes);
  }

  void Meooe(const CoarseVector &in, CoarseVector &out) {
    if(in.Checkerboard() == Odd) {
      DhopEO(in, out, DaggerNo);
    } else {
      DhopOE(in, out, DaggerNo);
    }
  }

  void MeooeDag(const CoarseVector &in, CoarseVector &out) {
    if(in.Checkerboard() == Odd) {
      DhopEO(in, out, DaggerYes);
    } else {
      DhopOE(in, out, DaggerYes);
    }
  }

  void Dhop(const CoarseVector &in, CoarseVector &out, int dag) {
    conformable(in.Grid(), _grid); // verifies full grid
    conformable(in.Grid(), out.Grid());

    out.Checkerboard() = in.Checkerboard();

    DhopInternal(Stencil, A, in, out, dag);
  }

  void DhopOE(const CoarseVector &in, CoarseVector &out, int dag) {
    conformable(in.Grid(), _cbgrid);    // verifies half grid
    conformable(in.Grid(), out.Grid()); // drops the cb check

    assert(in.Checkerboard() == Even);
    out.Checkerboard() = Odd;

    DhopInternal(StencilEven, Aodd, in, out, dag);
  }

  void DhopEO(const CoarseVector &in, CoarseVector &out, int dag) {
    conformable(in.Grid(), _cbgrid);    // verifies half grid
    conformable(in.Grid(), out.Grid()); // drops the cb check

    assert(in.Checkerboard() == Odd);
    out.Checkerboard() = Even;

    DhopInternal(StencilOdd, Aeven, in, out, dag);
  }

  void MooeeInternal(const CoarseVector &in, CoarseVector &out, int dag, int inv) {
    out.Checkerboard() = in.Checkerboard();
    assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);

    CoarseMatrix *Aself = nullptr;
    if(in.Grid()->_isCheckerBoarded) {
      if(in.Checkerboard() == Odd) {
        Aself = (inv) ? &AselfInvOdd : &Aodd[geom.npoint-1];
        DselfInternal(StencilOdd, *Aself, in, out, dag);
      } else {
        Aself = (inv) ? &AselfInvEven : &Aeven[geom.npoint-1];
        DselfInternal(StencilEven, *Aself, in, out, dag);
      }
    } else {
      Aself = (inv) ? &AselfInv : &A[geom.npoint-1];
      DselfInternal(Stencil, *Aself, in, out, dag);
    }
    assert(Aself != nullptr);
  }

  void DselfInternal(CartesianStencil<siteVector,siteVector,int> &st, CoarseMatrix &a,
                       const CoarseVector &in, CoarseVector &out, int dag) {
    int point = geom.npoint-1;
    autoView( out_v, out, AcceleratorWrite);
    autoView( in_v,  in,  AcceleratorRead);
    autoView( st_v,  st,  AcceleratorRead);
    autoView( a_v,   a,   AcceleratorRead);

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;
    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;

    RealD* dag_factor_p = &dag_factor[0];

    if(dag) {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        SE=st_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

        for(int bb=0;bb<nbasis;bb++) {
          res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(a_v[ss](b,bb))*nbr(bb);
        }
        coalescedWrite(out_v[ss](b),res);
      });
    } else {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        SE=st_v.GetEntry(ptype,point,ss);

        if(SE->_is_local) {
          nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
        } else {
          nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
        }
        acceleratorSynchronise();

        for(int bb=0;bb<nbasis;bb++) {
          res = res + coalescedRead(a_v[ss](b,bb))*nbr(bb);
        }
        coalescedWrite(out_v[ss](b),res);
      });
    }
  }

  void DhopInternal(CartesianStencil<siteVector,siteVector,int> &st, std::vector<CoarseMatrix> &a,
                    const CoarseVector &in, CoarseVector &out, int dag) {
    SimpleCompressor<siteVector> compressor;

    st.HaloExchange(in,compressor);
    autoView( in_v,  in,  AcceleratorRead);
    autoView( out_v, out, AcceleratorWrite);
    autoView( st_v , st,  AcceleratorRead);
    typedef LatticeView<Cobj> Aview;

    // determine in what order we need the points
    int npoint = geom.npoint-1;
    Vector<int> points(npoint, 0);
    for(int p=0; p<npoint; p++)
      points[p] = (dag && !hermitian) ? geom.points_dagger[p] : p;

    Vector<Aview> AcceleratorViewContainer;
    for(int p=0;p<npoint;p++) AcceleratorViewContainer.push_back(a[p].View(AcceleratorRead));
    Aview *Aview_p = & AcceleratorViewContainer[0];

    const int Nsimd = CComplex::Nsimd();
    typedef decltype(coalescedRead(in_v[0])) calcVector;
    typedef decltype(coalescedRead(in_v[0](0))) calcComplex;

    RealD* dag_factor_p = &dag_factor[0];

    if(dag) {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        for(int p=0;p<npoint;p++){
          int point = points[p];
          SE=st_v.GetEntry(ptype,point,ss);

          if(SE->_is_local) {
            nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
          } else {
            nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
          }
          acceleratorSynchronise();

          for(int bb=0;bb<nbasis;bb++) {
            res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
          }
        }
        coalescedWrite(out_v[ss](b),res);
      });
    } else {
      accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, {
        int ss = sss/nbasis;
        int b  = sss%nbasis;
        calcComplex res = Zero();
        calcVector nbr;
        int ptype;
        StencilEntry *SE;

        for(int p=0;p<npoint;p++){
          int point = points[p];
          SE=st_v.GetEntry(ptype,point,ss);

          if(SE->_is_local) {
            nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute);
          } else {
            nbr = coalescedRead(st_v.CommBuf()[SE->_offset]);
          }
          acceleratorSynchronise();

          for(int bb=0;bb<nbasis;bb++) {
            res = res + coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb);
          }
        }
        coalescedWrite(out_v[ss](b),res);
      });
    }

    for(int p=0;p<npoint;p++) AcceleratorViewContainer[p].ViewClose();
  }

  CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) 	:
    _grid(&CoarseGrid),
    _cbgrid(new GridRedBlackCartesian(&CoarseGrid)),
    geom(CoarseGrid._ndimension),
    hermitian(hermitian_),
    Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
      A(geom.npoint,&CoarseGrid)
    StencilEven(_cbgrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    StencilOdd(_cbgrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
    A(geom.npoint,&CoarseGrid),
    Aeven(geom.npoint,_cbgrid),
    Aodd(geom.npoint,_cbgrid),
    AselfInv(&CoarseGrid),
    AselfInvEven(_cbgrid),
    AselfInvOdd(_cbgrid),
    dag_factor(nbasis*nbasis)
  {
    fillFactor();
  };

  CoarsenedMatrix(GridCartesian &CoarseGrid, GridRedBlackCartesian &CoarseRBGrid, int hermitian_=0) 	:

    _grid(&CoarseGrid),
    _cbgrid(&CoarseRBGrid),
    geom(CoarseGrid._ndimension),
    hermitian(hermitian_),
    Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements,0),
    StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements,0),
    A(geom.npoint,&CoarseGrid),
    Aeven(geom.npoint,&CoarseRBGrid),
    Aodd(geom.npoint,&CoarseRBGrid),
    AselfInv(&CoarseGrid),
    AselfInvEven(&CoarseRBGrid),
    AselfInvOdd(&CoarseRBGrid),
    dag_factor(nbasis*nbasis)
  {
    fillFactor();
  };

  void fillFactor() {
    Eigen::MatrixXd dag_factor_eigen = Eigen::MatrixXd::Ones(nbasis, nbasis);
    if(!hermitian) {
      const int nb = nbasis/2;
      dag_factor_eigen.block(0,nb,nb,nb) *= -1.0;
      dag_factor_eigen.block(nb,0,nb,nb) *= -1.0;
    }

    // GPU readable prefactor
    thread_for(i, nbasis*nbasis, {
      int j = i/nbasis;
      int k = i%nbasis;
      dag_factor[i] = dag_factor_eigen(j, k);
    });
  }
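Illustrative aside (not part of the diff): the sign mask that `fillFactor()` builds. For a non-hermitian coarse operator the dagger is applied through `dag_factor`, which is +1 on the diagonal nbasis/2 x nbasis/2 blocks and -1 on the off-diagonal blocks. A minimal sketch with a hypothetical `nbasis = 4`, mirroring the two Eigen `block(...) *= -1.0` calls above:

```cpp
#include <cstdio>
#include <vector>

int main() {
  const int nbasis = 4;                    // hypothetical small basis for illustration
  const int nb = nbasis / 2;
  std::vector<double> dag_factor(nbasis * nbasis, 1.0);
  // Same effect as the Eigen block operations in fillFactor(): flip the two off-diagonal blocks.
  for (int j = 0; j < nbasis; j++)
    for (int k = 0; k < nbasis; k++)
      if ((j < nb) != (k < nb)) dag_factor[j * nbasis + k] = -1.0;

  for (int j = 0; j < nbasis; j++) {       // prints a 2x2 block pattern of +1 / -1
    for (int k = 0; k < nbasis; k++) std::printf("%+2.0f ", dag_factor[j * nbasis + k]);
    std::printf("\n");
  }
  return 0;
}
```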
  void CoarsenOperator(GridBase *FineGrid,LinearOperatorBase<Lattice<Fobj> > &linop,
		       Aggregation<Fobj,CComplex,nbasis> & Subspace)
  {
    typedef Lattice<typename Fobj::tensor_reduced> FineComplexField;
    typedef typename Fobj::scalar_type scalar_type;

    std::cout << GridLogMessage<< "CoarsenMatrix "<< std::endl;

    FineComplexField one(FineGrid); one=scalar_type(1.0,0.0);
    FineComplexField zero(FineGrid); zero=scalar_type(0.0,0.0);
@@ -496,11 +868,13 @@ public:

    CoarseScalar InnerProd(Grid());

    std::cout << GridLogMessage<< "CoarsenMatrix Orthog "<< std::endl;
    // Orthogonalise the subblocks over the basis
    blockOrthogonalise(InnerProd,Subspace.subspace);

    // Compute the matrix elements of linop between this orthonormal
    // set of vectors.
    std::cout << GridLogMessage<< "CoarsenMatrix masks "<< std::endl;
    int self_stencil=-1;
    for(int p=0;p<geom.npoint;p++)
    {

@@ -539,7 +913,7 @@ public:

      phi=Subspace.subspace[i];

      //      std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl;
      std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl;
      linop.OpDirAll(phi,Mphi_p);
      linop.OpDiag  (phi,Mphi_p[geom.npoint-1]);
@@ -568,6 +942,18 @@ public:
	    autoView( A_self  , A[self_stencil], AcceleratorWrite);

	    accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_p[ss](j,i),oZProj_v(ss)); });
	    if ( hermitian && (disp==-1) ) {
	      for(int pp=0;pp<geom.npoint;pp++){// Find the opposite link and set <j|A|i> = <i|A|j>*
		int dirp   = geom.directions[pp];
		int dispp  = geom.displacements[pp];
		if ( (dirp==dir) && (dispp==1) ){
		  auto sft = conjugate(Cshift(oZProj,dir,1));
		  autoView( sft_v    ,  sft  , AcceleratorWrite);
		  autoView( A_pp     ,  A[pp], AcceleratorWrite);
		  accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_pp[ss](i,j),sft_v(ss)); });
		}
	      }
	    }

	  }
	}
@@ -606,28 +992,54 @@ public:
    }
    if(hermitian) {
      std::cout << GridLogMessage << " ForceHermitian, new code "<<std::endl;
      ForceHermitian();
    }

    InvertSelfStencilLink(); std::cout << GridLogMessage << "Coarse self link inverted" << std::endl;
    FillHalfCbs(); std::cout << GridLogMessage << "Coarse half checkerboards filled" << std::endl;
  }

  void ForceHermitian(void) {
    CoarseMatrix Diff  (Grid());
    for(int p=0;p<geom.npoint;p++){
      int dir   = geom.directions[p];
      int disp  = geom.displacements[p];
      if(disp==-1) {
	// Find the opposite link
	for(int pp=0;pp<geom.npoint;pp++){
	  int dirp   = geom.directions[pp];
	  int dispp  = geom.displacements[pp];
	  if ( (dirp==dir) && (dispp==1) ){
	    //	    Diff = adj(Cshift(A[p],dir,1)) - A[pp];
	    //	    std::cout << GridLogMessage<<" Replacing stencil leg "<<pp<<" with leg "<<p<< " diff "<<norm2(Diff) <<std::endl;
	    A[pp] = adj(Cshift(A[p],dir,1));
	  }
	}
      }
  void InvertSelfStencilLink() {
    std::cout << GridLogDebug << "CoarsenedMatrix::InvertSelfStencilLink" << std::endl;
    int localVolume = Grid()->lSites();

    typedef typename Cobj::scalar_object scalar_object;

    autoView(Aself_v,    A[geom.npoint-1], CpuRead);
    autoView(AselfInv_v, AselfInv,         CpuWrite);
    thread_for(site, localVolume, { // NOTE: Not able to bring this to GPU because of Eigen + peek/poke
      Eigen::MatrixXcd selfLinkEigen    = Eigen::MatrixXcd::Zero(nbasis, nbasis);
      Eigen::MatrixXcd selfLinkInvEigen = Eigen::MatrixXcd::Zero(nbasis, nbasis);

      scalar_object selfLink    = Zero();
      scalar_object selfLinkInv = Zero();

      Coordinate lcoor;

      Grid()->LocalIndexToLocalCoor(site, lcoor);
      peekLocalSite(selfLink, Aself_v, lcoor);

      for (int i = 0; i < nbasis; ++i)
        for (int j = 0; j < nbasis; ++j)
          selfLinkEigen(i, j) = static_cast<ComplexD>(TensorRemove(selfLink(i, j)));

      selfLinkInvEigen = selfLinkEigen.inverse();

      for(int i = 0; i < nbasis; ++i)
        for(int j = 0; j < nbasis; ++j)
          selfLinkInv(i, j) = selfLinkInvEigen(i, j);

      pokeLocalSite(selfLinkInv, AselfInv_v, lcoor);
    });
  }

  void FillHalfCbs() {
    std::cout << GridLogDebug << "CoarsenedMatrix::FillHalfCbs" << std::endl;
    for(int p = 0; p < geom.npoint; ++p) {
      pickCheckerboard(Even, Aeven[p], A[p]);
      pickCheckerboard(Odd, Aodd[p], A[p]);
    }
    pickCheckerboard(Even, AselfInvEven, AselfInv);
    pickCheckerboard(Odd, AselfInvOdd, AselfInv);
  }
};
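Illustrative aside (not part of the diff): the even/odd methods added above (Mooee, MooeeInv, Meooe via DhopOE/DhopEO) are the ingredients of a red-black Schur decomposition of the coarse operator. A toy scalar 2x2 block system makes the algebra concrete; the matrix entries below are made up purely for illustration.

```cpp
#include <cstdio>

// Toy block system with one even and one odd "site": M = [[Mee, Meo], [Moe, Moo]].
// In CoarsenedMatrix terms: Mee/Moo <-> Mooee on each checkerboard,
// Meo/Moe <-> Meooe (DhopOE / DhopEO), and Mee^{-1} <-> MooeeInv.
int main() {
  double Mee = 4.0, Moo = 4.0, Meo = -1.0, Moe = -1.0; // hypothetical values
  double be = 1.0, bo = 2.0;                            // source on even/odd sites

  // Schur complement on the odd checkerboard: S = Moo - Moe * Mee^{-1} * Meo
  double S = Moo - Moe * (1.0 / Mee) * Meo;
  double bo_prime = bo - Moe * (1.0 / Mee) * be;        // projected source
  double xo = bo_prime / S;                             // "solve" the odd system
  double xe = (be - Meo * xo) / Mee;                    // reconstruct the even solution

  // Residuals of the full 2x2 system should vanish.
  std::printf("xe=%g xo=%g, residuals: %g %g\n", xe, xo,
              Mee * xe + Meo * xo - be, Moe * xe + Moo * xo - bo);
  return 0;
}
```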
@@ -1,67 +0,0 @@
#include <Grid/GridCore.h>
#include <fcntl.h>

NAMESPACE_BEGIN(Grid);

MemoryStats *MemoryProfiler::stats = nullptr;
bool         MemoryProfiler::debug = false;

void check_huge_pages(void *Buf,uint64_t BYTES)
{
#ifdef __linux__
  int fd = open("/proc/self/pagemap", O_RDONLY);
  assert(fd >= 0);
  const int page_size = 4096;
  uint64_t virt_pfn = (uint64_t)Buf / page_size;
  off_t offset = sizeof(uint64_t) * virt_pfn;
  uint64_t npages = (BYTES + page_size-1) / page_size;
  uint64_t pagedata[npages];
  uint64_t ret = lseek(fd, offset, SEEK_SET);
  assert(ret == offset);
  ret = ::read(fd, pagedata, sizeof(uint64_t)*npages);
  assert(ret == sizeof(uint64_t) * npages);
  int nhugepages = npages / 512;
  int n4ktotal, nnothuge;
  n4ktotal = 0;
  nnothuge = 0;
  for (int i = 0; i < nhugepages; ++i) {
    uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
    for (int j = 0; j < 512; ++j) {
      uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
      ++n4ktotal;
      if (pageaddr != baseaddr + j * page_size)
	++nnothuge;
    }
  }
  int rank = CartesianCommunicator::RankWorld();
  printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
#endif
}

std::string sizeString(const size_t bytes)
{
  constexpr unsigned int bufSize = 256;
  const char             *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
  char                   buf[256];
  size_t                 s     = 0;
  double                 count = bytes;

  while (count >= 1024 && s < 7)
    {
      s++;
      count /= 1024;
    }
  if (count - floor(count) == 0.0)
    {
      snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
    }
  else
    {
      snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
    }

  return std::string(buf);
}

NAMESPACE_END(Grid);
@@ -173,7 +173,8 @@ template<class T> using cshiftAllocator = devAllocator<T>;
template<class T> using cshiftAllocator = std::allocator<T>;
#endif

template<class T> using Vector     = std::vector<T,uvmAllocator<T> >;
template<class T> using Vector        = std::vector<T,uvmAllocator<T> >;
template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;

@@ -34,8 +34,6 @@ NAMESPACE_BEGIN(Grid);

// Move control to configure.ac and Config.h?

#define ALLOCATION_CACHE
#define GRID_ALLOC_ALIGN (2*1024*1024)
#define GRID_ALLOC_SMALL_LIMIT (4096)

/*Pinning pages is costly*/
@@ -1,11 +1,12 @@
#include <Grid/GridCore.h>

#ifndef GRID_UVM

#warning "Using explicit device memory copies"
NAMESPACE_BEGIN(Grid);
//define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
#define dprintf(...)

////////////////////////////////////////////////////////////
// For caching copies of data on device
////////////////////////////////////////////////////////////
@@ -103,7 +104,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
  ///////////////////////////////////////////////////////////
  assert(AccCache.state!=Empty);

  //  dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
   dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  assert(AccCache.accLock==0);
  assert(AccCache.cpuLock==0);
  assert(AccCache.CpuPtr!=(uint64_t)NULL);

@@ -111,7 +112,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
    AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
    DeviceBytes   -=AccCache.bytes;
    LRUremove(AccCache);
    //    dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
    dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
  }
  uint64_t CpuPtr = AccCache.CpuPtr;
  EntryErase(CpuPtr);

@@ -125,7 +126,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
  ///////////////////////////////////////////////////////////////////////////
  assert(AccCache.state!=Empty);

  //  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
  assert(AccCache.accLock==0);
  assert(AccCache.cpuLock==0);
  if(AccCache.state==AccDirty) {

@@ -136,7 +137,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
    AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
    DeviceBytes   -=AccCache.bytes;
    LRUremove(AccCache);
    //    dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
    dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
  }
  uint64_t CpuPtr = AccCache.CpuPtr;
  EntryErase(CpuPtr);

@@ -149,7 +150,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
  assert(AccCache.AccPtr!=(uint64_t)NULL);
  assert(AccCache.CpuPtr!=(uint64_t)NULL);
  acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
  //  dprintf("MemoryManager: Flush  %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  dprintf("MemoryManager: Flush  %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  DeviceToHostBytes+=AccCache.bytes;
  DeviceToHostXfer++;
  AccCache.state=Consistent;

@@ -164,7 +165,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
    AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
    DeviceBytes+=AccCache.bytes;
  }
  //  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
  acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
  HostToDeviceBytes+=AccCache.bytes;
  HostToDeviceXfer++;
@@ -227,18 +228,24 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
  // Find if present, otherwise get or force an empty
  ////////////////////////////////////////////////////////////////////////////
  if ( EntryPresent(CpuPtr)==0 ){
    EvictVictims(bytes);
    EntryCreate(CpuPtr,bytes,mode,hint);
  }

  auto AccCacheIterator = EntryLookup(CpuPtr);
  auto & AccCache = AccCacheIterator->second;

  if (!AccCache.AccPtr) {
    EvictVictims(bytes);
  }
  assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard));

  assert(AccCache.cpuLock==0);  // Programming error

  if(AccCache.state!=Empty) {
    dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
		    (uint64_t)AccCache.CpuPtr,
		    (uint64_t)CpuPtr,
		    (uint64_t)AccCache.bytes,
		    (uint64_t)bytes);
    assert(AccCache.CpuPtr == CpuPtr);
    assert(AccCache.bytes  ==bytes);
  }

@@ -285,21 +292,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
      AccCache.state  = Consistent; // CpuDirty + AccRead => Consistent
    }
    AccCache.accLock++;
    //    printf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
    dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
  } else if(AccCache.state==Consistent) {
    if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
      AccCache.state  = AccDirty;   // Consistent + AcceleratorWrite=> AccDirty
    else
      AccCache.state  = Consistent; // Consistent + AccRead => Consistent
    AccCache.accLock++;
    //    printf("Consistent entry into device accLock %d\n",AccCache.accLock);
    dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
  } else if(AccCache.state==AccDirty) {
    if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
      AccCache.state  = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
    else
      AccCache.state  = AccDirty; // AccDirty + AccRead => AccDirty
    AccCache.accLock++;
    //    printf("AccDirty entry into device accLock %d\n",AccCache.accLock);
    dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
  } else {
    assert(0);
  }
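Illustrative aside (not part of the diff): the comments in this hunk describe a small state machine for a cached entry when the accelerator opens a view on it. A standalone sketch of that table, assuming only the three states and the read/write modes visible here; the CpuDirty-plus-write transition is not shown in this hunk and is assumed below.

```cpp
#include <cassert>
#include <cstdio>

enum State { CpuDirty, Consistent, AccDirty };   // cache states named in the comments above
enum Mode  { AcceleratorRead, AcceleratorWrite };

// Next state of a cache entry when the accelerator opens a view on it,
// following the transition comments in AcceleratorViewOpen.
State accViewOpen(State s, Mode m) {
  switch (s) {
    case CpuDirty:   return (m == AcceleratorWrite) ? AccDirty   // assumed, not shown in the hunk
                                                    : Consistent; // CpuDirty + AccRead => Consistent
    case Consistent: return (m == AcceleratorWrite) ? AccDirty : Consistent;
    case AccDirty:   return AccDirty;              // stays dirty for both read and write
  }
  assert(0); return AccDirty;
}

int main() {
  const char *names[] = {"CpuDirty", "Consistent", "AccDirty"};
  for (int s = 0; s < 3; s++)
    for (int m = 0; m < 2; m++)
      std::printf("%-10s + %s -> %s\n", names[s],
                  m ? "AcceleratorWrite" : "AcceleratorRead",
                  names[accViewOpen(State(s), Mode(m))]);
  return 0;
}
```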
@@ -361,13 +368,16 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
  // Find if present, otherwise get or force an empty
  ////////////////////////////////////////////////////////////////////////////
  if ( EntryPresent(CpuPtr)==0 ){
    EvictVictims(bytes);
    EntryCreate(CpuPtr,bytes,mode,transient);
  }

  auto AccCacheIterator = EntryLookup(CpuPtr);
  auto & AccCache = AccCacheIterator->second;

  if (!AccCache.AccPtr) {
     EvictVictims(bytes);
  }

  assert((mode==CpuRead)||(mode==CpuWrite));
  assert(AccCache.accLock==0);  // Programming error
@@ -1,7 +1,6 @@
#include <Grid/GridCore.h>
#ifdef GRID_UVM

#warning "Grid is assuming unified virtual memory address space"
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////
// View management is 1:1 address space mapping

@@ -36,7 +36,7 @@ static const int CbBlack=1;
static const int Even   =CbRed;
static const int Odd    =CbBlack;

accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex, Coordinate &rdim, Coordinate &chk_dim_msk)
accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex,const Coordinate &rdim,const Coordinate &chk_dim_msk)
{
  int nd=rdim.size();
  Coordinate coor(nd);
@@ -1,4 +1,3 @@

/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

@@ -108,6 +107,8 @@ public:
  ////////////////////////////////////////////////////////////
  // Reduction
  ////////////////////////////////////////////////////////////
  void GlobalMax(RealD &);
  void GlobalMax(RealF &);
  void GlobalSum(RealF &);
  void GlobalSumVector(RealF *,int N);
  void GlobalSum(RealD &);
@@ -275,6 +275,16 @@ void CartesianCommunicator::GlobalXOR(uint64_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalMax(float &f)
{
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_MAX,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalMax(double &d)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_MAX,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
  assert(ierr==0);
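Illustrative aside (not part of the diff): the new `GlobalMax` overloads are thin wrappers around an in-place `MPI_Allreduce` with `MPI_MAX`. A minimal plain-MPI sketch of the same pattern, outside Grid, assuming an MPI build environment:

```cpp
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank;  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  double local = 1.0 + rank;                 // e.g. a per-rank residual norm
  // Same call shape as CartesianCommunicator::GlobalMax(double &) above.
  int ierr = MPI_Allreduce(MPI_IN_PLACE, &local, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
  if (ierr == MPI_SUCCESS && rank == 0)
    std::printf("global max = %g\n", local); // largest value over all ranks

  MPI_Finalize();
  return 0;
}
```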
@@ -67,6 +67,8 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)

CartesianCommunicator::~CartesianCommunicator(){}

void CartesianCommunicator::GlobalMax(float &){}
void CartesianCommunicator::GlobalMax(double &){}
void CartesianCommunicator::GlobalSum(float &){}
void CartesianCommunicator::GlobalSumVector(float *,int N){}
void CartesianCommunicator::GlobalSum(double &){}

@@ -102,7 +102,7 @@ public:
  ///////////////////////////////////////////////////
  static void SharedMemoryAllocate(uint64_t bytes, int flags);
  static void SharedMemoryFree(void);
  static void SharedMemoryCopy(void *dest,const void *src,size_t bytes);
  static void SharedMemoryCopy(void *dest,void *src,size_t bytes);
  static void SharedMemoryZero(void *dest,size_t bytes);

};
@@ -666,7 +666,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#endif
      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);

      //      std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< size<< "bytes)"<<std::endl;
      if ( ptr == (void * )MAP_FAILED ) {
	perror("failed mmap");
	assert(0);

@@ -716,7 +715,7 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
  bzero(dest,bytes);
#endif
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,const void *src,size_t bytes)
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
#ifdef GRID_CUDA
  cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
@@ -772,11 +771,13 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
  std::vector<int> ranks(size);   for(int r=0;r<size;r++) ranks[r]=r;
  MPI_Group_translate_ranks (FullGroup,size,&ranks[0],ShmGroup, &ShmRanks[0]);

#ifdef GRID_SHM_DISABLE
#ifdef GRID_SHM_FORCE_MPI
  // Hide the shared memory path between ranks
  {
    for(int r=0;r<size;r++){
      ShmRanks[r] = MPI_UNDEFINED;
      if ( r!=rank ) {
	ShmRanks[r] = MPI_UNDEFINED;
      }
    }
  }
#endif
@@ -29,6 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridCore.h>

NAMESPACE_BEGIN(Grid);
#define header "SharedMemoryNone: "

/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)

@@ -55,6 +56,38 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended, use anonymous mmap
////////////////////////////////////////////////////////////////////////////////////////////
#if 1
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  std::cout << header "SharedMemoryAllocate "<< bytes<< " GPU implementation "<<std::endl;
  void * ShmCommBuf ;
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);

  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Each MPI rank should allocate our own buffer
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  ShmCommBuf = acceleratorAllocDevice(bytes);

  if (ShmCommBuf == (void *)NULL ) {
    std::cerr << " SharedMemoryNone.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
    exit(EXIT_FAILURE);
  }
  if ( WorldRank == 0 ){
    std::cout << WorldRank << header " SharedMemoryNone.cc acceleratorAllocDevice "<< bytes
	      << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
  }
  SharedMemoryZero(ShmCommBuf,bytes);

  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Loop over ranks/gpu's on our node
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  WorldShmCommBufs[0] = ShmCommBuf;

  _ShmAllocBytes=bytes;
  _ShmAlloc=1;
}
#else
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  void * ShmCommBuf ;
@@ -83,7 +116,15 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 | 
			
		||||
  _ShmAllocBytes=bytes;
 | 
			
		||||
  _ShmAlloc=1;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
 | 
			
		||||
{
 | 
			
		||||
  acceleratorMemSet(dest,0,bytes);
 | 
			
		||||
}
 | 
			
		||||
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
 | 
			
		||||
{
 | 
			
		||||
  acceleratorCopyToDevice(src,dest,bytes);
 | 
			
		||||
}
 | 
			
		||||
////////////////////////////////////////////////////////
 | 
			
		||||
// Global shared functionality finished
 | 
			
		||||
// Now move to per communicator functionality
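The SharedMemoryNone path above now routes the communications buffer through acceleratorAllocDevice and publishes a single rank-local buffer. A minimal calling sketch, assuming Init has already run; the byte count and the zero flags value are illustrative only:

  uint64_t bytes = 64ULL*1024*1024;                     // hypothetical 64 MB comms buffer
  GlobalSharedMemory::SharedMemoryAllocate(bytes, 0);   // device allocation, zeroed via acceleratorMemSet
  // afterwards WorldShmCommBufs[0] points at the buffer and _ShmAlloc==1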
 
@@ -62,7 +62,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
    basis_v.push_back(basis[k].View(AcceleratorWrite));
  }

#if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) )
#if ( (!defined(GRID_CUDA)) )
  int max_threads = thread_max();
  Vector < vobj > Bt(Nm * max_threads);
  thread_region
@@ -164,7 +164,8 @@ void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,in
  auto basis_vp=& basis_v[0];
  autoView(result_v,result,AcceleratorWrite);
  accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{
    auto B=coalescedRead(zz);
    vobj zzz=Zero();
    auto B=coalescedRead(zzz);
    for(int k=k0; k<k1; ++k){
      B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]);
    }
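The zz to zzz change above seeds the coalesced accumulator from an explicitly zero-initialised vobj rather than an undefined name. The pattern in isolation, as a sketch:

  vobj zzz = Zero();                 // well-defined zero accumulator
  auto B   = coalescedRead(zzz);     // per-lane value on GPU, full vector object on CPU
  for(int k=k0;k<k1;++k) B += Qt_j[k]*coalescedRead(basis_vp[k][ss]);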
 
@@ -96,8 +96,34 @@ inline typename vobj::scalar_objectD sumD_cpu(const vobj *arg, Integer osites)
  ssobj ret = ssum;
  return ret;
}
/*
Threaded max, don't use for now
template<class Double>
inline Double max(const Double *arg, Integer osites)
{
  //  const int Nsimd = vobj::Nsimd();
  const int nthread = GridThread::GetThreads();


  std::vector<Double> maxarray(nthread);
  
  thread_for(thr,nthread, {
    int nwork, mywork, myoff;
    nwork = osites;
    GridThread::GetWork(nwork,thr,mywork,myoff);
    Double max=arg[0];
    for(int ss=myoff;ss<mywork+myoff; ss++){
      if( arg[ss] > max ) max = arg[ss];
    }
    maxarray[thr]=max;
  });
  
  Double tmax=maxarray[0];
  for(int i=0;i<nthread;i++){
    if (maxarray[i]>tmax) tmax = maxarray[i];
  } 
  return tmax;
}
*/
template<class vobj>
inline typename vobj::scalar_object sum(const vobj *arg, Integer osites)
{
@@ -141,6 +167,32 @@ template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
  return real(nrm); 
}

//The global maximum of the site norm2
template<class vobj> inline RealD maxLocalNorm2(const Lattice<vobj> &arg)
{
  typedef typename vobj::tensor_reduced vscalar;  //iScalar<iScalar<.... <vPODtype> > >
  typedef typename vscalar::scalar_object  scalar;   //iScalar<iScalar<.... <PODtype> > >

  Lattice<vscalar> inner = localNorm2(arg);

  auto grid = arg.Grid();

  RealD max;
  for(int l=0;l<grid->lSites();l++){
    Coordinate coor;
    scalar val;
    RealD r;
    grid->LocalIndexToLocalCoor(l,coor);
    peekLocalSite(val,inner,coor);
    r=real(TensorRemove(val));
    if( (l==0) || (r>max)){
      max=r;
    }
  }
  grid->GlobalMax(max);
  return max;
}
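maxLocalNorm2 above reduces the per-site norm2 to a single maximum across all sites and MPI ranks via GlobalMax. A usage sketch; the field, grid and RNG names are assumptions, not part of this hunk:

  LatticeFermion psi(FGrid);
  gaussian(RNG5, psi);
  RealD mx = maxLocalNorm2(psi);   // max over x of norm2(psi(x)), reduced over ranks
  std::cout << GridLogMessage << "max site norm2 " << mx << std::endl;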

// Double inner product
template<class vobj>
inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right)
 
@@ -127,6 +127,11 @@ accelerator_inline void convertType(T1 & out, const iScalar<T2> & in) {
  convertType(out,in._internal);
}

template<typename T1, typename std::enable_if<!isGridScalar<T1>::value, T1>::type* = nullptr>
accelerator_inline void convertType(T1 & out, const iScalar<T1> & in) {
  convertType(out,in._internal);
}

template<typename T1,typename T2>
accelerator_inline void convertType(iScalar<T1> & out, const T2 & in) {
  convertType(out._internal,in);
 
@@ -67,8 +67,13 @@ public:
  accelerator_inline const vobj & operator()(size_t i) const { return this->_odata[i]; }
#endif

  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
  accelerator_inline vobj       & operator[](size_t i)       { return this->_odata[i]; };
#if 1
  //  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
  accelerator_inline vobj       & operator[](size_t i) const { return this->_odata[i]; };
#else
  //  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
  //  accelerator_inline vobj       & operator[](size_t i)       { return this->_odata[i]; };
#endif

  accelerator_inline uint64_t begin(void) const { return 0;};
  accelerator_inline uint64_t end(void)   const { return this->_odata_size; };
 
@@ -123,7 +123,7 @@ assert(GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) < 1.0e-5);
 ////////////////////////////////////////////////////////////
 // Helper to fill out metadata
 ////////////////////////////////////////////////////////////
 template<class vobj> void ScidacMetaData(Lattice<vobj> & field,
template<class vobj> void ScidacMetaData(Lattice<vobj> & field,
					  FieldMetaData &header,
					  scidacRecord & _scidacRecord,
					  scidacFile   & _scidacFile) 
@@ -619,12 +619,12 @@ class IldgWriter : public ScidacWriter {
  // Don't require scidac records EXCEPT checksum
  // Use Grid MetaData object if present.
  ////////////////////////////////////////////////////////////////
  template <class vsimd>
  void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,int sequence,std::string LFN,std::string description) 
  template <class stats = PeriodicGaugeStatistics>
  void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,int sequence,std::string LFN,std::string description) 
  {
    GridBase * grid = Umu.Grid();
    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
    typedef iLorentzColourMatrix<vsimd> vobj;
    typedef Lattice<vLorentzColourMatrixD> GaugeField;
    typedef vLorentzColourMatrixD vobj;
    typedef typename vobj::scalar_object sobj;

    ////////////////////////////////////////
@@ -636,6 +636,9 @@ class IldgWriter : public ScidacWriter {

    ScidacMetaData(Umu,header,_scidacRecord,_scidacFile);

    stats Stats;
    Stats(Umu,header);
    
    std::string format = header.floating_point;
    header.ensemble_id    = description;
    header.ensemble_label = description;
@@ -705,10 +708,10 @@ class IldgReader : public GridLimeReader {
  // Else use ILDG MetaData object if present.
  // Else use SciDAC MetaData object if present.
  ////////////////////////////////////////////////////////////////
  template <class vsimd>
  void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, FieldMetaData &FieldMetaData_) {
  template <class stats = PeriodicGaugeStatistics>
  void readConfiguration(Lattice<vLorentzColourMatrixD> &Umu, FieldMetaData &FieldMetaData_) {

    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
    typedef Lattice<vLorentzColourMatrixD > GaugeField;
    typedef typename GaugeField::vector_object  vobj;
    typedef typename vobj::scalar_object sobj;

@@ -921,7 +924,8 @@ class IldgReader : public GridLimeReader {

    if ( found_FieldMetaData || found_usqcdInfo ) {
      FieldMetaData checker;
      GaugeStatistics(Umu,checker);
      stats Stats;
      Stats(Umu,checker);
      assert(fabs(checker.plaquette  - FieldMetaData_.plaquette )<1.0e-5);
      assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5);
      std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl;
 
@@ -176,29 +176,18 @@ template<class vobj> inline void PrepareMetaData(Lattice<vobj> & field, FieldMet
  GridMetaData(grid,header); 
  MachineCharacteristics(header);
}
inline void GaugeStatistics(Lattice<vLorentzColourMatrixF> & data,FieldMetaData &header)
template<class Impl>
class GaugeStatistics
{
  // How to convert data precision etc...
  header.link_trace=WilsonLoops<PeriodicGimplF>::linkTrace(data);
  header.plaquette =WilsonLoops<PeriodicGimplF>::avgPlaquette(data);
}
inline void GaugeStatistics(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
{
  // How to convert data precision etc...
  header.link_trace=WilsonLoops<PeriodicGimplD>::linkTrace(data);
  header.plaquette =WilsonLoops<PeriodicGimplD>::avgPlaquette(data);
}
template<> inline void PrepareMetaData<vLorentzColourMatrixF>(Lattice<vLorentzColourMatrixF> & field, FieldMetaData &header)
{
   
  GridBase *grid = field.Grid();
  std::string format = getFormatString<vLorentzColourMatrixF>();
  header.floating_point = format;
  header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac
  GridMetaData(grid,header); 
  GaugeStatistics(field,header);
  MachineCharacteristics(header);
}
public:
  void operator()(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
  {
    header.link_trace=WilsonLoops<Impl>::linkTrace(data);
    header.plaquette =WilsonLoops<Impl>::avgPlaquette(data);
  }
};
typedef GaugeStatistics<PeriodicGimplD> PeriodicGaugeStatistics;
typedef GaugeStatistics<ConjugateGimplD> ConjugateGaugeStatistics;
template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzColourMatrixD> & field, FieldMetaData &header)
{
  GridBase *grid = field.Grid();
@@ -206,7 +195,6 @@ template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzCo
  header.floating_point = format;
  header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac
  GridMetaData(grid,header); 
  GaugeStatistics(field,header);
  MachineCharacteristics(header);
}
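The free GaugeStatistics overloads above are replaced by a GaugeStatistics<Impl> functor, so the plaquette and link trace written into the metadata can follow the gauge implementation (periodic or conjugate boundary conditions). A short usage sketch:

  FieldMetaData header;
  PeriodicGaugeStatistics Stats;     // or ConjugateGaugeStatistics for conjugate-BC ensembles
  Stats(Umu, header);                // fills header.plaquette and header.link_trace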

 
@@ -40,6 +40,8 @@ using namespace Grid;
class NerscIO : public BinaryIO { 
public:

  typedef Lattice<vLorentzColourMatrixD> GaugeField;

  static inline void truncate(std::string file){
    std::ofstream fout(file,std::ios::out);
  }
@@ -129,12 +131,12 @@ public:
  // Now the meat: the object readers
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

  template<class vsimd>
  static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
  template<class GaugeStats=PeriodicGaugeStatistics>
  static inline void readConfiguration(GaugeField &Umu,
				       FieldMetaData& header,
				       std::string file)
				       std::string file,
				       GaugeStats GaugeStatisticsCalculator=GaugeStats())
  {
    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

    GridBase *grid = Umu.Grid();
    uint64_t offset = readHeader(file,Umu.Grid(),header);
@@ -153,23 +155,23 @@ public:
    // munger is a function of <floating point, Real, data_type>
    if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
      if ( ieee32 || ieee32big ) {
	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F> 
	BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3F> 
	  (Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format,
	   nersc_csum,scidac_csuma,scidac_csumb);
      }
      if ( ieee64 || ieee64big ) {
	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D> 
	BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3D> 
	  (Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format,
	   nersc_csum,scidac_csuma,scidac_csumb);
      }
    } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
      if ( ieee32 || ieee32big ) {
	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
	BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixF>
	  (Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format,
	   nersc_csum,scidac_csuma,scidac_csumb);
      }
      if ( ieee64 || ieee64big ) {
	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
	BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixD>
	  (Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format,
	   nersc_csum,scidac_csuma,scidac_csumb);
      }
@@ -177,7 +179,7 @@ public:
      assert(0);
    }

    GaugeStatistics(Umu,clone);
    GaugeStats Stats; Stats(Umu,clone);

    std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec
	     <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl;
@@ -203,15 +205,13 @@ public:
    std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
  }

  template<class vsimd>
  static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
  template<class GaugeStats=PeriodicGaugeStatistics>
  static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
					std::string file, 
					int two_row,
					int bits32)
  {
    typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

    typedef iLorentzColourMatrix<vsimd> vobj;
    typedef vLorentzColourMatrixD vobj;
    typedef typename vobj::scalar_object sobj;

    FieldMetaData header;
@@ -229,7 +229,7 @@ public:

    GridMetaData(grid,header);
    assert(header.nd==4);
    GaugeStatistics(Umu,header);
    GaugeStats Stats; Stats(Umu,header);
    MachineCharacteristics(header);

	uint64_t offset;
@@ -238,19 +238,19 @@ public:
    header.floating_point = std::string("IEEE64BIG");
    header.data_type      = std::string("4D_SU3_GAUGE_3x3");
    GaugeSimpleUnmunger<fobj3D,sobj> munge;
	if ( grid->IsBoss() ) { 
	  truncate(file);
    offset = writeHeader(header,file);
	}
	grid->Broadcast(0,(void *)&offset,sizeof(offset));
    if ( grid->IsBoss() ) { 
      truncate(file);
      offset = writeHeader(header,file);
    }
    grid->Broadcast(0,(void *)&offset,sizeof(offset));

    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
					      nersc_csum,scidac_csuma,scidac_csumb);
    header.checksum = nersc_csum;
	if ( grid->IsBoss() ) { 
    writeHeader(header,file);
	}
    if ( grid->IsBoss() ) { 
      writeHeader(header,file);
    }

    std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "
	     <<std::hex<<header.checksum
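With the reader and writer templated on a statistics policy (defaulting to PeriodicGaugeStatistics), existing call sites keep working while conjugate-BC ensembles can opt in. A sketch, assuming a vLorentzColourMatrixD gauge field Umu, a FieldMetaData header and a file name:

  NerscIO::readConfiguration(Umu, header, file);                            // default: PeriodicGaugeStatistics
  NerscIO::readConfiguration<ConjugateGaugeStatistics>(Umu, header, file);  // conjugate-BC statistics
  NerscIO::writeConfiguration(Umu, file, two_row, bits32);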
 
@@ -154,7 +154,7 @@ public:
    grid->Barrier(); timer.Stop();
    std::cout << Grid::GridLogMessage << "OpenQcdIO::readConfiguration: redistribute overhead " << timer.Elapsed() << std::endl;

    GaugeStatistics(Umu, clone);
    PeriodicGaugeStatistics Stats; Stats(Umu, clone);

    RealD plaq_diff = fabs(clone.plaquette - header.plaquette);

 
@@ -208,7 +208,7 @@ public:

    FieldMetaData clone(header);

    GaugeStatistics(Umu, clone);
    PeriodicGaugeStatistics Stats; Stats(Umu, clone);

    RealD plaq_diff = fabs(clone.plaquette - header.plaquette);

 
@@ -80,6 +80,13 @@ template<typename T> struct isSpinor {
template <typename T> using IfSpinor    = Invoke<std::enable_if< isSpinor<T>::value,int> > ;
template <typename T> using IfNotSpinor = Invoke<std::enable_if<!isSpinor<T>::value,int> > ;

const int CoarseIndex = 4;
template<typename T> struct isCoarsened {
   static constexpr bool value = (CoarseIndex<=T::TensorLevel);
};
template <typename T> using IfCoarsened    = Invoke<std::enable_if< isCoarsened<T>::value,int> > ;
template <typename T> using IfNotCoarsened = Invoke<std::enable_if<!isCoarsened<T>::value,int> > ;

// ChrisK very keen to add extra space for Gparity doubling.
//
// Also add domain wall index, in a way where Wilson operator 
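The new isCoarsened trait keys off TensorLevel >= CoarseIndex, mirroring the isSpinor/IfSpinor pattern just above it. A sketch of the usual SFINAE use; the function name is illustrative, not from this diff:

  template<class vobj, IfCoarsened<vobj> = 0>
  void blockOnlyOp(const Lattice<vobj> &in);      // selected for coarse-grid tensor types

  template<class vobj, IfNotCoarsened<vobj> = 0>
  void blockOnlyOp(const Lattice<vobj> &in);      // fallback for fine-grid types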
 
@@ -88,7 +88,7 @@ public:
					  const _Spinor &chi, 
					  int mu, 
					  StencilEntry *SE,
					  StencilView &St) 
					  const StencilView &St) 
  {
    int direction = St._directions[mu];
    int distance  = St._distances[mu];
@@ -97,42 +97,30 @@ public:
    Coordinate icoor;

#ifdef GRID_SIMT
    _Spinor tmp;

    const int Nsimd =SiteDoubledGaugeField::Nsimd();
    int s = acceleratorSIMTlane(Nsimd);
    St.iCoorFromIindex(icoor,s);

    int mmu = mu % Nd;
    if ( SE->_around_the_world && St.parameters.twists[mmu] ) {
      
      int permute_lane = (sl==1) 
    	|| ((distance== 1)&&(icoor[direction]==1))
	|| ((distance==-1)&&(icoor[direction]==0));

      if ( permute_lane ) { 
	tmp(0) = chi(1);
	tmp(1) = chi(0);
      } else {
	tmp(0) = chi(0);
	tmp(1) = chi(1);
      }
    auto UU0=coalescedRead(U(0)(mu));
    auto UU1=coalescedRead(U(1)(mu));
    
    //Decide whether we do a G-parity flavor twist
    //Note: this assumes (but does not check) that sl==1 || sl==2 i.e. max 2 SIMD lanes in G-parity dir
    //It also assumes (but does not check) that abs(distance) == 1
    int permute_lane = (sl==1) 
    || ((distance== 1)&&(icoor[direction]==1))
    || ((distance==-1)&&(icoor[direction]==0));

      auto UU0=coalescedRead(U(0)(mu));
      auto UU1=coalescedRead(U(1)(mu));
    permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu]; //only if we are going around the world

      mult(&phi(0),&UU0,&tmp(0));
      mult(&phi(1),&UU1,&tmp(1));
    //Apply the links
    int f_upper = permute_lane ? 1 : 0;
    int f_lower = !f_upper;

    } else {

      auto UU0=coalescedRead(U(0)(mu));
      auto UU1=coalescedRead(U(1)(mu));

      mult(&phi(0),&UU0,&chi(0));
      mult(&phi(1),&UU1,&chi(1));

    }
    mult(&phi(0),&UU0,&chi(f_upper));
    mult(&phi(1),&UU1,&chi(f_lower));

#else
    typedef _Spinor vobj;
 
@@ -85,7 +85,7 @@ class MADWF
      maxiter     =_maxiter;
    };
   
  void operator() (const FermionFieldo &src4,FermionFieldo &sol5)
  void operator() (const FermionFieldo &src,FermionFieldo &sol5)
  {
    std::cout << GridLogMessage<< " ************************************************" << std::endl;
    std::cout << GridLogMessage<< "  MADWF-like algorithm                           " << std::endl;
@@ -114,8 +114,16 @@ class MADWF
    ///////////////////////////////////////
    //Import source, include Dminus factors
    ///////////////////////////////////////
    Mato.ImportPhysicalFermionSource(src4,b); 
    std::cout << GridLogMessage << " src4 " <<norm2(src4)<<std::endl;
    GridBase *src_grid = src.Grid();

    assert( (src_grid == Mato.GaugeGrid()) || (src_grid == Mato.FermionGrid()));

    if ( src_grid == Mato.GaugeGrid() ) {
      Mato.ImportPhysicalFermionSource(src,b);
    } else {
      b=src;
    }
    std::cout << GridLogMessage << " src " <<norm2(src)<<std::endl;
    std::cout << GridLogMessage << " b    " <<norm2(b)<<std::endl;

    defect = b;
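operator() now accepts either a 4d source, which is routed through ImportPhysicalFermionSource as before, or an already-prepared 5d source, which is used directly as the right-hand side, dispatching on the source's grid. A usage sketch; the MADWF instance and its constructor arguments are assumed, not shown here:

  madwf(src4d, sol5);   // src4d.Grid()==Mato.GaugeGrid(): Dminus import path
  madwf(src5d, sol5);   // src5d.Grid()==Mato.FermionGrid(): copied straight into b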
 | 
			
		||||
@@ -56,8 +56,12 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub
 | 
			
		||||
		 DoubledGaugeField &U,
 | 
			
		||||
		 const FermionField &in, FermionField &out, int dag, int interior,int exterior);
 | 
			
		||||
  
 | 
			
		||||
  void DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf,
 | 
			
		||||
		     int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp);
 | 
			
		||||
  void DhopDirKernel(StencilImpl &st,
 | 
			
		||||
		     const DoubledGaugeFieldView &U,
 | 
			
		||||
		     const DoubledGaugeFieldView &UUU, SiteSpinor * buf,
 | 
			
		||||
		     int sF, int sU,
 | 
			
		||||
		     const FermionFieldView &in,
 | 
			
		||||
		     const FermionFieldView &out, int dir,int disp);
 | 
			
		||||
 protected:    
 | 
			
		||||
 | 
			
		||||
   ///////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
@@ -65,53 +69,67 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub
 | 
			
		||||
   ///////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
   template<int Naik> 
 | 
			
		||||
   static accelerator_inline
 | 
			
		||||
   void DhopSiteGeneric(StencilView &st, 
 | 
			
		||||
			DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, 
 | 
			
		||||
   void DhopSiteGeneric(const StencilView &st, 
 | 
			
		||||
			const DoubledGaugeFieldView &U,
 | 
			
		||||
			const DoubledGaugeFieldView &UUU, 
 | 
			
		||||
			SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
			const FermionFieldView &in,
 | 
			
		||||
			const FermionFieldView &out,int dag);
 | 
			
		||||
   
 | 
			
		||||
   template<int Naik> static accelerator_inline
 | 
			
		||||
   void DhopSiteGenericInt(StencilView &st, 
 | 
			
		||||
			   DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, 
 | 
			
		||||
   void DhopSiteGenericInt(const StencilView &st, 
 | 
			
		||||
			   const DoubledGaugeFieldView &U,
 | 
			
		||||
			   const DoubledGaugeFieldView &UUU, 
 | 
			
		||||
			   SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			   const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
			   const FermionFieldView &in,
 | 
			
		||||
			   const FermionFieldView &out,int dag);
 | 
			
		||||
   
 | 
			
		||||
   template<int Naik> static accelerator_inline
 | 
			
		||||
   void DhopSiteGenericExt(StencilView &st, 
 | 
			
		||||
			   DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
 | 
			
		||||
			   SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			   const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
   void DhopSiteGenericExt(const StencilView &st, 
 | 
			
		||||
			   const DoubledGaugeFieldView &U,
 | 
			
		||||
			   const DoubledGaugeFieldView &UUU,
 | 
			
		||||
 			   SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			   const FermionFieldView &in,
 | 
			
		||||
			   const FermionFieldView &out,int dag);
 | 
			
		||||
 | 
			
		||||
   ///////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
   // Nc=3 specific kernels
 | 
			
		||||
   ///////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
   
 | 
			
		||||
   template<int Naik> static accelerator_inline
 | 
			
		||||
   void DhopSiteHand(StencilView &st, 
 | 
			
		||||
		     DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU, 
 | 
			
		||||
		     SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
		     const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
   void DhopSiteHand(const StencilView &st, 
 | 
			
		||||
		     const DoubledGaugeFieldView &U,
 | 
			
		||||
		     const DoubledGaugeFieldView &UUU, 
 | 
			
		||||
 		     SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
		     const FermionFieldView &in,
 | 
			
		||||
		     const FermionFieldView &out,int dag);
 | 
			
		||||
   
 | 
			
		||||
   template<int Naik> static accelerator_inline
 | 
			
		||||
   void DhopSiteHandInt(StencilView &st, 
 | 
			
		||||
			DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU, 
 | 
			
		||||
			SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
   void DhopSiteHandInt(const StencilView &st, 
 | 
			
		||||
			const DoubledGaugeFieldView &U,
 | 
			
		||||
			const DoubledGaugeFieldView &UUU, 
 | 
			
		||||
 			SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			const FermionFieldView &in,
 | 
			
		||||
			const FermionFieldView &out,int dag);
 | 
			
		||||
   
 | 
			
		||||
   template<int Naik> static accelerator_inline
 | 
			
		||||
   void DhopSiteHandExt(StencilView &st, 
 | 
			
		||||
			DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU, 
 | 
			
		||||
			SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
   void DhopSiteHandExt(const StencilView &st, 
 | 
			
		||||
			const DoubledGaugeFieldView &U,
 | 
			
		||||
			const DoubledGaugeFieldView &UUU, 
 | 
			
		||||
 			SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
			const FermionFieldView &in,
 | 
			
		||||
			const FermionFieldView &out,int dag);
 | 
			
		||||
 | 
			
		||||
   ///////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
   // Asm Nc=3 specific kernels
 | 
			
		||||
   ///////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
   
 | 
			
		||||
   void DhopSiteAsm(StencilView &st, 
 | 
			
		||||
		    DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU, 
 | 
			
		||||
		    SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
		    const FermionFieldView &in, FermionFieldView &out,int dag);
 | 
			
		||||
   void DhopSiteAsm(const StencilView &st, 
 | 
			
		||||
		    const DoubledGaugeFieldView &U,
 | 
			
		||||
		    const DoubledGaugeFieldView &UUU, 
 | 
			
		||||
 		    SiteSpinor * buf, int LLs, int sU, 
 | 
			
		||||
		    const FermionFieldView &in,
 | 
			
		||||
		    const FermionFieldView &out,int dag);
 | 
			
		||||
  
 | 
			
		||||
public:
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -61,7 +61,7 @@ public:
 | 
			
		||||
  typedef typename SiteHalfSpinor::vector_type     vComplexHigh;
 | 
			
		||||
  constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh);
 | 
			
		||||
 | 
			
		||||
  accelerator_inline int CommDatumSize(void) {
 | 
			
		||||
  accelerator_inline int CommDatumSize(void) const {
 | 
			
		||||
    return sizeof(SiteHalfCommSpinor);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
@@ -69,7 +69,7 @@ public:
 | 
			
		||||
  /* Compress includes precision change if mpi data is not same */
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  template<class _SiteHalfSpinor, class _SiteSpinor>
 | 
			
		||||
  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) {
 | 
			
		||||
  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const {
 | 
			
		||||
    _SiteHalfSpinor tmp;
 | 
			
		||||
    projector::Proj(tmp,in,mu,dag);
 | 
			
		||||
    vstream(buf[o],tmp);
 | 
			
		||||
@@ -81,7 +81,7 @@ public:
 | 
			
		||||
  accelerator_inline void Exchange(SiteHalfSpinor *mp,
 | 
			
		||||
				   const SiteHalfSpinor * __restrict__ vp0,
 | 
			
		||||
				   const SiteHalfSpinor * __restrict__ vp1,
 | 
			
		||||
				   Integer type,Integer o){
 | 
			
		||||
				   Integer type,Integer o) const {
 | 
			
		||||
    SiteHalfSpinor tmp1;
 | 
			
		||||
    SiteHalfSpinor tmp2;
 | 
			
		||||
    exchange(tmp1,tmp2,vp0[o],vp1[o],type);
 | 
			
		||||
@@ -93,7 +93,7 @@ public:
 | 
			
		||||
  /* Have a decompression step if mpi data is not same */
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  accelerator_inline void Decompress(SiteHalfSpinor * __restrict__ out,
 | 
			
		||||
				     SiteHalfSpinor * __restrict__ in, Integer o) {    
 | 
			
		||||
				     SiteHalfSpinor * __restrict__ in, Integer o) const {    
 | 
			
		||||
    assert(0);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
@@ -103,7 +103,7 @@ public:
 | 
			
		||||
  accelerator_inline void CompressExchange(SiteHalfSpinor * __restrict__ out0,
 | 
			
		||||
					   SiteHalfSpinor * __restrict__ out1,
 | 
			
		||||
					   const SiteSpinor * __restrict__ in,
 | 
			
		||||
					   Integer j,Integer k, Integer m,Integer type)
 | 
			
		||||
					   Integer j,Integer k, Integer m,Integer type) const
 | 
			
		||||
  {
 | 
			
		||||
    SiteHalfSpinor temp1, temp2;
 | 
			
		||||
    SiteHalfSpinor temp3, temp4;
 | 
			
		||||
@@ -117,7 +117,7 @@ public:
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  /* Pass the info to the stencil */
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  accelerator_inline bool DecompressionStep(void) { return false; }
 | 
			
		||||
  accelerator_inline bool DecompressionStep(void) const { return false; }
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
@@ -142,7 +142,7 @@ public:
 | 
			
		||||
  typedef typename SiteHalfSpinor::vector_type     vComplexHigh;
 | 
			
		||||
  constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh);
 | 
			
		||||
 | 
			
		||||
  accelerator_inline int CommDatumSize(void) {
 | 
			
		||||
  accelerator_inline int CommDatumSize(void) const {
 | 
			
		||||
    return sizeof(SiteHalfCommSpinor);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
@@ -150,7 +150,7 @@ public:
 | 
			
		||||
  /* Compress includes precision change if mpi data is not same */
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  template<class _SiteHalfSpinor, class _SiteSpinor>
 | 
			
		||||
  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) {
 | 
			
		||||
  accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const {
 | 
			
		||||
    _SiteHalfSpinor hsp;
 | 
			
		||||
    SiteHalfCommSpinor *hbuf = (SiteHalfCommSpinor *)buf;
 | 
			
		||||
    projector::Proj(hsp,in,mu,dag);
 | 
			
		||||
@@ -163,7 +163,7 @@ public:
 | 
			
		||||
  accelerator_inline void Exchange(SiteHalfSpinor *mp,
 | 
			
		||||
                       SiteHalfSpinor *vp0,
 | 
			
		||||
                       SiteHalfSpinor *vp1,
 | 
			
		||||
		       Integer type,Integer o){
 | 
			
		||||
		       Integer type,Integer o) const {
 | 
			
		||||
    SiteHalfSpinor vt0,vt1;
 | 
			
		||||
    SiteHalfCommSpinor *vpp0 = (SiteHalfCommSpinor *)vp0;
 | 
			
		||||
    SiteHalfCommSpinor *vpp1 = (SiteHalfCommSpinor *)vp1;
 | 
			
		||||
@@ -175,7 +175,7 @@ public:
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  /* Have a decompression step if mpi data is not same */
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  accelerator_inline void Decompress(SiteHalfSpinor *out, SiteHalfSpinor *in, Integer o){
 | 
			
		||||
  accelerator_inline void Decompress(SiteHalfSpinor *out, SiteHalfSpinor *in, Integer o) const {
 | 
			
		||||
    SiteHalfCommSpinor *hin=(SiteHalfCommSpinor *)in;
 | 
			
		||||
    precisionChange((vComplexHigh *)&out[o],(vComplexLow *)&hin[o],Nw);
 | 
			
		||||
  }
 | 
			
		||||
@@ -186,7 +186,7 @@ public:
 | 
			
		||||
  accelerator_inline void CompressExchange(SiteHalfSpinor *out0,
 | 
			
		||||
			       SiteHalfSpinor *out1,
 | 
			
		||||
			       const SiteSpinor *in,
 | 
			
		||||
			       Integer j,Integer k, Integer m,Integer type){
 | 
			
		||||
			       Integer j,Integer k, Integer m,Integer type) const {
 | 
			
		||||
    SiteHalfSpinor temp1, temp2,temp3,temp4;
 | 
			
		||||
    SiteHalfCommSpinor *hout0 = (SiteHalfCommSpinor *)out0;
 | 
			
		||||
    SiteHalfCommSpinor *hout1 = (SiteHalfCommSpinor *)out1;
 | 
			
		||||
@@ -200,7 +200,7 @@ public:
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  /* Pass the info to the stencil */
 | 
			
		||||
  /*****************************************************/
 | 
			
		||||
  accelerator_inline bool DecompressionStep(void) { return true; }
 | 
			
		||||
  accelerator_inline bool DecompressionStep(void) const { return true; }
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
 
@@ -95,7 +95,7 @@ public:
					  const _Spinor &chi,
					  int mu,
					  StencilEntry *SE,
					  StencilView &St) 
					  const StencilView &St) 
  {
    multLink(phi,U,chi,mu);
  }
@@ -106,11 +106,15 @@ public:
			    const _SpinorField & phi,
			    int mu)
  {
    const int Nsimd = SiteHalfSpinor::Nsimd();
    autoView( out_v, out, AcceleratorWrite);
    autoView( phi_v, phi, AcceleratorRead);
    autoView( Umu_v, Umu, AcceleratorRead);
    accelerator_for(sss,out.Grid()->oSites(),1,{
	multLink(out_v[sss],Umu_v[sss],phi_v[sss],mu);
    typedef decltype(coalescedRead(out_v[0]))   calcSpinor;
    accelerator_for(sss,out.Grid()->oSites(),Nsimd,{
	calcSpinor tmp;
	multLink(tmp,Umu_v[sss],phi_v(sss),mu);
	coalescedWrite(out_v[sss],tmp);
    });
  }
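The multLinkField loop now launches one thread per SIMD lane (Nsimd instead of 1) and moves data through coalescedRead/coalescedWrite, the standard Grid pattern for GPU-coalesced site loops. The same pattern in generic form, as a sketch with illustrative field names:

  autoView( out_v, out, AcceleratorWrite);
  autoView( in_v , in , AcceleratorRead);
  typedef decltype(coalescedRead(out_v[0])) calcObj;
  accelerator_for(ss, out.Grid()->oSites(), Nsimd, {
      calcObj tmp;
      // ... compute tmp from in_v(ss) and any link fields ...
      coalescedWrite(out_v[ss], tmp);
  });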
 | 
			
		||||
@@ -49,6 +49,7 @@ public:
 | 
			
		||||
 | 
			
		||||
  INHERIT_IMPL_TYPES(Impl);
 | 
			
		||||
  typedef FermionOperator<Impl> Base;
 | 
			
		||||
  typedef AcceleratorVector<int,STENCIL_MAX> StencilVector;
 | 
			
		||||
   
 | 
			
		||||
public:
 | 
			
		||||
 | 
			
		||||
@@ -68,73 +69,87 @@ public:
 | 
			
		||||
 | 
			
		||||
private:
 | 
			
		||||
 | 
			
		||||
  static accelerator_inline void DhopDirK(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor * buf,
 | 
			
		||||
				   int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dirdisp, int gamma);
 | 
			
		||||
  static accelerator_inline void DhopDirK(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
					  SiteHalfSpinor * buf, int sF, int sU,
 | 
			
		||||
					  const FermionFieldView &in,const FermionFieldView &out, int dirdisp, int gamma);
 | 
			
		||||
 | 
			
		||||
  static accelerator_inline void DhopDirXp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirYp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirZp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirTp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirXm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirYm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirZm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirTm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirXp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirYp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirZp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirTp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirXm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirYm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirZm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
  static accelerator_inline void DhopDirTm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,
 | 
			
		||||
					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp);
 | 
			
		||||
      
 | 
			
		||||
  // Specialised variants
 | 
			
		||||
  static accelerator void GenericDhopSite(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
      
 | 
			
		||||
  static accelerator void GenericDhopSiteDag(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
						    int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void GenericDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
						    int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
      
 | 
			
		||||
  static accelerator void GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
						int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void GenericDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
      
 | 
			
		||||
  static accelerator void GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
						       int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static accelerator void GenericDhopSite(const StencilView &st,
 | 
			
		||||
					  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
       
 | 
			
		||||
  static accelerator void GenericDhopSiteDag(const StencilView &st, const  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
   
 | 
			
		||||
  static accelerator void GenericDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
       
 | 
			
		||||
  static accelerator void GenericDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
						int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
   
 | 
			
		||||
  static accelerator void GenericDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
       
 | 
			
		||||
  static accelerator void GenericDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
						int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
 | 
			
		||||
  static void AsmDhopSite(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			  int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteDag(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
// Keep Hand unrolled 
 | 
			
		||||
  static accelerator void HandDhopSiteSycl(StencilVector st_perm, StencilEntry *st_p,  SiteDoubledGaugeField *U, SiteHalfSpinor * buf,
 | 
			
		||||
					   int sF, int sU, const SiteSpinor *in, SiteSpinor *out);
 | 
			
		||||
 | 
			
		||||
// Keep Hand unrolled temporarily  
 | 
			
		||||
  static accelerator void HandDhopSite(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
				       int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static accelerator void HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
				       int sF, int sU, const FermionFieldView &in,const FermionFieldView &out);
 | 
			
		||||
   
 | 
			
		||||
  static accelerator void HandDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
   
 | 
			
		||||
  static accelerator void HandDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void HandDhopSiteDag(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static accelerator void HandDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void HandDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static accelerator void HandDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
   
 | 
			
		||||
  static accelerator void HandDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
  //AVX 512 ASM
 | 
			
		||||
  static void AsmDhopSite(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			  int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void HandDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static void AsmDhopSiteDag(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void HandDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static void AsmDhopSiteInt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static accelerator void HandDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
					     int sF, int sU, const FermionFieldView &in, FermionFieldView &out);
 | 
			
		||||
  static void AsmDhopSiteDagInt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteExt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out);
 | 
			
		||||
  
 | 
			
		||||
  static void AsmDhopSiteDagExt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf,
 | 
			
		||||
				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out);
 | 
			
		||||
 | 
			
		||||
 public:
 | 
			
		||||
 WilsonKernels(const ImplParams &p = ImplParams()) : Base(p){};
 | 
			
		||||
};
 | 
			
		||||
 
@@ -642,7 +642,7 @@ void CayleyFermion5D<Impl>::ContractConservedCurrent( PropagatorField &q_in_1,
						      Current curr_type,
						      unsigned int mu)
{
#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
#if (!defined(GRID_HIP))
  Gamma::Algebra Gmu [] = {
    Gamma::Algebra::GammaX,
    Gamma::Algebra::GammaY,
@@ -826,7 +826,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
  }
#endif

#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
#if (!defined(GRID_HIP))
  int tshift = (mu == Nd-1) ? 1 : 0;
  ////////////////////////////////////////////////
  // GENERAL CAYLEY CASE
 
 | 
			
		||||
@@ -618,11 +618,13 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
NAMESPACE_BEGIN(Grid);

template <class Impl>
void StaggeredKernels<Impl>::DhopSiteAsm(StencilView &st,
					 DoubledGaugeFieldView &U,
					 DoubledGaugeFieldView &UUU,
					 SiteSpinor *buf, int sF,
					 int sU, const FermionFieldView &in, FermionFieldView &out,int dag) 
void StaggeredKernels<Impl>::DhopSiteAsm(const StencilView &st,
					 const DoubledGaugeFieldView &U,
					 const DoubledGaugeFieldView &UUU,
 					 SiteSpinor *buf, int sF,
					 int sU,
					 const FermionFieldView &in,
					 const FermionFieldView &out,int dag) 
{
  assert(0);
};
@@ -683,11 +685,13 @@ void StaggeredKernels<Impl>::DhopSiteAsm(StencilView &st,

  // This is the single precision 5th direction vectorised kernel
#include <Grid/simd/Intel512single.h>
template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilView &st,
								    DoubledGaugeFieldView &U,
								    DoubledGaugeFieldView &UUU,
								    SiteSpinor *buf, int sF,
								    int sU, const FermionFieldView &in, FermionFieldView &out,int dag) 
template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(const StencilView &st,
								    const DoubledGaugeFieldView &U,
								    const DoubledGaugeFieldView &UUU,
 								    SiteSpinor *buf, int sF,
								    int sU,
								    const FermionFieldView &in,
								    const FermionFieldView &out,int dag) 
{
#ifdef AVX512
  uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -738,11 +742,13 @@ template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilView
}

#include <Grid/simd/Intel512double.h>
template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilView &st, 
								    DoubledGaugeFieldView &U,
								    DoubledGaugeFieldView &UUU,
								    SiteSpinor *buf, int sF,
								    int sU, const FermionFieldView &in, FermionFieldView &out, int dag) 
template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(const StencilView &st, 
								    const DoubledGaugeFieldView &U,
								    const DoubledGaugeFieldView &UUU,
 								    SiteSpinor *buf, int sF,
								    int sU,
								    const FermionFieldView &in,
								    const FermionFieldView &out, int dag) 
{
#ifdef AVX512
  uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -824,11 +830,13 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilView
  // This is the single precision 5th direction vectorised kernel

#include <Grid/simd/Intel512single.h>
template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilView &st, 
							       DoubledGaugeFieldView &U,
							       DoubledGaugeFieldView &UUU,
							       SiteSpinor *buf, int sF,
							       int sU, const FermionFieldView &in, FermionFieldView &out,int dag) 
template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(const StencilView &st, 
							       const DoubledGaugeFieldView &U,
							       const DoubledGaugeFieldView &UUU,
 							       SiteSpinor *buf, int sF,
							       int sU,
							       const FermionFieldView &in,
							       const FermionFieldView &out,int dag) 
{
#ifdef AVX512
  uint64_t gauge0,gauge1,gauge2,gauge3;
@@ -893,11 +901,13 @@ template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilView &st,
}

#include <Grid/simd/Intel512double.h>
template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilView &st, 
							       DoubledGaugeFieldView &U,
							       DoubledGaugeFieldView &UUU,
							       SiteSpinor *buf, int sF,
							       int sU, const FermionFieldView &in, FermionFieldView &out,int dag) 
template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(const StencilView &st, 
							       const DoubledGaugeFieldView &U,
							       const DoubledGaugeFieldView &UUU,
 							       SiteSpinor *buf, int sF,
							       int sU,
							       const FermionFieldView &in,
							       const FermionFieldView &out,int dag) 
{
#ifdef AVX512
  uint64_t gauge0,gauge1,gauge2,gauge3;

@@ -147,10 +147,12 @@ NAMESPACE_BEGIN(Grid);

template <class Impl>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,
					  DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,
					  SiteSpinor *buf, int sF, int sU, 
					  const FermionFieldView &in, FermionFieldView &out,int dag) 
void StaggeredKernels<Impl>::DhopSiteHand(const StencilView &st,
					  const DoubledGaugeFieldView &U,
					  const DoubledGaugeFieldView &UUU,
 					  SiteSpinor *buf, int sF, int sU, 
					  const FermionFieldView &in,
					  const FermionFieldView &out,int dag) 
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;
@@ -222,10 +224,12 @@ void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st,

template <class Impl>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st, 
					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
					     SiteSpinor *buf, int sF, int sU, 
					     const FermionFieldView &in, FermionFieldView &out,int dag) 
void StaggeredKernels<Impl>::DhopSiteHandInt(const StencilView &st, 
					     const DoubledGaugeFieldView &U,
					     const DoubledGaugeFieldView &UUU,
 					     SiteSpinor *buf, int sF, int sU, 
					     const FermionFieldView &in,
					     const FermionFieldView &out,int dag) 
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;
@@ -301,10 +305,12 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,

template <class Impl>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st,
					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
					     SiteSpinor *buf, int sF, int sU, 
					     const FermionFieldView &in, FermionFieldView &out,int dag) 
void StaggeredKernels<Impl>::DhopSiteHandExt(const StencilView &st,
					     const DoubledGaugeFieldView &U,
					     const DoubledGaugeFieldView &UUU,
 					     SiteSpinor *buf, int sF, int sU, 
					     const FermionFieldView &in,
					     const FermionFieldView &out,int dag) 
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

@@ -79,10 +79,10 @@ StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){};
////////////////////////////////////////////////////////////////////////////////////
template <class Impl>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st, 
					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
					     SiteSpinor *buf, int sF, int sU, 
					     const FermionFieldView &in, FermionFieldView &out, int dag) 
void StaggeredKernels<Impl>::DhopSiteGeneric(const StencilView &st, 
					     const DoubledGaugeFieldView &U, const DoubledGaugeFieldView &UUU,
 					     SiteSpinor *buf, int sF, int sU, 
					     const FermionFieldView &in, const FermionFieldView &out, int dag) 
{
  const SiteSpinor *chi_p;
  SiteSpinor chi;
@@ -127,10 +127,11 @@ void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st,
  ///////////////////////////////////////////////////
template <class Impl>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st, 
						DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
						SiteSpinor *buf, int sF, int sU, 
						const FermionFieldView &in, FermionFieldView &out,int dag) {
void StaggeredKernels<Impl>::DhopSiteGenericInt(const StencilView &st, 
						const DoubledGaugeFieldView &U, const DoubledGaugeFieldView &UUU,
 						SiteSpinor *buf, int sF, int sU, 
						const FermionFieldView &in, const FermionFieldView &out,int dag)
{
  const SiteSpinor *chi_p;
  SiteSpinor chi;
  SiteSpinor Uchi;
@@ -175,10 +176,13 @@ void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st,
  ///////////////////////////////////////////////////
template <class Impl>
template <int Naik> accelerator_inline
void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st, 
						DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,
						SiteSpinor *buf, int sF, int sU,
						const FermionFieldView &in, FermionFieldView &out,int dag) {
void StaggeredKernels<Impl>::DhopSiteGenericExt(const StencilView &st, 
						const DoubledGaugeFieldView &U,
						const DoubledGaugeFieldView &UUU,
 						SiteSpinor *buf, int sF, int sU,
						const FermionFieldView &in,
						const FermionFieldView &out,int dag)
{
  const SiteSpinor *chi_p;
  //  SiteSpinor chi;
  SiteSpinor Uchi;
@@ -225,8 +229,13 @@ void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st,
// Driving / wrapping routine to select right kernel
////////////////////////////////////////////////////////////////////////////////////
template <class Impl> 
void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf,
					   int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp)
void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st,
					   const DoubledGaugeFieldView &U,
					   const DoubledGaugeFieldView &UUU,
					   SiteSpinor * buf,
					   int sF, int sU,
					   const FermionFieldView &in,
					   const FermionFieldView &out, int dir,int disp)
{
  // Disp should be either +1,-1,+3,-3
  // What about "dag" ?
@@ -254,7 +263,8 @@ void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldVie
  });

template <class Impl> 
void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo, 
void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st,
					  LebesgueOrder &lo, 
					  DoubledGaugeField &U, DoubledGaugeField &UUU, 
					  const FermionField &in, FermionField &out, int dag, int interior,int exterior)
{

@@ -92,20 +92,16 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
  int lvol = _Umu.Grid()->lSites();
  int DimRep = Impl::Dimension;

  Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
  Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);

  Coordinate lcoor;
  typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();

  {
    autoView(CTv,CloverTerm,CpuRead);
    autoView(CTIv,CloverTermInv,CpuWrite);
    for (int site = 0; site < lvol; site++) {
    thread_for(site, lvol, {
      Coordinate lcoor;
      grid->LocalIndexToLocalCoor(site, lcoor);
      EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
      Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
      Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
      typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
      peekLocalSite(Qx, CTv, lcoor);
      Qxinv = Zero();
      //if (csw!=0){
      for (int j = 0; j < Ns; j++)
	for (int k = 0; k < Ns; k++)
@@ -126,7 +122,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
      //    if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl;
      //  }
      pokeLocalSite(Qxinv, CTIv, lcoor);
    }
    });
  }

  // Separate the even and odd parts

@@ -38,9 +38,6 @@ Author: Nils Meyer  <nils.meyer@ur.de>  Regensburg University
// undefine everything related to kernels
#include <simd/Fujitsu_A64FX_undef.h>

// enable A64FX body
#define WILSONKERNELSASMBODYA64FX
//#pragma message("A64FX Dslash: WilsonKernelsAsmBodyA64FX.h")

    ///////////////////////////////////////////////////////////
    // If we are A64FX specialise the single precision routine
@@ -63,119 +60,89 @@ Author: Nils Meyer  <nils.meyer@ur.de>  Regensburg University
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif



/////////////////////////////////////////////////////////////////
@@ -185,119 +152,89 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif



// undefine
@@ -330,119 +267,89 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFie
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


/////////////////////////////////////////////////////////////////
// XYZT vectorised, dag Kernel, double
@@ -451,124 +358,93 @@ WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif


#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif




// undefs
#undef WILSONKERNELSASMBODYA64FX
#include <simd/Fujitsu_A64FX_undef.h>

#endif //A64FXASM

@@ -25,6 +25,11 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

// GCC 10 messes up SVE instruction scheduling using -O3, but
// -O3 -fno-schedule-insns -fno-schedule-insns2 does wonders
// performance now is better than armclang 20.2

#ifdef KERNEL_DAG
#define DIR0_PROJ    XP_PROJ
#define DIR1_PROJ    YP_PROJ
@@ -97,7 +102,7 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
    PROJ;							                        \
    MAYBEPERM(PERMUTE_DIR,perm);					        \
      } else {								                \
	LOAD_CHI(base);							                \
	  LOAD_CHI(base);							                \
      }									                    \
      base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\
    MULT_2SPIN_1(Dir);					                    \
@@ -110,6 +115,11 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
    }                                                       \
    RECON;								                    \

/*
NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
    though I expected that it would improve on performance
*/

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)	    \
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
  PREFETCH1_CHIMU(base);						            \
@@ -126,73 +136,63 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
 | 
			
		||||
 | 
			
		||||
#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
 | 
			
		||||
      basep = st.GetPFInfo(nent,plocal); nent++;			\
 | 
			
		||||
      if ( local ) {							            \
 | 
			
		||||
    LOAD_CHIMU(base);                                       \
 | 
			
		||||
    LOAD_TABLE(PERMUTE_DIR);                                \
 | 
			
		||||
    PROJ;							                        \
 | 
			
		||||
    MAYBEPERM(PERMUTE_DIR,perm);					        \
 | 
			
		||||
      }else if ( st.same_node[Dir] ) {LOAD_CHI(base);}	    \
 | 
			
		||||
      base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\
 | 
			
		||||
      if ( local || st.same_node[Dir] ) {				    \
 | 
			
		||||
    MULT_2SPIN_1(Dir);					                    \
 | 
			
		||||
    PREFETCH_CHIMU(base);                                   \
 | 
			
		||||
    /* PREFETCH_GAUGE_L1(NxtDir); */                        \
 | 
			
		||||
    MULT_2SPIN_2;					                        \
 | 
			
		||||
    if (s == 0) {                                           \
 | 
			
		||||
       if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
 | 
			
		||||
    }                                                       \
 | 
			
		||||
    RECON;								                    \
 | 
			
		||||
    PREFETCH_CHIMU_L2(basep);                               \
 | 
			
		||||
      } else { PREFETCH_CHIMU(base); }								                    \
 | 
			
		||||
      if ( local ) {							\
 | 
			
		||||
  LOAD_CHIMU(base);                                       \
 | 
			
		||||
  LOAD_TABLE(PERMUTE_DIR);                                \
 | 
			
		||||
  PROJ;							                        \
 | 
			
		||||
  MAYBEPERM(PERMUTE_DIR,perm);					        \
 | 
			
		||||
      }else if ( st.same_node[Dir] ) {LOAD_CHI(base);}			\
 | 
			
		||||
      if ( local || st.same_node[Dir] ) {				\
 | 
			
		||||
  MULT_2SPIN_1(Dir);					                    \
 | 
			
		||||
  MULT_2SPIN_2;					                        \
 | 
			
		||||
  RECON;								\
 | 
			
		||||
      }									\
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\
 | 
			
		||||
  PREFETCH_CHIMU(base);						\
 | 
			
		||||
  PREFETCH_CHIMU_L2(basep);                               \
 | 
			
		||||
 | 
			
		||||
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\
 | 
			
		||||
  PREFETCH1_CHIMU(base);						\
 | 
			
		||||
  { ZERO_PSI; }								\
 | 
			
		||||
  ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)
 | 
			
		||||
 | 
			
		||||
#define RESULT(base,basep) SAVE_RESULT(base,basep);
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Post comms kernel
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
#ifdef EXTERIOR
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
 | 
			
		||||
  if((!local)&&(!st.same_node[Dir]) ) {					    \
 | 
			
		||||
    LOAD_CHI(base);							                \
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\
 | 
			
		||||
  if((!local)&&(!st.same_node[Dir]) ) {					\
 | 
			
		||||
    LOAD_CHI(base);							\
 | 
			
		||||
    MULT_2SPIN_1(Dir);					                    \
 | 
			
		||||
    PREFETCH_CHIMU(base);                                   \
 | 
			
		||||
    /* PREFETCH_GAUGE_L1(NxtDir); */                        \
 | 
			
		||||
    MULT_2SPIN_2;					                        \
 | 
			
		||||
    if (s == 0) {                                           \
 | 
			
		||||
      if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
 | 
			
		||||
    }                                                       \
 | 
			
		||||
    RECON;								                    \
 | 
			
		||||
    nmu++;								                    \
 | 
			
		||||
    RECON;								\
 | 
			
		||||
    nmu++;								\
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)	    \
 | 
			
		||||
  nmu=0;								                    \
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;\
 | 
			
		||||
  if((!local)&&(!st.same_node[Dir]) ) {					    \
 | 
			
		||||
    LOAD_CHI(base);							                \
 | 
			
		||||
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\
 | 
			
		||||
  nmu=0;								\
 | 
			
		||||
  { ZERO_PSI;}								\
 | 
			
		||||
  base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\
 | 
			
		||||
  if((!local)&&(!st.same_node[Dir]) ) {					\
 | 
			
		||||
    LOAD_CHI(base);							\
 | 
			
		||||
    MULT_2SPIN_1(Dir);					                    \
 | 
			
		||||
    PREFETCH_CHIMU(base);                                   \
 | 
			
		||||
    /* PREFETCH_GAUGE_L1(NxtDir); */                        \
 | 
			
		||||
    MULT_2SPIN_2;					                        \
 | 
			
		||||
    if (s == 0) {                                           \
 | 
			
		||||
      if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
 | 
			
		||||
    }                                                       \
 | 
			
		||||
    RECON;								                    \
 | 
			
		||||
    nmu++;								                    \
 | 
			
		||||
    RECON;								\
 | 
			
		||||
    nmu++;								\
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
{
 | 
			
		||||
  int nmu;
 | 
			
		||||
  int local,perm, ptype;
 | 
			
		||||
@@ -209,7 +209,6 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
    int ssn=ssU+1;     if(ssn>=nmax) ssn=0;
    //    int sUn=lo.Reorder(ssn);
    int sUn=ssn;
    LOCK_GAUGE(0);
#else
    int sU =ssU;
    int ssn=ssU+1;     if(ssn>=nmax) ssn=0;
@@ -295,6 +294,11 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      // DC ZVA test
      // { uint64_t basestore = (uint64_t)&out[ss];
      //   PREFETCH_RESULT_L2_STORE(basestore); }


      ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON);

#ifdef SHOW
@@ -308,6 +312,11 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      // DC ZVA test
      //{ uint64_t basestore = (uint64_t)&out[ss];
      //  PREFETCH_RESULT_L2_STORE(basestore); }


      ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON);

#ifdef SHOW
@@ -321,6 +330,11 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
      std::cout << "----------------------------------------------------" << std::endl;
#endif

      // DC ZVA test
      //{ uint64_t basestore = (uint64_t)&out[ss];
      //  PREFETCH_RESULT_L2_STORE(basestore); }


      ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON);

#ifdef SHOW
@@ -341,6 +355,7 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University
      base = (uint64_t) &out[ss];
      basep= st.GetPFInfo(nent,plocal); ent++;
      basep = (uint64_t) &out[ssn];
      //PREFETCH_RESULT_L1_STORE(base);
      RESULT(base,basep);

#ifdef SHOW

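The commented-out DC ZVA / PREFETCH_RESULT_L2_STORE experiments above try to prime the cache line that RESULT is about to write for the next site. A generic sketch of that idea using the GCC/Clang prefetch builtin (the A64FX kernels use dc zva / prfm instructions; OutSpinor and prefetchResultStore below are illustrative names only):

// Store-prefetch sketch: hint the cache that the next site's output spinor is
// about to be written.  __builtin_prefetch is a GCC/Clang builtin.
#include <cstdint>

struct OutSpinor { double v[24]; };   // 4 spins x 3 colours, complex pairs

inline void prefetchResultStore(OutSpinor* out, uint64_t ss_next)
{
  // rw = 1 (prefetch for a write), locality = 2 (keep roughly at L2)
  __builtin_prefetch(static_cast<const void*>(&out[ss_next]), 1, 2);
}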
@@ -38,46 +38,46 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
///////////////////////////////////////////////////////////
 | 
			
		||||
// Default to no assembler implementation
 | 
			
		||||
// Will specialise to 
 | 
			
		||||
// Will specialise to AVX512 if available
 | 
			
		||||
///////////////////////////////////////////////////////////
 | 
			
		||||
template<class Impl> void 
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				  int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				  int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl> void 
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl> void 
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl> void 
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl> void 
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl> void 
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl >::AsmDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -646,9 +646,14 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
  HAND_RESULT_EXT(ss,F)
 | 
			
		||||
 | 
			
		||||
#define HAND_SPECIALISE_GPARITY(IMPL)					\
 | 
			
		||||
  template<> accelerator_inline void					\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteSycl(StencilVector st_perm, StencilEntry *st_p, \
 | 
			
		||||
					SiteDoubledGaugeField *U, SiteHalfSpinor * buf, \
 | 
			
		||||
					int sF, int sU, const SiteSpinor *in, SiteSpinor *out) {} \
 | 
			
		||||
  									\
 | 
			
		||||
  template<> accelerator_inline void						\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
 | 
			
		||||
				    int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
 | 
			
		||||
				    int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
 | 
			
		||||
  {									\
 | 
			
		||||
    typedef IMPL Impl;							\
 | 
			
		||||
    typedef typename Simd::scalar_type S;				\
 | 
			
		||||
@@ -663,8 +668,8 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
  }									\
 | 
			
		||||
									\
 | 
			
		||||
  template<> accelerator_inline void						\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
 | 
			
		||||
				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
 | 
			
		||||
				       int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
 | 
			
		||||
  {									\
 | 
			
		||||
    typedef IMPL Impl;							\
 | 
			
		||||
    typedef typename Simd::scalar_type S;				\
 | 
			
		||||
@@ -679,8 +684,8 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
  }									\
 | 
			
		||||
									\
 | 
			
		||||
  template<> accelerator_inline void						\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
 | 
			
		||||
				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
 | 
			
		||||
				       int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
 | 
			
		||||
  {									\
 | 
			
		||||
    typedef IMPL Impl;							\
 | 
			
		||||
    typedef typename Simd::scalar_type S;				\
 | 
			
		||||
@@ -695,8 +700,8 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
  }									\
 | 
			
		||||
									\
 | 
			
		||||
  template<> accelerator_inline void						\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
 | 
			
		||||
  {									\
 | 
			
		||||
    typedef IMPL Impl;							\
 | 
			
		||||
    typedef typename Simd::scalar_type S;				\
 | 
			
		||||
@@ -711,8 +716,8 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
  }									\
 | 
			
		||||
									\
 | 
			
		||||
  template<> accelerator_inline void							\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
 | 
			
		||||
				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \
 | 
			
		||||
				       int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
 | 
			
		||||
  {									\
 | 
			
		||||
    typedef IMPL Impl;							\
 | 
			
		||||
    typedef typename Simd::scalar_type S;				\
 | 
			
		||||
@@ -728,8 +733,8 @@ NAMESPACE_BEGIN(Grid);
 | 
			
		||||
    HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
 | 
			
		||||
  }									\
 | 
			
		||||
  template<> accelerator_inline void						\
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \
 | 
			
		||||
  WilsonKernels<IMPL>::HandDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \
 | 
			
		||||
  {									\
 | 
			
		||||
    typedef IMPL Impl;							\
 | 
			
		||||
    typedef typename Simd::scalar_type S;				\
 | 
			
		||||
 
 | 
			
		||||
@@ -496,8 +496,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
template<class Impl> accelerator_inline void 
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
 | 
			
		||||
				  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
 | 
			
		||||
				  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
@@ -520,8 +520,8 @@ WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,Site
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>  accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::HandDhopSiteDag(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
  typedef typename Simd::vector_type V;
 | 
			
		||||
@@ -543,8 +543,8 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>  accelerator_inline void 
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSiteInt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
@@ -567,8 +567,8 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,Si
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::HandDhopSiteDagInt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
						  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
  typedef typename Simd::vector_type V;
 | 
			
		||||
@@ -590,8 +590,8 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldVi
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>  accelerator_inline void 
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSiteExt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf,
 | 
			
		||||
					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
@@ -615,8 +615,8 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,Si
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>  accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::HandDhopSiteDagExt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
 | 
			
		||||
						  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
  typedef typename Simd::vector_type V;
 | 
			
		||||
@@ -682,3 +682,4 @@ NAMESPACE_END(Grid);
 | 
			
		||||
#undef HAND_RESULT
 | 
			
		||||
#undef HAND_RESULT_INT
 | 
			
		||||
#undef HAND_RESULT_EXT
 | 
			
		||||
#undef HAND_DECLARATIONS
 | 
			
		||||
 
 | 
			
		||||
@@ -0,0 +1,598 @@
 | 
			
		||||
   /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/WilsonKernelsHand.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
 | 
			
		||||
#pragma once
 | 
			
		||||
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#undef LOAD_CHIMU  
 | 
			
		||||
#undef LOAD_CHI 
 | 
			
		||||
#undef MULT_2SPIN
 | 
			
		||||
#undef PERMUTE_DIR
 | 
			
		||||
#undef XP_PROJ  
 | 
			
		||||
#undef YP_PROJ  
 | 
			
		||||
#undef ZP_PROJ  
 | 
			
		||||
#undef TP_PROJ  
 | 
			
		||||
#undef XM_PROJ  
 | 
			
		||||
#undef YM_PROJ  
 | 
			
		||||
#undef ZM_PROJ  
 | 
			
		||||
#undef TM_PROJ  
 | 
			
		||||
#undef XP_RECON 
 | 
			
		||||
#undef XP_RECON_ACCUM 
 | 
			
		||||
#undef XM_RECON 
 | 
			
		||||
#undef XM_RECON_ACCUM 
 | 
			
		||||
#undef YP_RECON_ACCUM 
 | 
			
		||||
#undef YM_RECON_ACCUM 
 | 
			
		||||
#undef ZP_RECON_ACCUM 
 | 
			
		||||
#undef ZM_RECON_ACCUM 
 | 
			
		||||
#undef TP_RECON_ACCUM 
 | 
			
		||||
#undef TM_RECON_ACCUM 
 | 
			
		||||
#undef ZERO_RESULT				 
 | 
			
		||||
#undef Chimu_00
 | 
			
		||||
#undef Chimu_01
 | 
			
		||||
#undef Chimu_02
 | 
			
		||||
#undef Chimu_10
 | 
			
		||||
#undef Chimu_11
 | 
			
		||||
#undef Chimu_12
 | 
			
		||||
#undef Chimu_20
 | 
			
		||||
#undef Chimu_21
 | 
			
		||||
#undef Chimu_22
 | 
			
		||||
#undef Chimu_30
 | 
			
		||||
#undef Chimu_31
 | 
			
		||||
#undef Chimu_32
 | 
			
		||||
#undef HAND_STENCIL_LEG
 | 
			
		||||
#undef HAND_STENCIL_LEG_INT
 | 
			
		||||
#undef HAND_STENCIL_LEG_EXT
 | 
			
		||||
#undef HAND_RESULT
 | 
			
		||||
#undef HAND_RESULT_INT
 | 
			
		||||
#undef HAND_RESULT_EXT
 | 
			
		||||
 | 
			
		||||
#define REGISTER
 | 
			
		||||
 | 
			
		||||
#ifdef GRID_SIMT
 | 
			
		||||
#define LOAD_CHIMU(ptype)		\
 | 
			
		||||
  {const SiteSpinor & ref (in[offset]);	\
 | 
			
		||||
    Chimu_00=coalescedReadPermute<ptype>(ref()(0)(0),perm);	\
 | 
			
		||||
    Chimu_01=coalescedReadPermute<ptype>(ref()(0)(1),perm);	\
 | 
			
		||||
    Chimu_02=coalescedReadPermute<ptype>(ref()(0)(2),perm);	\
 | 
			
		||||
    Chimu_10=coalescedReadPermute<ptype>(ref()(1)(0),perm);	\
 | 
			
		||||
    Chimu_11=coalescedReadPermute<ptype>(ref()(1)(1),perm);	\
 | 
			
		||||
    Chimu_12=coalescedReadPermute<ptype>(ref()(1)(2),perm);	\
 | 
			
		||||
    Chimu_20=coalescedReadPermute<ptype>(ref()(2)(0),perm);	\
 | 
			
		||||
    Chimu_21=coalescedReadPermute<ptype>(ref()(2)(1),perm);	\
 | 
			
		||||
    Chimu_22=coalescedReadPermute<ptype>(ref()(2)(2),perm);	\
 | 
			
		||||
    Chimu_30=coalescedReadPermute<ptype>(ref()(3)(0),perm);	\
 | 
			
		||||
    Chimu_31=coalescedReadPermute<ptype>(ref()(3)(1),perm);	\
 | 
			
		||||
    Chimu_32=coalescedReadPermute<ptype>(ref()(3)(2),perm);	}
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR(dir) ;
 | 
			
		||||
#else
 | 
			
		||||
#define LOAD_CHIMU(ptype)		\
 | 
			
		||||
  {const SiteSpinor & ref (in[offset]);	\
 | 
			
		||||
    Chimu_00=coalescedRead(ref()(0)(0));	\
 | 
			
		||||
    Chimu_01=coalescedRead(ref()(0)(1));	\
 | 
			
		||||
    Chimu_02=coalescedRead(ref()(0)(2));	\
 | 
			
		||||
    Chimu_10=coalescedRead(ref()(1)(0));	\
 | 
			
		||||
    Chimu_11=coalescedRead(ref()(1)(1));	\
 | 
			
		||||
    Chimu_12=coalescedRead(ref()(1)(2));	\
 | 
			
		||||
    Chimu_20=coalescedRead(ref()(2)(0));	\
 | 
			
		||||
    Chimu_21=coalescedRead(ref()(2)(1));	\
 | 
			
		||||
    Chimu_22=coalescedRead(ref()(2)(2));	\
 | 
			
		||||
    Chimu_30=coalescedRead(ref()(3)(0));	\
 | 
			
		||||
    Chimu_31=coalescedRead(ref()(3)(1));	\
 | 
			
		||||
    Chimu_32=coalescedRead(ref()(3)(2));	}
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR(dir)			\
 | 
			
		||||
  permute##dir(Chi_00,Chi_00);	\
 | 
			
		||||
      permute##dir(Chi_01,Chi_01);\
 | 
			
		||||
      permute##dir(Chi_02,Chi_02);\
 | 
			
		||||
      permute##dir(Chi_10,Chi_10);	\
 | 
			
		||||
      permute##dir(Chi_11,Chi_11);\
 | 
			
		||||
      permute##dir(Chi_12,Chi_12);
 | 
			
		||||
#endif
 | 
			
		||||
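In the GRID_SIMT branch above PERMUTE_DIR collapses to a no-op because each GPU thread owns a single SIMD lane and coalescedReadPermute simply reads its neighbour's lane, whereas the scalar branch loads the whole vector and permutes it in registers. A generic sketch of the two access styles (not Grid's implementation, just the idea, with a pairwise half-vector swap standing in for the real permute pattern):

// Generic illustration of SIMT-lane versus whole-vector access.
#include <array>
#include <utility>

template <int Nsimd>
using LaneVec = std::array<double, Nsimd>;

// GPU/SIMT style: the thread that owns `lane` fetches one element, possibly
// from its partner lane, so no separate permute step is needed.
template <int Nsimd>
double readLane(const LaneVec<Nsimd>& v, int lane, bool permute)
{
  int src = permute ? (lane ^ (Nsimd / 2)) : lane;  // pairwise half-vector swap
  return v[src];
}

// CPU style: load the whole vector, then shuffle it in registers, which is
// what the PERMUTE_DIR macro does after LOAD_CHIMU.
template <int Nsimd>
LaneVec<Nsimd> readAndPermute(const LaneVec<Nsimd>& v, bool permute)
{
  LaneVec<Nsimd> r = v;
  if (permute)
    for (int i = 0; i < Nsimd / 2; i++) std::swap(r[i], r[i + Nsimd / 2]);
  return r;
}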
 | 
			
		||||
#define MULT_2SPIN(A)\
  {auto & ref(U[sU](A));					\
  U_00=coalescedRead(ref()(0,0));				\
  U_10=coalescedRead(ref()(1,0));					\
  U_20=coalescedRead(ref()(2,0));					\
  U_01=coalescedRead(ref()(0,1));					\
  U_11=coalescedRead(ref()(1,1));					\
  U_21=coalescedRead(ref()(2,1));					\
    UChi_00 = U_00*Chi_00;					\
    UChi_10 = U_00*Chi_10;					\
    UChi_01 = U_10*Chi_00;					\
    UChi_11 = U_10*Chi_10;					\
    UChi_02 = U_20*Chi_00;					\
    UChi_12 = U_20*Chi_10;					\
    UChi_00+= U_01*Chi_01;					\
    UChi_10+= U_01*Chi_11;					\
    UChi_01+= U_11*Chi_01;					\
    UChi_11+= U_11*Chi_11;					\
    UChi_02+= U_21*Chi_01;					\
    UChi_12+= U_21*Chi_11;					\
    U_00=coalescedRead(ref()(0,2));				\
    U_10=coalescedRead(ref()(1,2));				\
    U_20=coalescedRead(ref()(2,2));				\
    UChi_00+= U_00*Chi_02;					\
    UChi_10+= U_00*Chi_12;					\
    UChi_01+= U_10*Chi_02;					\
    UChi_11+= U_10*Chi_12;					\
    UChi_02+= U_20*Chi_02;					\
    UChi_12+= U_20*Chi_12;}

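MULT_2SPIN applies the 3x3 colour matrix of the gauge link U[sU](A) to both spin rows of the projected half-spinor, pulling the link in column by column to keep the number of live registers down. A scalar sketch of the same arithmetic, with std::complex standing in for the SIMD/coalesced types:

// Scalar sketch of MULT_2SPIN: uchi = U * chi for each of the two spin rows of
// the half-spinor.  Illustration only, not the library implementation.
#include <array>
#include <complex>

using Cplx       = std::complex<double>;
using ColourVec  = std::array<Cplx, 3>;       // one spin component, 3 colours
using ColourMat  = std::array<ColourVec, 3>;  // U[row][col]
using HalfSpinor = std::array<ColourVec, 2>;  // 2 spin components

inline HalfSpinor mult2spin(const ColourMat& U, const HalfSpinor& chi)
{
  HalfSpinor uchi{};                       // zero-initialised accumulators
  for (int s = 0; s < 2; s++)              // both spin rows see the same link
    for (int r = 0; r < 3; r++)            // colour row
      for (int c = 0; c < 3; c++)          // colour column (loaded column-wise in the macro)
        uchi[s][r] += U[r][c] * chi[s][c];
  return uchi;
}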
#define LOAD_CHI				\
 | 
			
		||||
  {const SiteHalfSpinor &ref(buf[offset]);	\
 | 
			
		||||
    Chi_00 = coalescedRead(ref()(0)(0));	\
 | 
			
		||||
    Chi_01 = coalescedRead(ref()(0)(1));	\
 | 
			
		||||
    Chi_02 = coalescedRead(ref()(0)(2));	\
 | 
			
		||||
    Chi_10 = coalescedRead(ref()(1)(0));	\
 | 
			
		||||
    Chi_11 = coalescedRead(ref()(1)(1));	\
 | 
			
		||||
    Chi_12 = coalescedRead(ref()(1)(2));}
 | 
			
		||||
 | 
			
		||||
//      hspin(0)=fspin(0)+timesI(fspin(3));
 | 
			
		||||
//      hspin(1)=fspin(1)+timesI(fspin(2));
 | 
			
		||||
#define XP_PROJ \
 | 
			
		||||
    Chi_00 = Chimu_00+timesI(Chimu_30);\
 | 
			
		||||
    Chi_01 = Chimu_01+timesI(Chimu_31);\
 | 
			
		||||
    Chi_02 = Chimu_02+timesI(Chimu_32);\
 | 
			
		||||
    Chi_10 = Chimu_10+timesI(Chimu_20);\
 | 
			
		||||
    Chi_11 = Chimu_11+timesI(Chimu_21);\
 | 
			
		||||
    Chi_12 = Chimu_12+timesI(Chimu_22);
 | 
			
		||||
 | 
			
		||||
#define YP_PROJ \
 | 
			
		||||
    Chi_00 = Chimu_00-Chimu_30;\
 | 
			
		||||
    Chi_01 = Chimu_01-Chimu_31;\
 | 
			
		||||
    Chi_02 = Chimu_02-Chimu_32;\
 | 
			
		||||
    Chi_10 = Chimu_10+Chimu_20;\
 | 
			
		||||
    Chi_11 = Chimu_11+Chimu_21;\
 | 
			
		||||
    Chi_12 = Chimu_12+Chimu_22;
 | 
			
		||||
 | 
			
		||||
#define ZP_PROJ \
 | 
			
		||||
  Chi_00 = Chimu_00+timesI(Chimu_20);		\
 | 
			
		||||
  Chi_01 = Chimu_01+timesI(Chimu_21);		\
 | 
			
		||||
  Chi_02 = Chimu_02+timesI(Chimu_22);		\
 | 
			
		||||
  Chi_10 = Chimu_10-timesI(Chimu_30);		\
 | 
			
		||||
  Chi_11 = Chimu_11-timesI(Chimu_31);		\
 | 
			
		||||
  Chi_12 = Chimu_12-timesI(Chimu_32);
 | 
			
		||||
 | 
			
		||||
#define TP_PROJ \
 | 
			
		||||
  Chi_00 = Chimu_00+Chimu_20;		\
 | 
			
		||||
  Chi_01 = Chimu_01+Chimu_21;		\
 | 
			
		||||
  Chi_02 = Chimu_02+Chimu_22;		\
 | 
			
		||||
  Chi_10 = Chimu_10+Chimu_30;		\
 | 
			
		||||
  Chi_11 = Chimu_11+Chimu_31;		\
 | 
			
		||||
  Chi_12 = Chimu_12+Chimu_32;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
//      hspin(0)=fspin(0)-timesI(fspin(3));
 | 
			
		||||
//      hspin(1)=fspin(1)-timesI(fspin(2));
 | 
			
		||||
#define XM_PROJ \
 | 
			
		||||
    Chi_00 = Chimu_00-timesI(Chimu_30);\
 | 
			
		||||
    Chi_01 = Chimu_01-timesI(Chimu_31);\
 | 
			
		||||
    Chi_02 = Chimu_02-timesI(Chimu_32);\
 | 
			
		||||
    Chi_10 = Chimu_10-timesI(Chimu_20);\
 | 
			
		||||
    Chi_11 = Chimu_11-timesI(Chimu_21);\
 | 
			
		||||
    Chi_12 = Chimu_12-timesI(Chimu_22);
 | 
			
		||||
 | 
			
		||||
#define YM_PROJ \
 | 
			
		||||
    Chi_00 = Chimu_00+Chimu_30;\
 | 
			
		||||
    Chi_01 = Chimu_01+Chimu_31;\
 | 
			
		||||
    Chi_02 = Chimu_02+Chimu_32;\
 | 
			
		||||
    Chi_10 = Chimu_10-Chimu_20;\
 | 
			
		||||
    Chi_11 = Chimu_11-Chimu_21;\
 | 
			
		||||
    Chi_12 = Chimu_12-Chimu_22;
 | 
			
		||||
 | 
			
		||||
#define ZM_PROJ \
 | 
			
		||||
  Chi_00 = Chimu_00-timesI(Chimu_20);		\
 | 
			
		||||
  Chi_01 = Chimu_01-timesI(Chimu_21);		\
 | 
			
		||||
  Chi_02 = Chimu_02-timesI(Chimu_22);		\
 | 
			
		||||
  Chi_10 = Chimu_10+timesI(Chimu_30);		\
 | 
			
		||||
  Chi_11 = Chimu_11+timesI(Chimu_31);		\
 | 
			
		||||
  Chi_12 = Chimu_12+timesI(Chimu_32);
 | 
			
		||||
 | 
			
		||||
#define TM_PROJ \
 | 
			
		||||
  Chi_00 = Chimu_00-Chimu_20;		\
 | 
			
		||||
  Chi_01 = Chimu_01-Chimu_21;		\
 | 
			
		||||
  Chi_02 = Chimu_02-Chimu_22;		\
 | 
			
		||||
  Chi_10 = Chimu_10-Chimu_30;		\
 | 
			
		||||
  Chi_11 = Chimu_11-Chimu_31;		\
 | 
			
		||||
  Chi_12 = Chimu_12-Chimu_32;
 | 
			
		||||
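Each of the eight projectors above exploits the fact that (1 +/- gamma^mu) has rank two: only two spin components per colour need to be formed, transported, and multiplied by the link, and the matching RECON macros later rebuild the lower two components from the upper two. A scalar sketch for the X-plus direction, written per colour component:

// Scalar sketch of XP_PROJ / XP_RECON for one colour component; timesI
// multiplies by the imaginary unit as in Grid.  Illustration only.
#include <array>
#include <complex>

using Cplx = std::complex<double>;

inline Cplx timesI(const Cplx& z) { return Cplx(-z.imag(), z.real()); }

// XP_PROJ: chi_0 = psi_0 + i*psi_3 ; chi_1 = psi_1 + i*psi_2
inline std::array<Cplx, 2> xpProj(const std::array<Cplx, 4>& psi)
{
  return { psi[0] + timesI(psi[3]), psi[1] + timesI(psi[2]) };
}

// XP_RECON: the lower two spin components are fixed multiples of the upper
// two, so the transported half-spinor uchi rebuilds the full 4-spinor.
inline std::array<Cplx, 4> xpRecon(const std::array<Cplx, 2>& uchi)
{
  return { uchi[0], uchi[1],
           Cplx(0, -1) * uchi[1],   // timesMinusI(UChi_1)
           Cplx(0, -1) * uchi[0] }; // timesMinusI(UChi_0)
}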
 | 
			
		||||
//      fspin(0)=hspin(0);
 | 
			
		||||
//      fspin(1)=hspin(1);
 | 
			
		||||
//      fspin(2)=timesMinusI(hspin(1));
 | 
			
		||||
//      fspin(3)=timesMinusI(hspin(0));
 | 
			
		||||
#define XP_RECON\
 | 
			
		||||
  result_00 = UChi_00;\
 | 
			
		||||
  result_01 = UChi_01;\
 | 
			
		||||
  result_02 = UChi_02;\
 | 
			
		||||
  result_10 = UChi_10;\
 | 
			
		||||
  result_11 = UChi_11;\
 | 
			
		||||
  result_12 = UChi_12;\
 | 
			
		||||
  result_20 = timesMinusI(UChi_10);\
 | 
			
		||||
  result_21 = timesMinusI(UChi_11);\
 | 
			
		||||
  result_22 = timesMinusI(UChi_12);\
 | 
			
		||||
  result_30 = timesMinusI(UChi_00);\
 | 
			
		||||
  result_31 = timesMinusI(UChi_01);\
 | 
			
		||||
  result_32 = timesMinusI(UChi_02);
 | 
			
		||||
 | 
			
		||||
#define XP_RECON_ACCUM\
 | 
			
		||||
  result_00+=UChi_00;\
 | 
			
		||||
  result_01+=UChi_01;\
 | 
			
		||||
  result_02+=UChi_02;\
 | 
			
		||||
  result_10+=UChi_10;\
 | 
			
		||||
  result_11+=UChi_11;\
 | 
			
		||||
  result_12+=UChi_12;\
 | 
			
		||||
  result_20-=timesI(UChi_10);\
 | 
			
		||||
  result_21-=timesI(UChi_11);\
 | 
			
		||||
  result_22-=timesI(UChi_12);\
 | 
			
		||||
  result_30-=timesI(UChi_00);\
 | 
			
		||||
  result_31-=timesI(UChi_01);\
 | 
			
		||||
  result_32-=timesI(UChi_02);
 | 
			
		||||
 | 
			
		||||
#define XM_RECON\
 | 
			
		||||
  result_00 = UChi_00;\
 | 
			
		||||
  result_01 = UChi_01;\
 | 
			
		||||
  result_02 = UChi_02;\
 | 
			
		||||
  result_10 = UChi_10;\
 | 
			
		||||
  result_11 = UChi_11;\
 | 
			
		||||
  result_12 = UChi_12;\
 | 
			
		||||
  result_20 = timesI(UChi_10);\
 | 
			
		||||
  result_21 = timesI(UChi_11);\
 | 
			
		||||
  result_22 = timesI(UChi_12);\
 | 
			
		||||
  result_30 = timesI(UChi_00);\
 | 
			
		||||
  result_31 = timesI(UChi_01);\
 | 
			
		||||
  result_32 = timesI(UChi_02);
 | 
			
		||||
 | 
			
		||||
#define XM_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20+= timesI(UChi_10);\
 | 
			
		||||
  result_21+= timesI(UChi_11);\
 | 
			
		||||
  result_22+= timesI(UChi_12);\
 | 
			
		||||
  result_30+= timesI(UChi_00);\
 | 
			
		||||
  result_31+= timesI(UChi_01);\
 | 
			
		||||
  result_32+= timesI(UChi_02);
 | 
			
		||||
 | 
			
		||||
#define YP_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20+= UChi_10;\
 | 
			
		||||
  result_21+= UChi_11;\
 | 
			
		||||
  result_22+= UChi_12;\
 | 
			
		||||
  result_30-= UChi_00;\
 | 
			
		||||
  result_31-= UChi_01;\
 | 
			
		||||
  result_32-= UChi_02;
 | 
			
		||||
 | 
			
		||||
#define YM_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20-= UChi_10;\
 | 
			
		||||
  result_21-= UChi_11;\
 | 
			
		||||
  result_22-= UChi_12;\
 | 
			
		||||
  result_30+= UChi_00;\
 | 
			
		||||
  result_31+= UChi_01;\
 | 
			
		||||
  result_32+= UChi_02;
 | 
			
		||||
 | 
			
		||||
#define ZP_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20-= timesI(UChi_00);			\
 | 
			
		||||
  result_21-= timesI(UChi_01);			\
 | 
			
		||||
  result_22-= timesI(UChi_02);			\
 | 
			
		||||
  result_30+= timesI(UChi_10);			\
 | 
			
		||||
  result_31+= timesI(UChi_11);			\
 | 
			
		||||
  result_32+= timesI(UChi_12);
 | 
			
		||||
 | 
			
		||||
#define ZM_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20+= timesI(UChi_00);			\
 | 
			
		||||
  result_21+= timesI(UChi_01);			\
 | 
			
		||||
  result_22+= timesI(UChi_02);			\
 | 
			
		||||
  result_30-= timesI(UChi_10);			\
 | 
			
		||||
  result_31-= timesI(UChi_11);			\
 | 
			
		||||
  result_32-= timesI(UChi_12);
 | 
			
		||||
 | 
			
		||||
#define TP_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20+= UChi_00;			\
 | 
			
		||||
  result_21+= UChi_01;			\
 | 
			
		||||
  result_22+= UChi_02;			\
 | 
			
		||||
  result_30+= UChi_10;			\
 | 
			
		||||
  result_31+= UChi_11;			\
 | 
			
		||||
  result_32+= UChi_12;
 | 
			
		||||
 | 
			
		||||
#define TM_RECON_ACCUM\
 | 
			
		||||
  result_00+= UChi_00;\
 | 
			
		||||
  result_01+= UChi_01;\
 | 
			
		||||
  result_02+= UChi_02;\
 | 
			
		||||
  result_10+= UChi_10;\
 | 
			
		||||
  result_11+= UChi_11;\
 | 
			
		||||
  result_12+= UChi_12;\
 | 
			
		||||
  result_20-= UChi_00;	\
 | 
			
		||||
  result_21-= UChi_01;	\
 | 
			
		||||
  result_22-= UChi_02;	\
 | 
			
		||||
  result_30-= UChi_10;	\
 | 
			
		||||
  result_31-= UChi_11;	\
 | 
			
		||||
  result_32-= UChi_12;
 | 
			
		||||
 | 
			
		||||
#define HAND_STENCIL_LEGA(PROJ,PERM,DIR,RECON)	\
  SE=&st_p[DIR+8*ss];			\
  ptype=st_perm[DIR];			\
  offset = SE->_offset;				\
  local  = SE->_is_local;			\
  perm   = SE->_permute;			\
  if ( local ) {				\
    LOAD_CHIMU(PERM);				\
    PROJ;					\
    if ( perm) {				\
      PERMUTE_DIR(PERM);			\
    }						\
  } else {					\
    LOAD_CHI;					\
  }						\
  MULT_2SPIN(DIR);				\
  RECON;

#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON)	\
  SE=&st_p[DIR+8*ss];			\
  ptype=st_perm[DIR];			\
  offset = SE->_offset;				\
  local  = SE->_is_local;			\
  perm   = SE->_permute;			\
  LOAD_CHIMU(PERM);				\
  PROJ;						\
  MULT_2SPIN(DIR);				\
  RECON;


#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON)	\
  SE=&st_p[DIR+8*ss];					\
  ptype=st_perm[DIR];					\
  offset = SE->_offset;				\
  local  = SE->_is_local;			\
  perm   = SE->_permute;			\
  if ( local ) {				\
    LOAD_CHIMU;					\
    PROJ;					\
    if ( perm) {				\
      PERMUTE_DIR(PERM);			\
    }						\
  } else if ( st.same_node[DIR] ) {		\
    LOAD_CHI;					\
  }						\
  if (local || st.same_node[DIR] ) {		\
    MULT_2SPIN(DIR);				\
    RECON;					\
  }

#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON)	\
  SE=st.GetEntry(ptype,DIR,ss);			\
  offset = SE->_offset;				\
  if((!SE->_is_local)&&(!st.same_node[DIR]) ) {	\
    LOAD_CHI;					\
    MULT_2SPIN(DIR);				\
    RECON;					\
    nmu++;					\
  }

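HAND_STENCIL_LEG_INT processes only legs whose data is already available locally or on the same node, so it can run while the halo exchange is still in flight; HAND_STENCIL_LEG_EXT later folds in just the off-node legs and counts them in nmu so the exterior result is only accumulated when something actually arrived. A simplified two-pass sketch of that split (LegInfo, SpinorAcc and addLeg are hypothetical stand-ins for the real stencil entries and per-leg work):

// Two-pass sketch of the interior/exterior split used for comms/compute overlap.
struct LegInfo   { bool is_local; bool same_node; };
struct SpinorAcc { double v[24]; };

inline void addLeg(SpinorAcc& result, int dir) { result.v[0] += dir; } // placeholder for PROJ/MULT_2SPIN/RECON

// Interior pass: every leg that does not need off-node data, run while the
// halo exchange is still in flight (HAND_STENCIL_LEG_INT condition).
inline void interiorPass(const LegInfo* legs, SpinorAcc& result)
{
  for (int dir = 0; dir < 8; dir++)
    if (legs[dir].is_local || legs[dir].same_node)
      addLeg(result, dir);
}

// Exterior pass: only the legs whose half-spinor arrived from another node;
// nmu tells HAND_RESULT_EXT whether there is anything to accumulate.
inline int exteriorPass(const LegInfo* legs, SpinorAcc& result)
{
  int nmu = 0;
  for (int dir = 0; dir < 8; dir++)
    if (!legs[dir].is_local && !legs[dir].same_node) {
      addLeg(result, dir);
      nmu++;
    }
  return nmu;
}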
#define HAND_RESULT(ss)				\
 | 
			
		||||
  {						\
 | 
			
		||||
    SiteSpinor & ref (out[ss]);			\
 | 
			
		||||
    coalescedWrite(ref()(0)(0),result_00);		\
 | 
			
		||||
    coalescedWrite(ref()(0)(1),result_01);		\
 | 
			
		||||
    coalescedWrite(ref()(0)(2),result_02);		\
 | 
			
		||||
    coalescedWrite(ref()(1)(0),result_10);		\
 | 
			
		||||
    coalescedWrite(ref()(1)(1),result_11);		\
 | 
			
		||||
    coalescedWrite(ref()(1)(2),result_12);		\
 | 
			
		||||
    coalescedWrite(ref()(2)(0),result_20);		\
 | 
			
		||||
    coalescedWrite(ref()(2)(1),result_21);		\
 | 
			
		||||
    coalescedWrite(ref()(2)(2),result_22);		\
 | 
			
		||||
    coalescedWrite(ref()(3)(0),result_30);		\
 | 
			
		||||
    coalescedWrite(ref()(3)(1),result_31);		\
 | 
			
		||||
    coalescedWrite(ref()(3)(2),result_32);		\
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#define HAND_RESULT_EXT(ss)			\
 | 
			
		||||
  if (nmu){					\
 | 
			
		||||
    SiteSpinor & ref (out[ss]);		\
 | 
			
		||||
    ref()(0)(0)+=result_00;		\
 | 
			
		||||
    ref()(0)(1)+=result_01;		\
 | 
			
		||||
    ref()(0)(2)+=result_02;		\
 | 
			
		||||
    ref()(1)(0)+=result_10;		\
 | 
			
		||||
    ref()(1)(1)+=result_11;		\
 | 
			
		||||
    ref()(1)(2)+=result_12;		\
 | 
			
		||||
    ref()(2)(0)+=result_20;		\
 | 
			
		||||
    ref()(2)(1)+=result_21;		\
 | 
			
		||||
    ref()(2)(2)+=result_22;		\
 | 
			
		||||
    ref()(3)(0)+=result_30;		\
 | 
			
		||||
    ref()(3)(1)+=result_31;		\
 | 
			
		||||
    ref()(3)(2)+=result_32;		\
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#define HAND_DECLARATIONS(Simd)			\
 | 
			
		||||
  Simd result_00;				\
 | 
			
		||||
  Simd result_01;				\
 | 
			
		||||
  Simd result_02;				\
 | 
			
		||||
  Simd result_10;				\
 | 
			
		||||
  Simd result_11;				\
 | 
			
		||||
  Simd result_12;				\
 | 
			
		||||
  Simd result_20;				\
 | 
			
		||||
  Simd result_21;				\
 | 
			
		||||
  Simd result_22;				\
 | 
			
		||||
  Simd result_30;				\
 | 
			
		||||
  Simd result_31;				\
 | 
			
		||||
  Simd result_32;				\
 | 
			
		||||
  Simd Chi_00;					\
 | 
			
		||||
  Simd Chi_01;					\
 | 
			
		||||
  Simd Chi_02;					\
 | 
			
		||||
  Simd Chi_10;					\
 | 
			
		||||
  Simd Chi_11;					\
 | 
			
		||||
  Simd Chi_12;					\
 | 
			
		||||
  Simd UChi_00;					\
 | 
			
		||||
  Simd UChi_01;					\
 | 
			
		||||
  Simd UChi_02;					\
 | 
			
		||||
  Simd UChi_10;					\
 | 
			
		||||
  Simd UChi_11;					\
 | 
			
		||||
  Simd UChi_12;					\
 | 
			
		||||
  Simd U_00;					\
 | 
			
		||||
  Simd U_10;					\
 | 
			
		||||
  Simd U_20;					\
 | 
			
		||||
  Simd U_01;					\
 | 
			
		||||
  Simd U_11;					\
 | 
			
		||||
  Simd U_21;
 | 
			
		||||
 | 
			
		||||
#define ZERO_RESULT				\
 | 
			
		||||
  result_00=Zero();				\
 | 
			
		||||
  result_01=Zero();				\
 | 
			
		||||
  result_02=Zero();				\
 | 
			
		||||
  result_10=Zero();				\
 | 
			
		||||
  result_11=Zero();				\
 | 
			
		||||
  result_12=Zero();				\
 | 
			
		||||
  result_20=Zero();				\
 | 
			
		||||
  result_21=Zero();				\
 | 
			
		||||
  result_22=Zero();				\
 | 
			
		||||
  result_30=Zero();				\
 | 
			
		||||
  result_31=Zero();				\
 | 
			
		||||
  result_32=Zero();			
 | 
			
		||||
 | 
			
		||||
#define Chimu_00 Chi_00
 | 
			
		||||
#define Chimu_01 Chi_01
 | 
			
		||||
#define Chimu_02 Chi_02
 | 
			
		||||
#define Chimu_10 Chi_10
 | 
			
		||||
#define Chimu_11 Chi_11
 | 
			
		||||
#define Chimu_12 Chi_12
 | 
			
		||||
#define Chimu_20 UChi_00
 | 
			
		||||
#define Chimu_21 UChi_01
 | 
			
		||||
#define Chimu_22 UChi_02
 | 
			
		||||
#define Chimu_30 UChi_10
 | 
			
		||||
#define Chimu_31 UChi_11
 | 
			
		||||
#define Chimu_32 UChi_12
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
template<class Impl> accelerator_inline void 
 | 
			
		||||
WilsonKernels<Impl>::HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p, SiteDoubledGaugeField *U,SiteHalfSpinor  *buf,
 | 
			
		||||
				      int ss,int sU,const SiteSpinor *in, SiteSpinor *out)
 | 
			
		||||
{
 | 
			
		||||
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
  typedef typename Simd::vector_type V;
 | 
			
		||||
  typedef iSinglet<Simd> vCplx;
 | 
			
		||||
  //  typedef decltype( coalescedRead( vCplx()()() )) Simt;
 | 
			
		||||
  typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
 | 
			
		||||
 | 
			
		||||
  HAND_DECLARATIONS(Simt);
 | 
			
		||||
 | 
			
		||||
  int offset,local,perm, ptype;
 | 
			
		||||
  StencilEntry *SE;
 | 
			
		||||
  HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
 | 
			
		||||
  HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
 | 
			
		||||
  HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
 | 
			
		||||
  HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM);
 | 
			
		||||
  HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM);
 | 
			
		||||
  HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM);
 | 
			
		||||
  HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
 | 
			
		||||
  HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM);
 | 
			
		||||
  HAND_RESULT(ss);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
////////////// Wilson ; uses this implementation /////////////////////
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
#undef LOAD_CHIMU  
 | 
			
		||||
#undef LOAD_CHI 
 | 
			
		||||
#undef MULT_2SPIN
 | 
			
		||||
#undef PERMUTE_DIR
 | 
			
		||||
#undef XP_PROJ  
 | 
			
		||||
#undef YP_PROJ  
 | 
			
		||||
#undef ZP_PROJ  
 | 
			
		||||
#undef TP_PROJ  
 | 
			
		||||
#undef XM_PROJ  
 | 
			
		||||
#undef YM_PROJ  
 | 
			
		||||
#undef ZM_PROJ  
 | 
			
		||||
#undef TM_PROJ  
 | 
			
		||||
#undef XP_RECON 
 | 
			
		||||
#undef XP_RECON_ACCUM 
 | 
			
		||||
#undef XM_RECON 
 | 
			
		||||
#undef XM_RECON_ACCUM 
 | 
			
		||||
#undef YP_RECON_ACCUM 
 | 
			
		||||
#undef YM_RECON_ACCUM 
 | 
			
		||||
#undef ZP_RECON_ACCUM 
 | 
			
		||||
#undef ZM_RECON_ACCUM 
 | 
			
		||||
#undef TP_RECON_ACCUM 
 | 
			
		||||
#undef TM_RECON_ACCUM 
 | 
			
		||||
#undef ZERO_RESULT				 
 | 
			
		||||
#undef Chimu_00
 | 
			
		||||
#undef Chimu_01
 | 
			
		||||
#undef Chimu_02
 | 
			
		||||
#undef Chimu_10
 | 
			
		||||
#undef Chimu_11
 | 
			
		||||
#undef Chimu_12
 | 
			
		||||
#undef Chimu_20
 | 
			
		||||
#undef Chimu_21
 | 
			
		||||
#undef Chimu_22
 | 
			
		||||
#undef Chimu_30
 | 
			
		||||
#undef Chimu_31
 | 
			
		||||
#undef Chimu_32
 | 
			
		||||
#undef HAND_STENCIL_LEG
 | 
			
		||||
#undef HAND_STENCIL_LEG_INT
 | 
			
		||||
#undef HAND_STENCIL_LEG_EXT
 | 
			
		||||
#undef HAND_RESULT
 | 
			
		||||
#undef HAND_RESULT_INT
 | 
			
		||||
#undef HAND_RESULT_EXT
 | 
			
		||||
#undef HAND_DECLARATIONS
 | 
			
		||||
@@ -115,9 +115,9 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip)
 | 
			
		||||
  // All legs kernels ; comms then compute
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,
 | 
			
		||||
					     SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					     int sU, const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
 					     SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					     int sU, const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0]))   calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0])) calcSpinor;
 | 
			
		||||
@@ -141,9 +141,9 @@ void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldV
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView &U,
 | 
			
		||||
					  SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					  int sU, const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
 					  SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					  int sU, const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
 | 
			
		||||
@@ -170,9 +170,9 @@ void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView
 | 
			
		||||
  // Interior kernels
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U,
 | 
			
		||||
						SiteHalfSpinor *buf, int sF,
 | 
			
		||||
						int sU, const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
 						SiteHalfSpinor *buf, int sF,
 | 
			
		||||
						int sU, const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
 | 
			
		||||
@@ -198,9 +198,9 @@ void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFi
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U,
 | 
			
		||||
							 SiteHalfSpinor *buf, int sF,
 | 
			
		||||
							 int sU, const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
					     SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					     int sU, const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
 | 
			
		||||
@@ -228,9 +228,9 @@ void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st,  DoubledGaugeField
 | 
			
		||||
// Exterior kernels
 | 
			
		||||
////////////////////////////////////////////////////////////////////
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U,
 | 
			
		||||
						SiteHalfSpinor *buf, int sF,
 | 
			
		||||
						int sU, const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
 						SiteHalfSpinor *buf, int sF,
 | 
			
		||||
						int sU, const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
 | 
			
		||||
@@ -259,9 +259,9 @@ void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFi
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U,
 | 
			
		||||
					     SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					     int sU, const FermionFieldView &in, FermionFieldView &out)
 | 
			
		||||
void WilsonKernels<Impl>::GenericDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,
 | 
			
		||||
 					     SiteHalfSpinor *buf, int sF,
 | 
			
		||||
					     int sU, const FermionFieldView &in, const FermionFieldView &out)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
 | 
			
		||||
@@ -291,8 +291,8 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeField
 | 
			
		||||
 | 
			
		||||
#define DhopDirMacro(Dir,spProj,spRecon)	\
 | 
			
		||||
  template <class Impl> accelerator_inline				\
 | 
			
		||||
  void WilsonKernels<Impl>::DhopDir##Dir(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \
 | 
			
		||||
					 int sU, const FermionFieldView &in, FermionFieldView &out, int dir) \
 | 
			
		||||
  void WilsonKernels<Impl>::DhopDir##Dir(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \
 | 
			
		||||
					 int sU, const FermionFieldView &in, const FermionFieldView &out, int dir) \
 | 
			
		||||
  {									\
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;		\
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;			\
 | 
			
		||||
@@ -302,8 +302,8 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeField
 | 
			
		||||
  StencilEntry *SE;							\
 | 
			
		||||
  int ptype;								\
 | 
			
		||||
  const int Nsimd = SiteHalfSpinor::Nsimd();				\
 | 
			
		||||
  const int lane=acceleratorSIMTlane(Nsimd);					\
 | 
			
		||||
									\
 | 
			
		||||
  const int lane=acceleratorSIMTlane(Nsimd);				\
 | 
			
		||||
  									\
 | 
			
		||||
  SE = st.GetEntry(ptype, dir, sF);					\
 | 
			
		||||
  GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,spRecon);				\
 | 
			
		||||
  coalescedWrite(out[sF], result,lane);					\
 | 
			
		||||
@@ -319,8 +319,8 @@ DhopDirMacro(Zm,spProjZm,spReconZm);
 | 
			
		||||
DhopDirMacro(Tm,spProjTm,spReconTm);
 | 
			
		||||
 | 
			
		||||
template <class Impl> accelerator_inline
 | 
			
		||||
void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF,
 | 
			
		||||
				    int sU, const FermionFieldView &in, FermionFieldView &out, int dir, int gamma)
 | 
			
		||||
void WilsonKernels<Impl>::DhopDirK(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF,
 | 
			
		||||
				   int sU, const FermionFieldView &in, const FermionFieldView &out, int dir, int gamma)
 | 
			
		||||
{
 | 
			
		||||
  typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;
 | 
			
		||||
  typedef decltype(coalescedRead(in[0]))  calcSpinor;
 | 
			
		||||
@@ -345,8 +345,8 @@ void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,Si
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls,
 | 
			
		||||
				      int Nsite, const FermionField &in, std::vector<FermionField> &out)
 | 
			
		||||
void WilsonKernels<Impl>::DhopDirAll(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls,
 | 
			
		||||
				     int Nsite, const FermionField &in, std::vector<FermionField> &out)
 | 
			
		||||
{
 | 
			
		||||
   autoView(U_v  ,U,AcceleratorRead);
 | 
			
		||||
   autoView(in_v ,in,AcceleratorRead);
 | 
			
		||||
@@ -424,6 +424,20 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
      WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v);		\
  });

#define KERNEL_CALL_TMP(A) \
  const uint64_t    NN = Nsite*Ls;					\
  auto U_p = & U_v[0];							\
  auto in_p = & in_v[0];						\
  auto out_p = & out_v[0];						\
  auto st_p = st_v._entries_p;						\
  auto st_perm = st_v._permute_type;					\
  accelerator_forNB( ss, NN, Simd::Nsimd(), {				\
      int sF = ss;							\
      int sU = ss/Ls;							\
      WilsonKernels<Impl>::A(st_perm,st_p,U_p,buf,sF,sU,in_p,out_p);	\
    });									\
  accelerator_barrier();

#define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier();

#define ASM_CALL(A)							\
@@ -446,7 +460,8 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField
   if( interior && exterior ) {
     if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSite); return;}
#ifndef GRID_CUDA
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite);    return;}
     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_TMP(HandDhopSiteSycl);    return; }
     //     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite);    return;}
     if (Opt == WilsonKernelsStatic::OptInlineAsm  ) {  ASM_CALL(AsmDhopSite);    return;}
#endif
   } else if( interior ) {

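KERNEL_CALL_TMP flattens the five-dimensional site loop before invoking the hand-unrolled SYCL kernel: the work index ss runs over Nsite*Ls fermion sites, sF is the 5d index and sU = ss/Ls is the shared 4d gauge-field index, and an accelerator barrier closes the launch. A serial sketch of that index bookkeeping (dispatchSites and siteKernel are illustrative names; the real loop is the accelerator_forNB parallel construct):

// Serial sketch of the index flattening done by KERNEL_CALL_TMP.
#include <cstdint>
#include <functional>

void dispatchSites(uint64_t Nsite, uint64_t Ls,
                   const std::function<void(uint64_t, uint64_t)>& siteKernel)
{
  const uint64_t NN = Nsite * Ls;          // total 5d fermion sites
  for (uint64_t ss = 0; ss < NN; ss++) {   // parallelised on the accelerator in Grid
    uint64_t sF = ss;                      // fermion (5d) site index
    uint64_t sU = ss / Ls;                 // gauge (4d) site shared by Ls slices
    siteKernel(sF, sU);
  }
  // the barrier after the launch ensures all sites are done before returning
}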
@@ -1,51 +0,0 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -0,0 +1 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
@@ -1,51 +0,0 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015, 2020
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
 | 
			
		||||
 | 
			
		||||
#ifndef AVX512
 | 
			
		||||
#ifndef QPX
 | 
			
		||||
#ifndef A64FX
 | 
			
		||||
#ifndef A64FXFIXEDSIZE
 | 
			
		||||
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
NAMESPACE_BEGIN(Grid);
 | 
			
		||||
 | 
			
		||||
#include "impl.h"
 | 
			
		||||
template class WilsonKernels<IMPLEMENTATION>;
 | 
			
		||||
 | 
			
		||||
NAMESPACE_END(Grid);
 | 
			
		||||
@@ -0,0 +1 @@
 | 
			
		||||
../WilsonKernelsInstantiation.cc.master
 | 
			
		||||
[The same 51-line WilsonKernelsInstantiation.cc deletion, followed by its replacement with a one-line symlink to ../WilsonKernelsInstantiation.cc.master, is repeated verbatim for each remaining per-implementation instantiation file.]
@@ -32,6 +32,7 @@ directory
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementationSycl.h>

#ifndef AVX512
#ifndef QPX
Grid/qcd/action/gauge/Gauge.cc (new file, 38 lines)
@@ -0,0 +1,38 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/gauge/Gauge.cc

Copyright (C) 2020

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>

NAMESPACE_BEGIN(Grid);

std::vector<int> ConjugateGaugeImplBase::_conjDirs;

NAMESPACE_END(Grid);
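The new Gauge.cc exists only to give the static data member declared further down (ConjugateGaugeImplBase::_conjDirs) its single out-of-class definition, as the one-definition rule requires for static members of a class. A stand-alone sketch of the same pattern, with hypothetical names, for readers unfamiliar with why a separate translation unit is needed:

// Illustrative sketch only (hypothetical names): a static data member is
// declared inside the class but needs exactly one definition at namespace
// scope in some .cc file, which is the role Gauge.cc plays for _conjDirs.
#include <vector>
#include <iostream>

struct FlagsBase {
  static std::vector<int> _dirs;       // declaration: no storage yet
};
std::vector<int> FlagsBase::_dirs;     // the one definition, as in Gauge.cc

int main() {
  FlagsBase::_dirs = {1, 1, 1, 0};
  std::cout << FlagsBase::_dirs.size() << "\n";   // prints 4
  return 0;
}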
@@ -154,6 +154,10 @@ public:
    return Hsum.real();
  }

  static inline void Project(Field &U) {
    ProjectSUn(U);
  }

  static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
    SU<Nc>::HotConfiguration(pRNG, U);
  }
@@ -59,14 +59,14 @@ public:
  }
  static inline GaugeLinkField
  CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
    return Cshift(adj(Link), mu, -1);
    return PeriodicBC::CovShiftIdentityBackward(Link, mu);
  }
  static inline GaugeLinkField
  CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
    return Link;
    return PeriodicBC::CovShiftIdentityForward(Link,mu);
  }
  static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) {
    return Cshift(Link, mu, 1);
    return PeriodicBC::ShiftStaple(Link,mu);
  }

  static inline bool isPeriodicGaugeField(void) { return true; }
@@ -74,7 +74,13 @@ public:

// Composition with smeared link, bc's etc.. probably need multiple inheritance
// Variable precision "S" and variable Nc
template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes {
class ConjugateGaugeImplBase {
protected:
  static std::vector<int> _conjDirs;
};

  template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes, ConjugateGaugeImplBase {
private:
public:
  INHERIT_GIMPL_TYPES(GimplTypes);
@@ -84,47 +90,56 @@ public:
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////
  template <class covariant>
  static Lattice<covariant> CovShiftForward(const GaugeLinkField &Link, int mu,
                                            const Lattice<covariant> &field) {
    return ConjugateBC::CovShiftForward(Link, mu, field);
                                            const Lattice<covariant> &field)
  {
    assert(_conjDirs.size() == Nd);
    if(_conjDirs[mu])
      return ConjugateBC::CovShiftForward(Link, mu, field);
    else
      return PeriodicBC::CovShiftForward(Link, mu, field);
  }

  template <class covariant>
  static Lattice<covariant> CovShiftBackward(const GaugeLinkField &Link, int mu,
                                             const Lattice<covariant> &field) {
    return ConjugateBC::CovShiftBackward(Link, mu, field);
                                             const Lattice<covariant> &field)
  {
    assert(_conjDirs.size() == Nd);
    if(_conjDirs[mu])
      return ConjugateBC::CovShiftBackward(Link, mu, field);
    else
      return PeriodicBC::CovShiftBackward(Link, mu, field);
  }

  static inline GaugeLinkField
  CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) {
    GridBase *grid = Link.Grid();
    int Lmu = grid->GlobalDimensions()[mu] - 1;

    Lattice<iScalar<vInteger>> coor(grid);
    LatticeCoordinate(coor, mu);

    GaugeLinkField tmp(grid);
    tmp = adj(Link);
    tmp = where(coor == Lmu, conjugate(tmp), tmp);
    return Cshift(tmp, mu, -1); // moves towards positive mu
  CovShiftIdentityBackward(const GaugeLinkField &Link, int mu)
  {
    assert(_conjDirs.size() == Nd);
    if(_conjDirs[mu])
      return ConjugateBC::CovShiftIdentityBackward(Link, mu);
    else
      return PeriodicBC::CovShiftIdentityBackward(Link, mu);
  }
  static inline GaugeLinkField
  CovShiftIdentityForward(const GaugeLinkField &Link, int mu) {
    return Link;
  CovShiftIdentityForward(const GaugeLinkField &Link, int mu)
  {
    assert(_conjDirs.size() == Nd);
    if(_conjDirs[mu])
      return ConjugateBC::CovShiftIdentityForward(Link,mu);
    else
      return PeriodicBC::CovShiftIdentityForward(Link,mu);
  }

  static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) {
    GridBase *grid = Link.Grid();
    int Lmu = grid->GlobalDimensions()[mu] - 1;

    Lattice<iScalar<vInteger>> coor(grid);
    LatticeCoordinate(coor, mu);

    GaugeLinkField tmp(grid);
    tmp = Cshift(Link, mu, 1);
    tmp = where(coor == Lmu, conjugate(tmp), tmp);
    return tmp;
  static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu)
  {
    assert(_conjDirs.size() == Nd);
    if(_conjDirs[mu])
      return ConjugateBC::ShiftStaple(Link,mu);
    else
      return PeriodicBC::ShiftStaple(Link,mu);
  }

  static inline void       setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
  static inline std::vector<int> getDirections(void) { return _conjDirs; }
  static inline bool isPeriodicGaugeField(void) { return false; }
};

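With the hunk above, ConjugateGaugeImpl applies the charge-conjugate (ConjugateBC) boundary only in the directions flagged in _conjDirs and falls back to PeriodicBC otherwise; the assert means setDirections must be called before any covariant shift is taken. A hedged usage sketch, assuming Grid's usual ConjugateGimplD typedef for this implementation and a standard Grid initialisation:

// Sketch only, not taken verbatim from the sources: flag which directions are
// charge-conjugate (1) and which stay periodic (0), once, at program start.
#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv) {
  Grid_init(&argc, &argv);

  std::vector<int> conjDirs({1, 1, 1, 0});   // conjugate BCs in x,y,z; periodic in t
  ConjugateGimplD::setDirections(conjDirs);  // must precede any CovShift* / staple call

  // ... build the conjugate-BC gauge action and HMC as usual ...

  Grid_finalize();
  return 0;
}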
@@ -54,6 +54,10 @@ public:
  static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
    U = 1.0;
  }

  static inline void Project(Field &U) {
    return;
  }

  static void MomentumSpacePropagator(Field &out, RealD m)
  {
@@ -234,6 +238,10 @@ public:
#endif //USE_FFT_ACCELERATION
  }

  static inline void Project(Field &U) {
    return;
  }

  static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
    Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U);
  }
@@ -159,6 +159,13 @@ private:
      Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U,
						     Resources.GetSerialRNG(),
						     Resources.GetParallelRNG());
    } else {
      // others
      std::cout << GridLogError << "Unrecognized StartingType\n";
      std::cout
	<< GridLogError
	<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
      exit(1);
    }

    Smearing.set_Field(U);
@@ -95,7 +95,7 @@ private:

  typedef typename IntegratorType::Field Field;
  typedef std::vector< HmcObservable<Field> * > ObsListType;

  //pass these from the resource manager
  GridSerialRNG &sRNG;
  GridParallelRNG &pRNG;
@@ -74,7 +74,7 @@ public:
      conf_file = os.str();
    }
  }

  virtual ~BaseHmcCheckpointer(){};
  void check_filename(const std::string &filename){
    std::ifstream f(filename.c_str());
    if(!f.good()){
@@ -82,7 +82,6 @@ public:
      abort();
    };
  }

  virtual void initialize(const CheckpointerParameters &Params) = 0;

  virtual void CheckpointRestore(int traj, typename Impl::Field &U,
@@ -45,6 +45,7 @@ private:

public:
  INHERIT_GIMPL_TYPES(Implementation);
  typedef GaugeStatistics<Implementation> GaugeStats;

  ILDGHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }

@@ -78,7 +79,7 @@ public:
      BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
      IldgWriter _IldgWriter(grid->IsBoss());
      _IldgWriter.open(config);
      _IldgWriter.writeConfiguration(U, traj, config, config);
      _IldgWriter.writeConfiguration<GaugeStats>(U, traj, config, config);
      _IldgWriter.close();

      std::cout << GridLogMessage << "Written ILDG Configuration on " << config
@@ -105,7 +106,7 @@ public:
    FieldMetaData header;
    IldgReader _IldgReader;
    _IldgReader.open(config);
    _IldgReader.readConfiguration(U,header);  // format from the header
    _IldgReader.readConfiguration<GaugeStats>(U,header);  // format from the header
    _IldgReader.close();

    std::cout << GridLogMessage << "Read ILDG Configuration from " << config
@@ -43,7 +43,8 @@ private:

public:
  INHERIT_GIMPL_TYPES(Gimpl);  // only for gauge configurations

  typedef GaugeStatistics<Gimpl> GaugeStats;

  NerscHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); }

  void initialize(const CheckpointerParameters &Params_) {
@@ -60,7 +61,7 @@ public:
      int precision32 = 1;
      int tworow = 0;
      NerscIO::writeRNGState(sRNG, pRNG, rng);
      NerscIO::writeConfiguration(U, config, tworow, precision32);
      NerscIO::writeConfiguration<GaugeStats>(U, config, tworow, precision32);
    }
  };

@@ -74,7 +75,7 @@ public:

    FieldMetaData header;
    NerscIO::readRNGState(sRNG, pRNG, header, rng);
    NerscIO::readConfiguration(U, header, config);
    NerscIO::readConfiguration<GaugeStats>(U, header, config);
  };
};

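Both checkpointers now pass a GaugeStatistics policy (typedef'd as GaugeStats above) into the read/write calls, so the consistency checks recorded with a configuration follow the gauge implementation rather than a hard-wired default. A small stand-alone sketch of the same "statistics as a template policy" pattern; all names here are hypothetical and only illustrate the idea behind writeConfiguration<GaugeStats>:

// Hypothetical names throughout; illustrates the policy-template pattern only.
#include <iostream>
#include <vector>

struct PeriodicStats {
  static double plaquette(const std::vector<double> &links) {
    double s = 0; for (double l : links) s += l;
    return s / links.size();
  }
};

template <class Stats>
void writeConfiguration(const std::vector<double> &links) {
  // the writer asks the policy, not a built-in default, for its checks
  std::cout << "plaquette check: " << Stats::plaquette(links) << "\n";
}

int main() {
  std::vector<double> links = {0.58, 0.61, 0.59};
  writeConfiguration<PeriodicStats>(links);   // caller selects the stats policy
  return 0;
}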
@@ -313,6 +313,8 @@ public:
      std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
    }

    FieldImplementation::Project(U);

    // and that we indeed got to the end of the trajectory
    assert(fabs(t_U - Params.trajL) < 1.0e-6);

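The new FieldImplementation::Project(U) call at the end of each trajectory re-projects the updated field onto its target manifold: the scalar implementations above make it a no-op, while the gauge implementation forwards to ProjectSUn so that rounding error accumulated during molecular dynamics does not slowly drive the links off SU(Nc). As an illustration of the underlying idea only (not Grid's ProjectSUn), a self-contained Gram-Schmidt re-orthonormalisation of one small complex matrix:

// Illustration only: Gram-Schmidt re-orthonormalisation of a 2x2 complex
// matrix, the same idea Project() applies link by link on the lattice.
#include <array>
#include <cmath>
#include <complex>
#include <iostream>

using Cplx = std::complex<double>;
using Row  = std::array<Cplx, 2>;

static Row normalise(const Row &r) {
  double n = std::sqrt(std::norm(r[0]) + std::norm(r[1]));
  return { r[0] / n, r[1] / n };
}

int main() {
  std::array<Row, 2> U = {{ {Cplx(1.0, 0.01), Cplx(0.02, 0.0)},
                            {Cplx(0.0, 0.0),  Cplx(0.99, -0.01)} }};
  U[0] = normalise(U[0]);                                  // normalise first row
  Cplx overlap = std::conj(U[0][0]) * U[1][0] + std::conj(U[0][1]) * U[1][1];
  U[1] = { U[1][0] - overlap * U[0][0], U[1][1] - overlap * U[0][1] };
  U[1] = normalise(U[1]);                                  // orthogonalise and normalise second row

  Cplx check = std::conj(U[0][0]) * U[1][0] + std::conj(U[0][1]) * U[1][1];
  std::cout << std::abs(check) << "\n";                    // ~0: rows are orthonormal again
  return 0;
}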
@@ -99,7 +99,7 @@ public:
  virtual Prod* getPtr() = 0;

  // add a getReference?

  virtual ~HMCModuleBase(){};
  virtual void print_parameters(){};  // default to nothing
};

@@ -128,7 +128,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
}
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProjTm (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
  hspin(0)=fspin(0)-fspin(2);
  hspin(1)=fspin(1)-fspin(3);
}
@@ -138,40 +137,50 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 *  0 0 -1  0
 *  0 0  0 -1
 */

template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
  hspin(0)=fspin(0);
  hspin(1)=fspin(1);
}

template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin)
{
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
  hspin(0)=fspin(2);
  hspin(1)=fspin(3);
}

//  template<class vtype> accelerator_inline void fspProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
{
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
  rfspin(0)=fspin(0);
  rfspin(1)=fspin(1);
  rfspin(2)=Zero();
  rfspin(3)=Zero();
}
//  template<class vtype> accelerator_inline void fspProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin)
{
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
  rfspin(0)=Zero();
  rfspin(1)=Zero();
  rfspin(2)=fspin(2);
  rfspin(3)=fspin(3);
}

template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin)
{
  const int hN = N>>1;
  for(int s=0;s<hN;s++){
    rfspin(s)=fspin(s);
    rfspin(s+hN)=Zero();
  }
}
template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin)
{
  const int hN = N>>1;
  for(int s=0;s<hN;s++){
    rfspin(s)=Zero();
    rfspin(s+hN)=fspin(s+hN);
  }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reconstruction routines to move back again to four spin
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
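The two new IfCoarsened overloads above extend the Gamma-5 projectors from the fixed four-component spinor layout to coarse-grid vectors with an arbitrary even number N of components: the '+' projector keeps the first N/2 components and zeroes the rest, the '-' projector does the opposite. A stand-alone sketch of the same operation on a plain std::vector (names hypothetical, outside Grid's tensor types):

// Sketch of the coarsened spProj5p / spProj5m logic on an ordinary vector.
#include <cassert>
#include <iostream>
#include <vector>

template <typename T>
std::vector<T> proj5(const std::vector<T> &f, bool plus) {
  assert(f.size() % 2 == 0);
  const std::size_t hN = f.size() >> 1;        // same N>>1 split as the diff
  std::vector<T> r(f.size(), T(0));
  for (std::size_t s = 0; s < hN; ++s) {
    if (plus) r[s]      = f[s];                // keep upper half, zero lower
    else      r[s + hN] = f[s + hN];           // keep lower half, zero upper
  }
  return r;
}

int main() {
  std::vector<double> f = {1, 2, 3, 4, 5, 6};
  auto p = proj5(f, true);                     // {1,2,3,0,0,0}
  auto m = proj5(f, false);                    // {0,0,0,4,5,6}
  std::cout << p[0] << " " << m[5] << "\n";    // prints "1 6"
  return 0;
}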
@@ -183,7 +192,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
 */
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=timesMinusI(hspin(1));
 | 
			
		||||
@@ -191,7 +199,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=timesI(hspin(1));
 | 
			
		||||
@@ -199,7 +206,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)-=timesI(hspin(1));
 | 
			
		||||
@@ -207,7 +213,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)+=timesI(hspin(1));
 | 
			
		||||
@@ -221,7 +226,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)= hspin(1);
 | 
			
		||||
@@ -229,7 +233,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=-hspin(1);
 | 
			
		||||
@@ -237,7 +240,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)+=hspin(1);
 | 
			
		||||
@@ -245,7 +247,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)-=hspin(1);
 | 
			
		||||
@@ -260,7 +261,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
 */
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=timesMinusI(hspin(0));
 | 
			
		||||
@@ -268,7 +268,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=     timesI(hspin(0));
 | 
			
		||||
@@ -276,7 +275,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)-=timesI(hspin(0));
 | 
			
		||||
@@ -284,7 +282,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)+=timesI(hspin(0));
 | 
			
		||||
@@ -298,7 +295,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
 */
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=hspin(0);
 | 
			
		||||
@@ -306,7 +302,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0);
 | 
			
		||||
  fspin(1)=hspin(1);
 | 
			
		||||
  fspin(2)=-hspin(0);
 | 
			
		||||
@@ -314,7 +309,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)+=hspin(0);
 | 
			
		||||
@@ -322,7 +316,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1);
 | 
			
		||||
  fspin(2)-=hspin(0);
 | 
			
		||||
@@ -336,7 +329,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
 */
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=hspin(0)+hspin(0); // add is lower latency than mul
 | 
			
		||||
  fspin(1)=hspin(1)+hspin(1); // probably no measurable difference though
 | 
			
		||||
  fspin(2)=Zero();
 | 
			
		||||
@@ -344,7 +336,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5m (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)=Zero();
 | 
			
		||||
  fspin(1)=Zero();
 | 
			
		||||
  fspin(2)=hspin(0)+hspin(0);
 | 
			
		||||
@@ -352,7 +343,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s
 | 
			
		||||
}
 | 
			
		||||
template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE;
 | 
			
		||||
  fspin(0)+=hspin(0)+hspin(0);
 | 
			
		||||
  fspin(1)+=hspin(1)+hspin(1);
 | 
			
		||||
}
 | 
			
		||||
@@ -372,7 +362,6 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a
 | 
			
		||||
//////////
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjXp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
@@ -426,26 +415,21 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconXp (iM
 | 
			
		||||
    }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
////////
 | 
			
		||||
// Xm
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjXm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjXm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjXm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -455,19 +439,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatri
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconXm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconXm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconXm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -476,45 +457,37 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatr
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconXm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconXm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconXm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
////////
 | 
			
		||||
// Yp
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjYp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjYp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjYp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -524,19 +497,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatri
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconYp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconYp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconYp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -545,66 +515,55 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatr
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconYp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconYp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconYp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
////////
 | 
			
		||||
// Ym
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjYm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjYm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjYm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconYm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,const iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconYm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconYm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -613,19 +572,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatr
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconYm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconYm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconYm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -638,66 +594,57 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iM
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjZp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjZp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjZp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconZp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconZp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconZp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconZp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconZp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconZp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@@ -706,62 +653,53 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iM
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjZm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjZm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjZm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconZm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconZm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconZm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconZm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconZm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconZm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -774,41 +712,35 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iM
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjTp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjTp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjTp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconTp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconTp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconTp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -817,44 +749,37 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatr
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconTp (iScalar<rtype> &hspin, iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconTp(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTp (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconTp(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconTp (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconTp(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
////////
 | 
			
		||||
// Tm
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProjTm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProjTm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProjTm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProjTm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -864,19 +789,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatri
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spReconTm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spReconTm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spReconTm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -885,44 +807,37 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatr
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumReconTm(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumReconTm(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumReconTm(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
////////
 | 
			
		||||
// 5p
 | 
			
		||||
////////
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProj5p(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProj5p(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProj5p(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -931,19 +846,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spProj5p (iMatri
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spRecon5p(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spRecon5p(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spRecon5p(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -952,19 +864,16 @@ template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatr
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumRecon5p(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumRecon5p(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumRecon5p(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -972,24 +881,18 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iM
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// four spinor projectors for chiral proj
 | 
			
		||||
//  template<class vtype> accelerator_inline void fspProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
template<class vtype> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProj5p(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
//  template<class vtype,int N> accelerator_inline void fspProj5p (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProj5p(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
//  template<class vtype,int N> accelerator_inline void fspProj5p (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProj5p(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -1001,17 +904,17 @@ template<class vtype,int N> accelerator_inline void spProj5p (iMatrix<vtype,N> &
 | 
			
		||||
// 5m
 | 
			
		||||
////////
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  spProj5m(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProj5m(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
@@ -1021,40 +924,34 @@ template<class rtype,class vtype,int N> accelerator_inline void spProj5m (iMatri
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void spRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spRecon5m(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spRecon5m(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void spRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spRecon5m(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
    }}
 | 
			
		||||
  }}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class rtype,class vtype> accelerator_inline void accumRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  accumRecon5m(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    accumRecon5m(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      accumRecon5m(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
@@ -1063,24 +960,18 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iM
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// four spinor projectors for chiral proj
 | 
			
		||||
//  template<class vtype> accelerator_inline void fspProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
template<class vtype> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp;
 | 
			
		||||
  spProj5m(hspin._internal,fspin._internal);
 | 
			
		||||
}
 | 
			
		||||
//  template<class vtype,int N> accelerator_inline void fspProj5m (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++) {
 | 
			
		||||
    spProj5m(hspin._internal[i],fspin._internal[i]);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
//  template<class vtype,int N> accelerator_inline void fspProj5m (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin)
 | 
			
		||||
{
 | 
			
		||||
  //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp;
 | 
			
		||||
  for(int i=0;i<N;i++){ 
 | 
			
		||||
    for(int j=0;j<N;j++){
 | 
			
		||||
      spProj5m(hspin._internal[i][j],fspin._internal[i][j]);
 | 
			
		||||
 
@@ -53,6 +53,24 @@ namespace PeriodicBC {
    return Cshift(tmp,mu,-1);// moves towards positive mu
  }

  template<class gauge> Lattice<gauge>
  CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu)
  {
    return Cshift(adj(Link), mu, -1);
  }

  template<class gauge> Lattice<gauge>
  CovShiftIdentityForward(const Lattice<gauge> &Link, int mu)
  {
    return Link;
  }

  template<class gauge> Lattice<gauge>
  ShiftStaple(const Lattice<gauge> &Link, int mu)
  {
    return Cshift(Link, mu, 1);
  }

  template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
    auto  CovShiftForward(const Lattice<gauge> &Link,
			  int mu,
@@ -70,6 +88,7 @@ namespace PeriodicBC {
    return CovShiftBackward(Link,mu,arg);
  }

}

@@ -139,6 +158,38 @@ namespace ConjugateBC {
    //    std::cout<<"Gparity::CovCshiftBackward mu="<<mu<<std::endl;
    return Cshift(tmp,mu,-1);// moves towards positive mu
  }
  template<class gauge> Lattice<gauge>
  CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu) {
    GridBase *grid = Link.Grid();
    int Lmu = grid->GlobalDimensions()[mu] - 1;

    Lattice<iScalar<vInteger>> coor(grid);
    LatticeCoordinate(coor, mu);

    Lattice<gauge> tmp(grid);
    tmp = adj(Link);
    tmp = where(coor == Lmu, conjugate(tmp), tmp);
    return Cshift(tmp, mu, -1); // moves towards positive mu
  }
  template<class gauge> Lattice<gauge>
  CovShiftIdentityForward(const Lattice<gauge> &Link, int mu) {
    return Link;
  }

  template<class gauge> Lattice<gauge>
  ShiftStaple(const Lattice<gauge> &Link, int mu)
  {
    GridBase *grid = Link.Grid();
    int Lmu = grid->GlobalDimensions()[mu] - 1;

    Lattice<iScalar<vInteger>> coor(grid);
    LatticeCoordinate(coor, mu);

    Lattice<gauge> tmp(grid);
    tmp = Cshift(Link, mu, 1);
    tmp = where(coor == Lmu, conjugate(tmp), tmp);
    return tmp;
  }

  template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr>
    auto  CovShiftForward(const Lattice<gauge> &Link,
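An illustrative usage sketch of the identity-shift helpers added above (not from the patch; U_mu and mu stand for an ordinary LatticeColourMatrix link field and a direction index). The PeriodicBC variants are thin wrappers around Cshift, while the ConjugateBC variants additionally complex-conjugate whatever wraps through the last slice in direction mu:

  LatticeColourMatrix back_p = PeriodicBC::CovShiftIdentityBackward(U_mu, mu);
  // identical to Cshift(adj(U_mu), mu, -1)

  LatticeColourMatrix back_c = ConjugateBC::CovShiftIdentityBackward(U_mu, mu);
  // same shift, but sites with coordinate x_mu == L_mu - 1 are conjugated
  // before shifting (the G-parity style boundary handled by this namespace)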
@@ -154,8 +154,8 @@ void axpby_ssp_pminus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,co
  accelerator_for(sss,nloop,vobj::Nsimd(),{
    uint64_t ss = sss*Ls;
    decltype(coalescedRead(y_v[ss+sp])) tmp;
    spProj5m(tmp,y_v(ss+sp));
    tmp = a*x_v(ss+s)+b*tmp;
    spProj5m(tmp,y_v(ss+sp));
   tmp = a*x_v(ss+s)+b*tmp;
    coalescedWrite(z_v[ss+s],tmp);
  });
}
@@ -188,7 +188,6 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x)
  z.Checkerboard() = x.Checkerboard();
  conformable(x,z);
  int Ls = grid->_rdimensions[0];
  Gamma G5(Gamma::Algebra::Gamma5);
  autoView( x_v, x, AcceleratorRead);
  autoView( z_v, z, AcceleratorWrite);
  uint64_t nloop = grid->oSites()/Ls;
@@ -196,7 +195,13 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x)
    uint64_t ss = sss*Ls;
    for(int s=0;s<Ls;s++){
      int sp = Ls-1-s;
      coalescedWrite(z_v[ss+sp],G5*x_v(ss+s));
      auto tmp = x_v(ss+s);
      decltype(tmp) tmp_p;
      decltype(tmp) tmp_m;
      spProj5p(tmp_p,tmp);
      spProj5m(tmp_m,tmp);
      // Use of spProj5m, 5p captures the coarse space too
      coalescedWrite(z_v[ss+sp],tmp_p - tmp_m);
    }
  });
}
@@ -208,10 +213,20 @@ void G5C(Lattice<vobj> &z, const Lattice<vobj> &x)
  z.Checkerboard() = x.Checkerboard();
  conformable(x, z);

  Gamma G5(Gamma::Algebra::Gamma5);
  z = G5 * x;
  autoView( x_v, x, AcceleratorRead);
  autoView( z_v, z, AcceleratorWrite);
  uint64_t nloop = grid->oSites();
  accelerator_for(ss,nloop,vobj::Nsimd(),{
    auto tmp = x_v(ss);
    decltype(tmp) tmp_p;
    decltype(tmp) tmp_m;
    spProj5p(tmp_p,tmp);
    spProj5m(tmp_m,tmp);
    coalescedWrite(z_v[ss],tmp_p - tmp_m);
  });
}

/*
template<class CComplex, int nbasis>
void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex, nbasis>> &x)
{
@@ -234,6 +249,7 @@ void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex,
    }
  });
}
*/

NAMESPACE_END(Grid);

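For a genuine four-spinor field the spProj5p/spProj5m rewrite of G5R5 and G5C above reproduces the old gamma_5 multiply exactly, since the difference of the two chiral projections is gamma_5; the point of the change is that the same code now also acts correctly on coarse-grid vectors. An illustrative check (not from the patch; assumes the usual Grid setup with a LatticeFermion x already initialised):

  LatticeFermion z(x.Grid());
  G5C(z, x);                     // new path via spProj5p / spProj5m

  LatticeFermion ref(x.Grid());
  Gamma G5(Gamma::Algebra::Gamma5);
  ref = G5 * x;                  // old path
  LatticeFermion diff(x.Grid());
  diff = z - ref;
  std::cout << norm2(diff) << std::endl;   // expect zero up to rounding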
@@ -735,7 +735,6 @@ public:
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  template <typename GaugeField>
 | 
			
		||||
  static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
 | 
			
		||||
    typedef typename GaugeField::vector_type vector_type;
 | 
			
		||||
@@ -800,6 +799,88 @@ public:
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<int N>
 | 
			
		||||
LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
 | 
			
		||||
{
 | 
			
		||||
  GridBase *grid=Umu.Grid();
 | 
			
		||||
  auto lvol = grid->lSites();
 | 
			
		||||
  LatticeComplexD ret(grid);
 | 
			
		||||
 | 
			
		||||
  autoView(Umu_v,Umu,CpuRead);
 | 
			
		||||
  autoView(ret_v,ret,CpuWrite);
 | 
			
		||||
  thread_for(site,lvol,{
 | 
			
		||||
    Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
 | 
			
		||||
    Coordinate lcoor;
 | 
			
		||||
    grid->LocalIndexToLocalCoor(site, lcoor);
 | 
			
		||||
    iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
 | 
			
		||||
    peekLocalSite(Us, Umu_v, lcoor);
 | 
			
		||||
    for(int i=0;i<N;i++){
 | 
			
		||||
      for(int j=0;j<N;j++){
 | 
			
		||||
	EigenU(i,j) = Us()()(i,j);
 | 
			
		||||
      }}
 | 
			
		||||
    ComplexD det = EigenU.determinant();
 | 
			
		||||
    pokeLocalSite(det,ret_v,lcoor);
 | 
			
		||||
  });
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int N>
 | 
			
		||||
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
 | 
			
		||||
{
 | 
			
		||||
  Umu      = ProjectOnGroup(Umu);
 | 
			
		||||
  auto det = Determinant(Umu);
 | 
			
		||||
 | 
			
		||||
  det = conjugate(det);
 | 
			
		||||
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
    auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
 | 
			
		||||
    element = element * det;
 | 
			
		||||
    PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template<int N>
 | 
			
		||||
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplexD, N> >,Nd> > &U)
 | 
			
		||||
{
 | 
			
		||||
  GridBase *grid=U.Grid();
 | 
			
		||||
  // Reunitarise
 | 
			
		||||
  for(int mu=0;mu<Nd;mu++){
 | 
			
		||||
    auto Umu = PeekIndex<LorentzIndex>(U,mu);
 | 
			
		||||
    Umu      = ProjectOnGroup(Umu);
 | 
			
		||||
    ProjectSUn(Umu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(U,Umu,mu);
 | 
			
		||||
  }
 | 
			
		||||
}
// Explicit specialisation for SU(3).
static void
ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
{
  GridBase *grid=Umu.Grid();
  const int x=0;
  const int y=1;
  const int z=2;
  // Reunitarise
  Umu = ProjectOnGroup(Umu);
  autoView(Umu_v,Umu,CpuWrite);
  thread_for(ss,grid->oSites(),{
      auto cm = Umu_v[ss];
      cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy
      cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz
      cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx
      Umu_v[ss]=cm;
  });
}
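// Reader's note (not part of the patch): the SU(3) special case avoids the Eigen
// determinant entirely.  For a matrix in SU(3) the third row equals the complex
// conjugate of the cross product of the first two rows, so after ProjectOnGroup
// orthonormalises rows 0 and 1 the loop above rebuilds row 2 as conj(row0 x row1),
// enforcing both unitarity and det = 1 in one pass.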
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U)
{
  GridBase *grid=U.Grid();
  // Reunitarise
  for(int mu=0;mu<Nd;mu++){
    auto Umu = PeekIndex<LorentzIndex>(U,mu);
    Umu      = ProjectOnGroup(Umu);
    ProjectSU3(Umu);
    PokeIndex<LorentzIndex>(U,Umu,mu);
  }
}
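// Reader's note (not part of the patch): typical use of the projections above is to
// reunitarise a gauge field after generation or smearing.  UGrid and pRNG are
// hypothetical names assumed to exist already:
//
//   LatticeGaugeFieldD Umu(UGrid);
//   SU3::HotConfiguration(pRNG, Umu);   // random links, unitary only up to finite precision
//   ProjectSU3(Umu);                    // pull every link back onto SU(3)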

typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;

@@ -1,779 +0,0 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
    Source file: Fujitsu_A64FX_asm_double.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2020
 | 
			
		||||
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXd(base)  
 | 
			
		||||
#define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXd(A)  
 | 
			
		||||
#define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  
 | 
			
		||||
#define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXd(A)  
 | 
			
		||||
#define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  
 | 
			
		||||
#define PF_GAUGE(A)  
 | 
			
		||||
#define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(A)  
 | 
			
		||||
#define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(A)  
 | 
			
		||||
#define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)  
 | 
			
		||||
#define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)  
 | 
			
		||||
#define LOCK_GAUGE(A)  
 | 
			
		||||
#define UNLOCK_GAUGE(A)  
 | 
			
		||||
#define MASK_REGS                      DECLARATIONS_A64FXd  
 | 
			
		||||
#define SAVE_RESULT(A,B)               RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)  
 | 
			
		||||
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)  
 | 
			
		||||
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd  
 | 
			
		||||
#define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)  
 | 
			
		||||
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)  
 | 
			
		||||
#define XP_PROJ                        XP_PROJ_A64FXd  
 | 
			
		||||
#define YP_PROJ                        YP_PROJ_A64FXd  
 | 
			
		||||
#define ZP_PROJ                        ZP_PROJ_A64FXd  
 | 
			
		||||
#define TP_PROJ                        TP_PROJ_A64FXd  
 | 
			
		||||
#define XM_PROJ                        XM_PROJ_A64FXd  
 | 
			
		||||
#define YM_PROJ                        YM_PROJ_A64FXd  
 | 
			
		||||
#define ZM_PROJ                        ZM_PROJ_A64FXd  
 | 
			
		||||
#define TM_PROJ                        TM_PROJ_A64FXd  
 | 
			
		||||
#define XP_RECON                       XP_RECON_A64FXd  
 | 
			
		||||
#define XM_RECON                       XM_RECON_A64FXd  
 | 
			
		||||
#define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXd  
 | 
			
		||||
#define PERMUTE_DIR0                   0  
 | 
			
		||||
#define PERMUTE_DIR1                   1  
 | 
			
		||||
#define PERMUTE_DIR2                   2  
 | 
			
		||||
#define PERMUTE_DIR3                   3  
 | 
			
		||||
#define PERMUTE                        PERMUTE_A64FXd;  
 | 
			
		||||
#define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; }  
 | 
			
		||||
#define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }  
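// Reader's note (not part of the original header): these aliases are the hooks the
// generic Wilson-kernel body expects.  Per direction it emits a {X,Y,Z,T}{P,M}_PROJ
// spin projection, the two-stage gauge multiply MULT_2SPIN_1 / MULT_2SPIN_2, and the
// matching *_RECON / *_RECON_ACCUM reconstruction, with the LOAD_*, PREFETCH_* and
// SAVE_RESULT / ADD_RESULT macros handling data movement around them.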
 | 
			
		||||
// DECLARATIONS
 | 
			
		||||
#define DECLARATIONS_A64FXd  \
 | 
			
		||||
    const uint64_t lut[4][8] = { \
 | 
			
		||||
        {4, 5, 6, 7, 0, 1, 2, 3}, \
 | 
			
		||||
        {2, 3, 0, 1, 6, 7, 4, 5}, \
 | 
			
		||||
        {1, 0, 3, 2, 5, 4, 7, 6}, \
 | 
			
		||||
        {0, 1, 2, 4, 5, 6, 7, 8} };\
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fmov z31.d , 0 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
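// Reader's note (not part of the original header): lut holds index vectors for the
// SVE tbl instruction.  With 8 doubles per 512-bit register, row 0 swaps the two
// 4-element halves, row 1 swaps pairs within each half and row 2 swaps neighbouring
// elements; LOAD_TABLE selects the row for the requested permute distance and
// PERMUTE applies it to the half-spinor registers.  Row 3 is unused in this
// double-precision variant, since MAYBEPERM skips Dir == 3.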
 | 
			
		||||
 | 
			
		||||
// RESULT
 | 
			
		||||
#define RESULT_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "str z0, [%[storeptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "str z1, [%[storeptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "str z2, [%[storeptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "str z3, [%[storeptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "str z4, [%[storeptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "str z5, [%[storeptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "str z6, [%[storeptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "str z7, [%[storeptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "str z8, [%[storeptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "str z9, [%[storeptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "str z10, [%[storeptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "str z11, [%[storeptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [storeptr] "r" (base + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
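// Reader's note (not part of the original header): the result spinor occupies the
// twelve registers z0-z11.  At the A64FX's 512-bit vector length that is
// 12 x 64 bytes = 768 bytes; the store pointer is biased by 2*3*64 = 384 bytes
// (the midpoint), so the "mul vl" offsets -6..+5 span the whole spinor with small
// signed immediates.  LOAD_CHIMU below uses the same addressing convention.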
 | 
			
		||||
// PREFETCH_CHIMU_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHI
 | 
			
		||||
#define LOAD_CHI_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU
 | 
			
		||||
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0213
 | 
			
		||||
#define LOAD_CHIMU_0213_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (&ref[2][0]) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0312
 | 
			
		||||
#define LOAD_CHIMU_0312_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (&ref[2][0]) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_TABLE0
 | 
			
		||||
#define LOAD_TABLE0  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (0) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE1
 | 
			
		||||
#define LOAD_TABLE1  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (1) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE2
 | 
			
		||||
#define LOAD_TABLE2  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (2) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE3
 | 
			
		||||
#define LOAD_TABLE3  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (3) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// PERMUTE
 | 
			
		||||
#define PERMUTE_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "tbl z12.d, { z12.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z13.d, { z13.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z14.d, { z14.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z15.d, { z15.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z16.d, { z16.d }, z30.d \n\t"  \
 | 
			
		||||
    "tbl z17.d, { z17.d }, z30.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_GAUGE
 | 
			
		||||
#define LOAD_GAUGE  \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN
 | 
			
		||||
#define MULT_2SPIN_1_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "movprfx z18.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z12.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z21.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z15.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z19.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z12.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z22.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z15.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z20.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z12.d, 0 \n\t" \
 | 
			
		||||
    "movprfx z23.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z15.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z12.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z15.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z12.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z15.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z12.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z15.d, 90 \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN_BACKEND
 | 
			
		||||
#define MULT_2SPIN_2_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcmla z18.d, p5/m, z27.d, z13.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z27.d, z16.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z28.d, z13.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z28.d, z16.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z29.d, z13.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z29.d, z16.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z27.d, z13.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z27.d, z16.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z28.d, z13.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z28.d, z16.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z29.d, z13.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z29.d, z16.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z14.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z17.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z14.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z17.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z14.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z17.d, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.d, p5/m, z24.d, z14.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.d, p5/m, z24.d, z17.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.d, p5/m, z25.d, z14.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.d, p5/m, z25.d, z17.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.d, p5/m, z26.d, z14.d, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.d, p5/m, z26.d, z17.d, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XP_PROJ
 | 
			
		||||
#define XP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z20.d, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
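// Reader's note (not part of the original header): SVE fcadd with rotation #90 adds
// i times the second operand (and #270 subtracts it), so the six fcadd lines above
// combine the upper (z12-z17) and lower (z18-z23) spin components with a factor of
// +i, as required by the X-direction spin projector, without an explicit multiply by i.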
 | 
			
		||||
// XP_RECON
 | 
			
		||||
#define XP_RECON_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z7.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z8.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z9.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z10.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "movprfx z11.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "mov z0.d, p5/m, z18.d \n\t" \
 | 
			
		||||
    "mov z1.d, p5/m, z19.d \n\t" \
 | 
			
		||||
    "mov z2.d, p5/m, z20.d \n\t" \
 | 
			
		||||
    "mov z3.d, p5/m, z21.d \n\t" \
 | 
			
		||||
    "mov z4.d, p5/m, z22.d \n\t" \
 | 
			
		||||
    "mov z5.d, p5/m, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// XP_RECON_ACCUM
 | 
			
		||||
#define XP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_PROJ
 | 
			
		||||
#define YP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fsub z12.d, p5/m, z12.d, z21.d \n\t" \
 | 
			
		||||
    "fsub z13.d, p5/m, z13.d, z22.d \n\t" \
 | 
			
		||||
    "fsub z14.d, p5/m, z14.d, z23.d \n\t" \
 | 
			
		||||
    "fadd z15.d, p5/m, z15.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z16.d, p5/m, z16.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z17.d, p5/m, z17.d, z20.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZP_PROJ
 | 
			
		||||
#define ZP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z23.d, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TP_PROJ
 | 
			
		||||
#define TP_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.d, p5/m, z12.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z13.d, p5/m, z13.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z14.d, p5/m, z14.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z15.d, p5/m, z15.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z16.d, p5/m, z16.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z17.d, p5/m, z17.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_PROJ
 | 
			
		||||
#define XM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z20.d, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON
 | 
			
		||||
#define XM_RECON_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z7.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z8.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z9.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z10.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "movprfx z11.d, p5/m, z31.d \n\t" \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "mov z0.d, p5/m, z18.d \n\t" \
 | 
			
		||||
    "mov z1.d, p5/m, z19.d \n\t" \
 | 
			
		||||
    "mov z2.d, p5/m, z20.d \n\t" \
 | 
			
		||||
    "mov z3.d, p5/m, z21.d \n\t" \
 | 
			
		||||
    "mov z4.d, p5/m, z22.d \n\t" \
 | 
			
		||||
    "mov z5.d, p5/m, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_PROJ
 | 
			
		||||
#define YM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.d, p5/m, z12.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z13.d, p5/m, z13.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z14.d, p5/m, z14.d, z23.d \n\t"  \
 | 
			
		||||
    "fsub z15.d, p5/m, z15.d, z18.d \n\t" \
 | 
			
		||||
    "fsub z16.d, p5/m, z16.d, z19.d \n\t" \
 | 
			
		||||
    "fsub z17.d, p5/m, z17.d, z20.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZM_PROJ
 | 
			
		||||
#define ZM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.d, p5/m, z12.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.d, p5/m, z13.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.d, p5/m, z14.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.d, p5/m, z15.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.d, p5/m, z16.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.d, p5/m, z17.d, z23.d, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TM_PROJ
 | 
			
		||||
#define TM_PROJ_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "fsub z12.d, p5/m, z12.d, z18.d \n\t" \
 | 
			
		||||
    "fsub z13.d, p5/m, z13.d, z19.d \n\t" \
 | 
			
		||||
    "fsub z14.d, p5/m, z14.d, z20.d \n\t" \
 | 
			
		||||
    "fsub z15.d, p5/m, z15.d, z21.d \n\t" \
 | 
			
		||||
    "fsub z16.d, p5/m, z16.d, z22.d \n\t" \
 | 
			
		||||
    "fsub z17.d, p5/m, z17.d, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON_ACCUM
 | 
			
		||||
#define XM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_RECON_ACCUM
 | 
			
		||||
#define YP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fsub z9.d, p5/m, z9.d, z18.d \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fsub z10.d, p5/m, z10.d, z19.d \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fsub z11.d, p5/m, z11.d, z20.d \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z6.d, p5/m, z6.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z7.d, p5/m, z7.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fadd z8.d, p5/m, z8.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_RECON_ACCUM
 | 
			
		||||
#define YM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z9.d, p5/m, z9.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z10.d, p5/m, z10.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z11.d, p5/m, z11.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fsub z6.d, p5/m, z6.d, z21.d \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fsub z7.d, p5/m, z7.d, z22.d \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fsub z8.d, p5/m, z8.d, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZP_RECON_ACCUM
 | 
			
		||||
#define ZP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z18.d, 270 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z19.d, 270 \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z20.d, 270 \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z21.d, 90 \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z22.d, 90 \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z23.d, 90 \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZM_RECON_ACCUM
 | 
			
		||||
#define ZM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.d, p5/m, z6.d, z18.d, 90 \n\t" \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fcadd z7.d, p5/m, z7.d, z19.d, 90 \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fcadd z8.d, p5/m, z8.d, z20.d, 90 \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fcadd z9.d, p5/m, z9.d, z21.d, 270 \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fcadd z10.d, p5/m, z10.d, z22.d, 270 \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fcadd z11.d, p5/m, z11.d, z23.d, 270 \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TP_RECON_ACCUM
 | 
			
		||||
#define TP_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z6.d, p5/m, z6.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z7.d, p5/m, z7.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z8.d, p5/m, z8.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z9.d, p5/m, z9.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z10.d, p5/m, z10.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fadd z11.d, p5/m, z11.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TM_RECON_ACCUM
 | 
			
		||||
#define TM_RECON_ACCUM_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \
 | 
			
		||||
    "fsub z6.d, p5/m, z6.d, z18.d \n\t" \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \
 | 
			
		||||
    "fsub z7.d, p5/m, z7.d, z19.d \n\t" \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \
 | 
			
		||||
    "fsub z8.d, p5/m, z8.d, z20.d \n\t" \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \
 | 
			
		||||
    "fsub z9.d, p5/m, z9.d, z21.d \n\t" \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \
 | 
			
		||||
    "fsub z10.d, p5/m, z10.d, z22.d \n\t" \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \
 | 
			
		||||
    "fsub z11.d, p5/m, z11.d, z23.d \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZERO_PSI
 | 
			
		||||
#define ZERO_PSI_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.d \n\t" \
 | 
			
		||||
    "fmov z0.d , 0 \n\t" \
 | 
			
		||||
    "fmov z1.d , 0 \n\t" \
 | 
			
		||||
    "fmov z2.d , 0 \n\t" \
 | 
			
		||||
    "fmov z3.d , 0 \n\t" \
 | 
			
		||||
    "fmov z4.d , 0 \n\t" \
 | 
			
		||||
    "fmov z5.d , 0 \n\t" \
 | 
			
		||||
    "fmov z6.d , 0 \n\t" \
 | 
			
		||||
    "fmov z7.d , 0 \n\t" \
 | 
			
		||||
    "fmov z8.d , 0 \n\t" \
 | 
			
		||||
    "fmov z9.d , 0 \n\t" \
 | 
			
		||||
    "fmov z10.d , 0 \n\t" \
 | 
			
		||||
    "fmov z11.d , 0 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
 | 
			
		||||
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
 | 
			
		||||
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ADD_RESULT_INTERNAL
 | 
			
		||||
#define ADD_RESULT_INTERNAL_A64FXd  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.d, p5/m, z0.d, z12.d \n\t"  \
 | 
			
		||||
    "fadd z1.d, p5/m, z1.d, z13.d \n\t"  \
 | 
			
		||||
    "fadd z2.d, p5/m, z2.d, z14.d \n\t"  \
 | 
			
		||||
    "fadd z3.d, p5/m, z3.d, z15.d \n\t"  \
 | 
			
		||||
    "fadd z4.d, p5/m, z4.d, z16.d \n\t"  \
 | 
			
		||||
    "fadd z5.d, p5/m, z5.d, z17.d \n\t"  \
 | 
			
		||||
    "fadd z6.d, p5/m, z6.d, z18.d \n\t"  \
 | 
			
		||||
    "fadd z7.d, p5/m, z7.d, z19.d \n\t"  \
 | 
			
		||||
    "fadd z8.d, p5/m, z8.d, z20.d \n\t"  \
 | 
			
		||||
    "fadd z9.d, p5/m, z9.d, z21.d \n\t"  \
 | 
			
		||||
    "fadd z10.d, p5/m, z10.d, z22.d \n\t"  \
 | 
			
		||||
    "fadd z11.d, p5/m, z11.d, z23.d \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
@@ -1,779 +0,0 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
    Source file: Fujitsu_A64FX_asm_single.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2020
 | 
			
		||||
 | 
			
		||||
Author: Nils Meyer <nils.meyer@ur.de>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXf(base)  
 | 
			
		||||
#define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXf(A)  
 | 
			
		||||
#define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  
 | 
			
		||||
#define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXf(A)  
 | 
			
		||||
#define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  
 | 
			
		||||
#define PF_GAUGE(A)  
 | 
			
		||||
#define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(A)  
 | 
			
		||||
#define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(A)  
 | 
			
		||||
#define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)  
 | 
			
		||||
#define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)  
 | 
			
		||||
#define LOCK_GAUGE(A)  
 | 
			
		||||
#define UNLOCK_GAUGE(A)  
 | 
			
		||||
#define MASK_REGS                      DECLARATIONS_A64FXf  
 | 
			
		||||
#define SAVE_RESULT(A,B)               RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)  
 | 
			
		||||
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)  
 | 
			
		||||
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf  
 | 
			
		||||
#define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)  
 | 
			
		||||
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)  
 | 
			
		||||
#define XP_PROJ                        XP_PROJ_A64FXf  
 | 
			
		||||
#define YP_PROJ                        YP_PROJ_A64FXf  
 | 
			
		||||
#define ZP_PROJ                        ZP_PROJ_A64FXf  
 | 
			
		||||
#define TP_PROJ                        TP_PROJ_A64FXf  
 | 
			
		||||
#define XM_PROJ                        XM_PROJ_A64FXf  
 | 
			
		||||
#define YM_PROJ                        YM_PROJ_A64FXf  
 | 
			
		||||
#define ZM_PROJ                        ZM_PROJ_A64FXf  
 | 
			
		||||
#define TM_PROJ                        TM_PROJ_A64FXf  
 | 
			
		||||
#define XP_RECON                       XP_RECON_A64FXf  
 | 
			
		||||
#define XM_RECON                       XM_RECON_A64FXf  
 | 
			
		||||
#define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXf  
 | 
			
		||||
#define PERMUTE_DIR0                   0  
 | 
			
		||||
#define PERMUTE_DIR1                   1  
 | 
			
		||||
#define PERMUTE_DIR2                   2  
 | 
			
		||||
#define PERMUTE_DIR3                   3  
 | 
			
		||||
#define PERMUTE                        PERMUTE_A64FXf;  
 | 
			
		||||
#define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; } else if (Dir == 3) { LOAD_TABLE3; }  
 | 
			
		||||
#define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }  
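// Reader's note (not part of the original header): compared with the double-precision
// mapping, LOAD_TABLE here also handles Dir == 3 and MAYBEPERM permutes for every
// direction rather than skipping Dir == 3, presumably reflecting the finer lane
// structure of 16 floats (8 complex singles) per 512-bit vector.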
 | 
			
		||||
// DECLARATIONS
 | 
			
		||||
#define DECLARATIONS_A64FXf  \
 | 
			
		||||
    const uint32_t lut[4][16] = { \
 | 
			
		||||
        {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
 | 
			
		||||
        {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
 | 
			
		||||
        {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \
 | 
			
		||||
        {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fmov z31.s , 0 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// RESULT
 | 
			
		||||
#define RESULT_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "str z0, [%[storeptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "str z1, [%[storeptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "str z2, [%[storeptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "str z3, [%[storeptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "str z4, [%[storeptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "str z5, [%[storeptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "str z6, [%[storeptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "str z7, [%[storeptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "str z8, [%[storeptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "str z9, [%[storeptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "str z10, [%[storeptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "str z11, [%[storeptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [storeptr] "r" (base + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHI
 | 
			
		||||
#define LOAD_CHI_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU
 | 
			
		||||
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0213
 | 
			
		||||
#define LOAD_CHIMU_0213_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (&ref[2][0]) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0312
 | 
			
		||||
#define LOAD_CHIMU_0312_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (&ref[2][0]) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_TABLE0
 | 
			
		||||
#define LOAD_TABLE0  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (0) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE1
 | 
			
		||||
#define LOAD_TABLE1  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (1) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE2
 | 
			
		||||
#define LOAD_TABLE2  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (2) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// LOAD_TABLE3
 | 
			
		||||
#define LOAD_TABLE3  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [tableptr] "r" (&lut[0]),[index] "i" (3) \
 | 
			
		||||
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// PERMUTE
#define PERMUTE_A64FXf  \
asm ( \
    "tbl z12.s, { z12.s }, z30.s \n\t"  \
    "tbl z13.s, { z13.s }, z30.s \n\t"  \
    "tbl z14.s, { z14.s }, z30.s \n\t"  \
    "tbl z15.s, { z15.s }, z30.s \n\t"  \
    "tbl z16.s, { z16.s }, z30.s \n\t"  \
    "tbl z17.s, { z17.s }, z30.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 
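// Editorial note, not part of the original header: PERMUTE applies the lane
// permutation held in z30 (loaded by one of the LOAD_TABLE* macros above) to the
// six Chi registers with TBL. A minimal intrinsics sketch of the same idea,
// assuming a 512-bit vector length and the half-swap table {8..15, 0..7} from the
// single-precision DECLARATIONS later in this compare; the helper name
// permute_halves is hypothetical:
#include <arm_sve.h>
static inline svfloat32_t permute_halves(svfloat32_t x)
{
    svbool_t   pg  = svptrue_b32();
    svuint32_t idx = sveor_x(pg, svindex_u32(0, 1), 8);  // lane index XOR 8 -> {8..15, 0..7}
    return svtbl(x, idx);                                // same effect as "tbl zN.s, { zN.s }, z30.s"
}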
 | 
			
		||||
// LOAD_GAUGE
 | 
			
		||||
#define LOAD_GAUGE  \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN
 | 
			
		||||
#define MULT_2SPIN_1_A64FXf(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
 | 
			
		||||
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
 | 
			
		||||
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
 | 
			
		||||
    "movprfx z18.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z12.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z21.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z15.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z19.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z12.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z22.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z15.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z20.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z12.s, 0 \n\t" \
 | 
			
		||||
    "movprfx z23.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z15.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z12.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z15.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z12.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z15.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z12.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z15.s, 90 \n\t" \
 | 
			
		||||
    "ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
 | 
			
		||||
    "ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
 | 
			
		||||
    "ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN_BACKEND
 | 
			
		||||
#define MULT_2SPIN_2_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcmla z18.s, p5/m, z27.s, z13.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z27.s, z16.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z28.s, z13.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z28.s, z16.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z29.s, z13.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z29.s, z16.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z27.s, z13.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z27.s, z16.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z28.s, z13.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z28.s, z16.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z29.s, z13.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z29.s, z16.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z14.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z17.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z14.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z17.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z14.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z17.s, 0 \n\t" \
 | 
			
		||||
    "fcmla z18.s, p5/m, z24.s, z14.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z21.s, p5/m, z24.s, z17.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z19.s, p5/m, z25.s, z14.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z22.s, p5/m, z25.s, z17.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z20.s, p5/m, z26.s, z14.s, 90 \n\t" \
 | 
			
		||||
    "fcmla z23.s, p5/m, z26.s, z17.s, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
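// Editorial note, not part of the original header: MULT_2SPIN_1/_2 implement the
// SU(3) times two-spinor multiply with pairs of FCMLA instructions. For each
// product, the rotation-0 instruction accumulates the contribution of the real
// part of U and the rotation-90 instruction the contribution of its imaginary
// part, so together one pair adds a full complex multiply, acc += U * Chi, on
// interleaved re/im lanes. Minimal sketch with the intrinsics used elsewhere in
// this compare; the helper name cmadd is hypothetical:
#include <arm_sve.h>
static inline svfloat32_t cmadd(svbool_t pg, svfloat32_t acc, svfloat32_t u, svfloat32_t chi)
{
    acc = svcmla_x(pg, acc, u, chi, 0);    // acc.re += u.re*chi.re; acc.im += u.re*chi.im
    acc = svcmla_x(pg, acc, u, chi, 90);   // acc.re -= u.im*chi.im; acc.im += u.im*chi.re
    return acc;                            // net effect: acc += u * chi (complex)
}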
 | 
			
		||||
// XP_PROJ
#define XP_PROJ_A64FXf  \
{ \
asm ( \
    "fcadd z12.s, p5/m, z12.s, z21.s, 90 \n\t" \
    "fcadd z13.s, p5/m, z13.s, z22.s, 90 \n\t" \
    "fcadd z14.s, p5/m, z14.s, z23.s, 90 \n\t" \
    "fcadd z15.s, p5/m, z15.s, z18.s, 90 \n\t" \
    "fcadd z16.s, p5/m, z16.s, z19.s, 90 \n\t" \
    "fcadd z17.s, p5/m, z17.s, z20.s, 90 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
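// Editorial note, not part of the original header: the *_PROJ macros build the
// two-component half spinor. FCADD with rotation 90 adds i times the second
// operand (re -= b.im, im += b.re), rotation 270 subtracts i times it, and the
// fadd/fsub variants add or subtract it directly; XP_PROJ above computes
// chi_0 = psi_0 + i*psi_3 and chi_1 = psi_1 + i*psi_2 colour-component-wise.
// Illustrative sketch only; the helper names are hypothetical:
#include <arm_sve.h>
static inline svfloat32_t add_i_times(svbool_t pg, svfloat32_t a, svfloat32_t b)
{
    return svcadd_x(pg, a, b, 90);    // a + i*b on interleaved re/im lanes
}
static inline svfloat32_t sub_i_times(svbool_t pg, svfloat32_t a, svfloat32_t b)
{
    return svcadd_x(pg, a, b, 270);   // a - i*b
}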
 | 
			
		||||
// XP_RECON
 | 
			
		||||
#define XP_RECON_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z7.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z8.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z9.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z10.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "movprfx z11.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "mov z0.s, p5/m, z18.s \n\t" \
 | 
			
		||||
    "mov z1.s, p5/m, z19.s \n\t" \
 | 
			
		||||
    "mov z2.s, p5/m, z20.s \n\t" \
 | 
			
		||||
    "mov z3.s, p5/m, z21.s \n\t" \
 | 
			
		||||
    "mov z4.s, p5/m, z22.s \n\t" \
 | 
			
		||||
    "mov z5.s, p5/m, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// XP_RECON_ACCUM
 | 
			
		||||
#define XP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_PROJ
 | 
			
		||||
#define YP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fsub z12.s, p5/m, z12.s, z21.s \n\t" \
 | 
			
		||||
    "fsub z13.s, p5/m, z13.s, z22.s \n\t" \
 | 
			
		||||
    "fsub z14.s, p5/m, z14.s, z23.s \n\t" \
 | 
			
		||||
    "fadd z15.s, p5/m, z15.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z16.s, p5/m, z16.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z17.s, p5/m, z17.s, z20.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZP_PROJ
 | 
			
		||||
#define ZP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z23.s, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TP_PROJ
 | 
			
		||||
#define TP_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.s, p5/m, z12.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z13.s, p5/m, z13.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z14.s, p5/m, z14.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z15.s, p5/m, z15.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z16.s, p5/m, z16.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z17.s, p5/m, z17.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_PROJ
 | 
			
		||||
#define XM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z20.s, 270 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON
 | 
			
		||||
#define XM_RECON_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "movprfx z6.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z7.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z8.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z9.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z10.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "movprfx z11.s, p5/m, z31.s \n\t" \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "mov z0.s, p5/m, z18.s \n\t" \
 | 
			
		||||
    "mov z1.s, p5/m, z19.s \n\t" \
 | 
			
		||||
    "mov z2.s, p5/m, z20.s \n\t" \
 | 
			
		||||
    "mov z3.s, p5/m, z21.s \n\t" \
 | 
			
		||||
    "mov z4.s, p5/m, z22.s \n\t" \
 | 
			
		||||
    "mov z5.s, p5/m, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_PROJ
 | 
			
		||||
#define YM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z12.s, p5/m, z12.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z13.s, p5/m, z13.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z14.s, p5/m, z14.s, z23.s \n\t"  \
 | 
			
		||||
    "fsub z15.s, p5/m, z15.s, z18.s \n\t" \
 | 
			
		||||
    "fsub z16.s, p5/m, z16.s, z19.s \n\t" \
 | 
			
		||||
    "fsub z17.s, p5/m, z17.s, z20.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ZM_PROJ
 | 
			
		||||
#define ZM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z12.s, p5/m, z12.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z13.s, p5/m, z13.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z14.s, p5/m, z14.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "fcadd z15.s, p5/m, z15.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z16.s, p5/m, z16.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z17.s, p5/m, z17.s, z23.s, 90 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// TM_PROJ
 | 
			
		||||
#define TM_PROJ_A64FXf  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "fsub z12.s, p5/m, z12.s, z18.s \n\t" \
 | 
			
		||||
    "fsub z13.s, p5/m, z13.s, z19.s \n\t" \
 | 
			
		||||
    "fsub z14.s, p5/m, z14.s, z20.s \n\t" \
 | 
			
		||||
    "fsub z15.s, p5/m, z15.s, z21.s \n\t" \
 | 
			
		||||
    "fsub z16.s, p5/m, z16.s, z22.s \n\t" \
 | 
			
		||||
    "fsub z17.s, p5/m, z17.s, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// XM_RECON_ACCUM
 | 
			
		||||
#define XM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YP_RECON_ACCUM
 | 
			
		||||
#define YP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fsub z9.s, p5/m, z9.s, z18.s \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fsub z10.s, p5/m, z10.s, z19.s \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fsub z11.s, p5/m, z11.s, z20.s \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z6.s, p5/m, z6.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z7.s, p5/m, z7.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fadd z8.s, p5/m, z8.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// YM_RECON_ACCUM
 | 
			
		||||
#define YM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z9.s, p5/m, z9.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z10.s, p5/m, z10.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z11.s, p5/m, z11.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fsub z6.s, p5/m, z6.s, z21.s \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fsub z7.s, p5/m, z7.s, z22.s \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fsub z8.s, p5/m, z8.s, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZP_RECON_ACCUM
 | 
			
		||||
#define ZP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z18.s, 270 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z19.s, 270 \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z20.s, 270 \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z21.s, 90 \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z22.s, 90 \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z23.s, 90 \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZM_RECON_ACCUM
 | 
			
		||||
#define ZM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fcadd z6.s, p5/m, z6.s, z18.s, 90 \n\t" \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fcadd z7.s, p5/m, z7.s, z19.s, 90 \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fcadd z8.s, p5/m, z8.s, z20.s, 90 \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fcadd z9.s, p5/m, z9.s, z21.s, 270 \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fcadd z10.s, p5/m, z10.s, z22.s, 270 \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fcadd z11.s, p5/m, z11.s, z23.s, 270 \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TP_RECON_ACCUM
 | 
			
		||||
#define TP_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z6.s, p5/m, z6.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z7.s, p5/m, z7.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z8.s, p5/m, z8.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z9.s, p5/m, z9.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z10.s, p5/m, z10.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fadd z11.s, p5/m, z11.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// TM_RECON_ACCUM
 | 
			
		||||
#define TM_RECON_ACCUM_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
 | 
			
		||||
    "fsub z6.s, p5/m, z6.s, z18.s \n\t" \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
 | 
			
		||||
    "fsub z7.s, p5/m, z7.s, z19.s \n\t" \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
 | 
			
		||||
    "fsub z8.s, p5/m, z8.s, z20.s \n\t" \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
 | 
			
		||||
    "fsub z9.s, p5/m, z9.s, z21.s \n\t" \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
 | 
			
		||||
    "fsub z10.s, p5/m, z10.s, z22.s \n\t" \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
 | 
			
		||||
    "fsub z11.s, p5/m, z11.s, z23.s \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// ZERO_PSI
 | 
			
		||||
#define ZERO_PSI_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "ptrue p5.s \n\t" \
 | 
			
		||||
    "fmov z0.s , 0 \n\t" \
 | 
			
		||||
    "fmov z1.s , 0 \n\t" \
 | 
			
		||||
    "fmov z2.s , 0 \n\t" \
 | 
			
		||||
    "fmov z3.s , 0 \n\t" \
 | 
			
		||||
    "fmov z4.s , 0 \n\t" \
 | 
			
		||||
    "fmov z5.s , 0 \n\t" \
 | 
			
		||||
    "fmov z6.s , 0 \n\t" \
 | 
			
		||||
    "fmov z7.s , 0 \n\t" \
 | 
			
		||||
    "fmov z8.s , 0 \n\t" \
 | 
			
		||||
    "fmov z9.s , 0 \n\t" \
 | 
			
		||||
    "fmov z10.s , 0 \n\t" \
 | 
			
		||||
    "fmov z11.s , 0 \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
 | 
			
		||||
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
 | 
			
		||||
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
 | 
			
		||||
    "prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
 | 
			
		||||
    :  \
 | 
			
		||||
    : [fetchptr] "r" (base) \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
 | 
			
		||||
); \
 | 
			
		||||
}
 | 
			
		||||
// ADD_RESULT_INTERNAL
 | 
			
		||||
#define ADD_RESULT_INTERNAL_A64FXf  \
 | 
			
		||||
asm ( \
 | 
			
		||||
    "fadd z0.s, p5/m, z0.s, z12.s \n\t"  \
 | 
			
		||||
    "fadd z1.s, p5/m, z1.s, z13.s \n\t"  \
 | 
			
		||||
    "fadd z2.s, p5/m, z2.s, z14.s \n\t"  \
 | 
			
		||||
    "fadd z3.s, p5/m, z3.s, z15.s \n\t"  \
 | 
			
		||||
    "fadd z4.s, p5/m, z4.s, z16.s \n\t"  \
 | 
			
		||||
    "fadd z5.s, p5/m, z5.s, z17.s \n\t"  \
 | 
			
		||||
    "fadd z6.s, p5/m, z6.s, z18.s \n\t"  \
 | 
			
		||||
    "fadd z7.s, p5/m, z7.s, z19.s \n\t"  \
 | 
			
		||||
    "fadd z8.s, p5/m, z8.s, z20.s \n\t"  \
 | 
			
		||||
    "fadd z9.s, p5/m, z9.s, z21.s \n\t"  \
 | 
			
		||||
    "fadd z10.s, p5/m, z10.s, z22.s \n\t"  \
 | 
			
		||||
    "fadd z11.s, p5/m, z11.s, z23.s \n\t"  \
 | 
			
		||||
    :  \
 | 
			
		||||
    :  \
 | 
			
		||||
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
 | 
			
		||||
); 
 | 
			
		||||
 | 
			
		||||
@@ -38,10 +38,11 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)  
#define UNLOCK_GAUGE(A)  
#define MASK_REGS                      DECLARATIONS_A64FXd  
#define SAVE_RESULT(A,B)               RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)  
#define SAVE_RESULT(A,B)               RESULT_A64FXd(A);  
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)  
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd  
#define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)  
#define ZERO_PSI                       ZERO_PSI_A64FXd  
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)  
#define XP_PROJ                        XP_PROJ_A64FXd  
#define YP_PROJ                        YP_PROJ_A64FXd  
@@ -70,6 +71,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }  
// DECLARATIONS
#define DECLARATIONS_A64FXd  \
    uint64_t baseU; \
    const uint64_t lut[4][8] = { \
        {4, 5, 6, 7, 0, 1, 2, 3}, \
        {2, 3, 0, 1, 6, 7, 4, 5}, \
@@ -126,114 +128,114 @@ Author: Nils Meyer <nils.meyer@ur.de>
// RESULT
 | 
			
		||||
#define RESULT_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64), result_00);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64), result_01);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64), result_02);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64), result_10);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64), result_11);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64), result_12);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64), result_20);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64), result_21);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64), result_22);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64), result_30);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64), result_31);  \
 | 
			
		||||
    svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64), result_32);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31);  \
 | 
			
		||||
    svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32);  \
 | 
			
		||||
}
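// Editorial note, not part of the original patch: the change above rewrites the
// hand-computed byte offsets "+ k * 64" as vnum displacements. svst1_vnum(pg, p, k, v)
// stores at p plus k vector lengths, and with the 512-bit vectors these headers
// assume one vector length is 64 bytes, so vnum = k addresses exactly the same
// location while letting the compiler use the reg + imm*VL addressing mode.
// Minimal sketch, assuming svcntb() == 64; the function name is hypothetical:
#include <arm_sve.h>
static inline void store_two_ways(svbool_t pg, double *p, svfloat64_t v, int64_t k)
{
    svst1(pg, (float64_t *)((char *)p + k * 64), v);   // byte-offset form (before the change)
    svst1_vnum(pg, (float64_t *)p, k, v);              // vnum form (after), same address at VL = 512 bit
}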
 | 
			
		||||
// PREFETCH_CHIMU_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_CHIMU_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L2 (prefetch to L2)
 | 
			
		||||
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \
 | 
			
		||||
    const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \
 | 
			
		||||
}
 | 
			
		||||
// PREFETCH_GAUGE_L1 (prefetch to L1)
 | 
			
		||||
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
 | 
			
		||||
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \
 | 
			
		||||
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHI
 | 
			
		||||
#define LOAD_CHI_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    Chi_00 = svld1(pg1, (float64_t*)(base + 0 * 64));  \
 | 
			
		||||
    Chi_01 = svld1(pg1, (float64_t*)(base + 1 * 64));  \
 | 
			
		||||
    Chi_02 = svld1(pg1, (float64_t*)(base + 2 * 64));  \
 | 
			
		||||
    Chi_10 = svld1(pg1, (float64_t*)(base + 3 * 64));  \
 | 
			
		||||
    Chi_11 = svld1(pg1, (float64_t*)(base + 4 * 64));  \
 | 
			
		||||
    Chi_12 = svld1(pg1, (float64_t*)(base + 5 * 64));  \
 | 
			
		||||
    Chi_00 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(0));  \
 | 
			
		||||
    Chi_01 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(1));  \
 | 
			
		||||
    Chi_02 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(2));  \
 | 
			
		||||
    Chi_10 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(3));  \
 | 
			
		||||
    Chi_11 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(4));  \
 | 
			
		||||
    Chi_12 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(5));  \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU
 | 
			
		||||
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \
 | 
			
		||||
    Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \
 | 
			
		||||
    Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \
 | 
			
		||||
    Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \
 | 
			
		||||
    Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \
 | 
			
		||||
    Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \
 | 
			
		||||
    Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \
 | 
			
		||||
    Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \
 | 
			
		||||
    Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \
 | 
			
		||||
    Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \
 | 
			
		||||
    Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \
 | 
			
		||||
    Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \
 | 
			
		||||
    Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \
 | 
			
		||||
    Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3));  \
 | 
			
		||||
    Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \
 | 
			
		||||
    Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0));  \
 | 
			
		||||
    Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \
 | 
			
		||||
    Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4));  \
 | 
			
		||||
    Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \
 | 
			
		||||
    Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1));  \
 | 
			
		||||
    Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \
 | 
			
		||||
    Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5));  \
 | 
			
		||||
    Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \
 | 
			
		||||
    Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2));  \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0213
 | 
			
		||||
#define LOAD_CHIMU_0213_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
    Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \
 | 
			
		||||
    Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \
 | 
			
		||||
    Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \
 | 
			
		||||
    Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \
 | 
			
		||||
    Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \
 | 
			
		||||
    Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \
 | 
			
		||||
    Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \
 | 
			
		||||
    Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \
 | 
			
		||||
    Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \
 | 
			
		||||
    Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \
 | 
			
		||||
    Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \
 | 
			
		||||
    Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \
 | 
			
		||||
    Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \
 | 
			
		||||
    Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0));  \
 | 
			
		||||
    Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \
 | 
			
		||||
    Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1));  \
 | 
			
		||||
    Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \
 | 
			
		||||
    Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2));  \
 | 
			
		||||
    Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \
 | 
			
		||||
    Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3));  \
 | 
			
		||||
    Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \
 | 
			
		||||
    Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4));  \
 | 
			
		||||
    Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \
 | 
			
		||||
    Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5));  \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_CHIMU_0312
 | 
			
		||||
#define LOAD_CHIMU_0312_A64FXd  \
 | 
			
		||||
{ \
 | 
			
		||||
    const SiteSpinor & ref(in[offset]); \
 | 
			
		||||
    Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \
 | 
			
		||||
    Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \
 | 
			
		||||
    Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \
 | 
			
		||||
    Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \
 | 
			
		||||
    Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \
 | 
			
		||||
    Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \
 | 
			
		||||
    Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \
 | 
			
		||||
    Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \
 | 
			
		||||
    Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \
 | 
			
		||||
    Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \
 | 
			
		||||
    Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \
 | 
			
		||||
    Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \
 | 
			
		||||
    Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \
 | 
			
		||||
    Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3));  \
 | 
			
		||||
    Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \
 | 
			
		||||
    Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4));  \
 | 
			
		||||
    Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \
 | 
			
		||||
    Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5));  \
 | 
			
		||||
    Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \
 | 
			
		||||
    Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0));  \
 | 
			
		||||
    Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \
 | 
			
		||||
    Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1));  \
 | 
			
		||||
    Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \
 | 
			
		||||
    Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2));  \
 | 
			
		||||
}
 | 
			
		||||
// LOAD_TABLE0
 | 
			
		||||
#define LOAD_TABLE0  \
 | 
			
		||||
@@ -261,26 +263,26 @@ Author: Nils Meyer <nils.meyer@ur.de>
 | 
			
		||||
    Chi_12 = svtbl(Chi_12, table0);    
 | 
			
		||||
 | 
			
		||||
// LOAD_GAUGE
 | 
			
		||||
#define LOAD_GAUGE  \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
#define LOAD_GAUGE(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
 | 
			
		||||
    U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
 | 
			
		||||
    U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
 | 
			
		||||
    U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
 | 
			
		||||
    U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
 | 
			
		||||
    U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
 | 
			
		||||
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
 | 
			
		||||
    U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \
 | 
			
		||||
    U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \
 | 
			
		||||
    U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \
 | 
			
		||||
    U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \
 | 
			
		||||
    U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \
 | 
			
		||||
    U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN
 | 
			
		||||
#define MULT_2SPIN_1_A64FXd(A)  \
 | 
			
		||||
{ \
 | 
			
		||||
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
 | 
			
		||||
    U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
 | 
			
		||||
    U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
 | 
			
		||||
    U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
 | 
			
		||||
    U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
 | 
			
		||||
    U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
 | 
			
		||||
    U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
 | 
			
		||||
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
 | 
			
		||||
    U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \
 | 
			
		||||
    U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \
 | 
			
		||||
    U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \
 | 
			
		||||
    U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \
 | 
			
		||||
    U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \
 | 
			
		||||
    U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \
 | 
			
		||||
    UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
 | 
			
		||||
    UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
 | 
			
		||||
    UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
 | 
			
		||||
@@ -293,9 +295,9 @@ Author: Nils Meyer <nils.meyer@ur.de>
 | 
			
		||||
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
 | 
			
		||||
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
 | 
			
		||||
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
 | 
			
		||||
    U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -4 * 64));  \
 | 
			
		||||
    U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -1 * 64));  \
 | 
			
		||||
    U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 2 * 64));  \
 | 
			
		||||
    U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-4));  \
 | 
			
		||||
    U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-1));  \
 | 
			
		||||
    U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(2));  \
 | 
			
		||||
}
 | 
			
		||||
// MULT_2SPIN_BACKEND
 | 
			
		||||
#define MULT_2SPIN_2_A64FXd  \
 | 
			
		||||
@@ -570,12 +572,12 @@ Author: Nils Meyer <nils.meyer@ur.de>
    result_31 = svdup_f64(0.); \
    result_32 = svdup_f64(0.); 

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \

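// Editorial note, not part of the original patch: the hunk above replaces the L2
// store prefetches with DC ZVA. DC ZVA zero-fills one "zero block" (the size is
// reported by DCZID_EL0; the 256-byte stride used above assumes a 256-byte block,
// matching the A64FX cache line) and allocates it in cache without reading the old
// contents from memory, saving read bandwidth for result sites that are fully
// overwritten. Minimal sketch of the underlying operations; the helper names are
// hypothetical:
#include <stdint.h>
static inline uint64_t dczva_block_bytes(void)
{
    uint64_t dczid;
    asm volatile("mrs %0, dczid_el0" : "=r"(dczid));   // DCZID_EL0 is readable at EL0
    return 4ull << (dczid & 0xf);                      // block size = 4 * 2^BS bytes
}
static inline void zero_fill_block(void *p)
{
    asm volatile("dc zva, %0" : : "r"(p) : "memory");  // zero and allocate one block
}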
@@ -38,10 +38,11 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)  
#define UNLOCK_GAUGE(A)  
#define MASK_REGS                      DECLARATIONS_A64FXf  
#define SAVE_RESULT(A,B)               RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)  
#define SAVE_RESULT(A,B)               RESULT_A64FXf(A);  
#define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)  
#define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf  
#define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)  
#define ZERO_PSI                       ZERO_PSI_A64FXf  
#define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)  
#define XP_PROJ                        XP_PROJ_A64FXf  
#define YP_PROJ                        YP_PROJ_A64FXf  
@@ -70,6 +71,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }  
// DECLARATIONS
#define DECLARATIONS_A64FXf  \
    uint64_t baseU; \
    const uint32_t lut[4][16] = { \
        {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
        {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
@@ -126,114 +128,114 @@ Author: Nils Meyer <nils.meyer@ur.de>
// RESULT
 | 
			
		||||
#define RESULT_A64FXf(base)  \
 | 
			
		||||
{ \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64), result_00);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64), result_01);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64), result_02);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64), result_10);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64), result_11);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64), result_12);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64), result_20);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64), result_21);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64), result_22);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64), result_30);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64), result_31);  \
 | 
			
		||||
    svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64), result_32);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31);  \
 | 
			
		||||
    svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32);  \
 | 
			
		||||
}
 | 
			
		||||
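The hunk above replaces explicit byte offsets (base + n * 64) with the ACLE _vnum intrinsics, which address in units of whole SVE vectors. A minimal sketch of the equivalence, assuming a 512-bit vector length (64-byte vectors, as on the A64FX); the function and buffer names below are illustrative and not part of Grid:

// Sketch: svld1_vnum(pg, p, n) reads the n-th full vector after p, i.e. the
// same bytes as svld1(pg, p + n * svcntw()) for 32-bit elements.
#include <arm_sve.h>
#include <cassert>
#include <cstring>

void vnum_equivalence(const float *buf)   // buf: illustrative input data
{
  svbool_t    pg = svptrue_b32();
  svfloat32_t a  = svld1(pg, buf + 2 * svcntw());  // byte-offset style (old macros)
  svfloat32_t b  = svld1_vnum(pg, buf, 2);         // vector-count style (new macros)

  float ta[64], tb[64];                            // large enough for VL <= 2048 bits
  svst1(pg, ta, a);
  svst1(pg, tb, b);
  assert(std::memcmp(ta, tb, svcntw() * sizeof(float)) == 0); // identical loads
}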
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
    svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \
    svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \
    const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
    svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
    svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \
    svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \
}
// LOAD_CHI
#define LOAD_CHI_A64FXf(base)  \
{ \
    Chi_00 = svld1(pg1, (float32_t*)(base + 0 * 64));  \
    Chi_01 = svld1(pg1, (float32_t*)(base + 1 * 64));  \
    Chi_02 = svld1(pg1, (float32_t*)(base + 2 * 64));  \
    Chi_10 = svld1(pg1, (float32_t*)(base + 3 * 64));  \
    Chi_11 = svld1(pg1, (float32_t*)(base + 4 * 64));  \
    Chi_12 = svld1(pg1, (float32_t*)(base + 5 * 64));  \
    Chi_00 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(0));  \
    Chi_01 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(1));  \
    Chi_02 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(2));  \
    Chi_10 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(3));  \
    Chi_11 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(4));  \
    Chi_12 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(5));  \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \
{ \
    Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \
    Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \
    Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3));  \
    Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \
    Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0));  \
    Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \
    Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4));  \
    Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \
    Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1));  \
    Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \
    Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5));  \
    Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \
    Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2));  \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \
    Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \
    Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0));  \
    Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \
    Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1));  \
    Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \
    Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2));  \
    Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \
    Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3));  \
    Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \
    Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4));  \
    Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \
    Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5));  \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
    Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \
    Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \
    Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \
    Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \
    Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \
    Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \
    Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \
    Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \
    Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \
    Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \
    Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \
    Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \
    Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \
    Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3));  \
    Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \
    Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4));  \
    Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \
    Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5));  \
    Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \
    Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0));  \
    Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \
    Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1));  \
    Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \
    Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2));  \
}
// LOAD_TABLE0
#define LOAD_TABLE0  \
@@ -261,26 +263,26 @@ Author: Nils Meyer <nils.meyer@ur.de>
    Chi_12 = svtbl(Chi_12, table0);

// LOAD_GAUGE
#define LOAD_GAUGE  \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
#define LOAD_GAUGE(A)  \
{ \
    U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
    U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
    U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
    U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
    U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
    U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \
    U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \
    U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \
    U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \
    U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \
    U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
    U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \
    U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \
    U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \
    U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \
    U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \
    U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \
    const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
    U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \
    U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \
    U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \
    U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \
    U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \
    U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \
    UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
    UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
    UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
@@ -293,9 +295,9 @@ Author: Nils Meyer <nils.meyer@ur.de>
    UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
    UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
    UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
    U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -4 * 64));  \
    U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -1 * 64));  \
    U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 2 * 64));  \
    U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-4));  \
    U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-1));  \
    U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(2));  \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXf  \
@@ -570,12 +572,12 @@ Author: Nils Meyer <nils.meyer@ur.de>
    result_31 = svdup_f32(0.); \
    result_32 = svdup_f32(0.);

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \
{ \
    svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
    svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
    asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
}
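The renamed comment reflects that the L2 "store prefetch" now zero-fills the destination with DC ZVA instead of issuing store-stream prefetches: the result lines are allocated in cache without first being read from memory, so the svst1 stores that follow avoid read-for-ownership traffic. A standalone sketch of the instruction, assuming AArch64 with GCC/Clang inline asm; the zeroing block size is implementation-defined and must be read from DCZID_EL0 (the offsets used above suggest 256-byte blocks on this target), and the helper names below are illustrative:

// Sketch: query the DC ZVA block size and zero one block in place.
#include <cstdint>

static inline uint64_t dczva_block_bytes(void)
{
  uint64_t dczid;
  asm volatile("mrs %0, dczid_el0" : "=r"(dczid));
  return 4ull << (dczid & 0xF);          // block size in bytes
}

static inline void zero_block(void *p)   // p must be block-aligned
{
  asm volatile("dc zva, %0" : : "r"(p) : "memory");
}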
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \

@@ -46,6 +46,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
#undef MULT_2SPIN_2
#undef MAYBEPERM
#undef LOAD_CHI
#undef ZERO_PSI
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ

@@ -38,12 +38,20 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_HIP
#include <hip/hip_fp16.h>
#endif
#ifdef GRID_SYCL
namespace Grid {
  typedef struct { uint16_t x;} half;
  typedef struct { half   x; half   y;} half2;
  typedef struct { float  x; float  y;} float2;
  typedef struct { double x; double y;} double2;
}
#endif

namespace Grid {

#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
typedef struct { uint16_t x;} half;
#endif

typedef struct Half2_t { half x; half y; } Half2;

#define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH )
@@ -52,11 +60,25 @@ template<class pair>
class GpuComplex {
public:
  pair z;
  typedef decltype(z.x) real;
  typedef decltype(z.x) Real;
public:
  accelerator_inline GpuComplex() = default;
  accelerator_inline GpuComplex(real re,real im) { z.x=re; z.y=im; };
  accelerator_inline GpuComplex(Real re,Real im) { z.x=re; z.y=im; };
  accelerator_inline GpuComplex(const GpuComplex &zz) { z = zz.z;};
  accelerator_inline Real real(void) const { return z.x; };
  accelerator_inline Real imag(void) const { return z.y; };
  accelerator_inline GpuComplex &operator*=(const GpuComplex &r) {
    *this = (*this) * r;
    return *this;
  }
  accelerator_inline GpuComplex &operator+=(const GpuComplex &r) {
    *this = (*this) + r;
    return *this;
  }
  accelerator_inline GpuComplex &operator-=(const GpuComplex &r) {
    *this = (*this) - r;
    return *this;
  }
  friend accelerator_inline  GpuComplex operator+(const GpuComplex &lhs,const GpuComplex &rhs) {
    GpuComplex r ;
    r.z.x = lhs.z.x + rhs.z.x;
@@ -149,6 +171,11 @@ typedef GpuVector<NSIMD_RealD,    double      > GpuVectorRD;
typedef GpuVector<NSIMD_ComplexD, GpuComplexD > GpuVectorCD;
typedef GpuVector<NSIMD_Integer,  Integer     > GpuVectorI;

accelerator_inline GpuComplexF timesI(const GpuComplexF &r)     { return(GpuComplexF(-r.imag(),r.real()));}
accelerator_inline GpuComplexD timesI(const GpuComplexD &r)     { return(GpuComplexD(-r.imag(),r.real()));}
accelerator_inline GpuComplexF timesMinusI(const GpuComplexF &r){ return(GpuComplexF(r.imag(),-r.real()));}
accelerator_inline GpuComplexD timesMinusI(const GpuComplexD &r){ return(GpuComplexD(r.imag(),-r.real()));}
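The new GpuComplex helpers avoid a full complex multiply by using i*(a+bi) = -b + ai and -i*(a+bi) = b - ai directly, returning the swapped and sign-flipped components. A small host-side check of the identity with std::complex (the variable names are illustrative):

// Sketch: multiplying by +/- i is a component swap plus sign flip.
#include <cassert>
#include <complex>

int main()
{
  std::complex<float> z(3.0f, 4.0f);
  std::complex<float> timesI(-z.imag(), z.real());       // i * z
  std::complex<float> timesMinusI(z.imag(), -z.real());  // -i * z
  assert(timesI == z * std::complex<float>(0.0f, 1.0f));
  assert(timesMinusI == z * std::complex<float>(0.0f, -1.0f));
  return 0;
}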
accelerator_inline float half2float(half h)
{
  float f;
@@ -156,7 +183,7 @@ accelerator_inline float half2float(half h)
  f = __half2float(h);
#else
  Grid_half hh;
  hh.x = hr.x;
  hh.x = h.x;
  f=  sfw_half_to_float(hh);
#endif
  return f;

@@ -148,10 +148,14 @@ accelerator_inline void sub (ComplexF * __restrict__ y,const ComplexF * __restri
accelerator_inline void add (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); }

//conjugate already supported for complex
accelerator_inline ComplexF timesI(const ComplexF &r)     { return(r*ComplexF(0.0,1.0));}
accelerator_inline ComplexD timesI(const ComplexD &r)     { return(r*ComplexD(0.0,1.0));}
accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));}
accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));}
accelerator_inline ComplexF timesI(const ComplexF &r)     { return(ComplexF(-r.imag(),r.real()));}
accelerator_inline ComplexD timesI(const ComplexD &r)     { return(ComplexD(-r.imag(),r.real()));}
accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(ComplexF(r.imag(),-r.real()));}
accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(ComplexD(r.imag(),-r.real()));}
//accelerator_inline ComplexF timesI(const ComplexF &r)     { return(r*ComplexF(0.0,1.0));}
//accelerator_inline ComplexD timesI(const ComplexD &r)     { return(r*ComplexD(0.0,1.0));}
//accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));}
//accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));}

// define projections to real and imaginay parts
accelerator_inline ComplexF projReal(const ComplexF &r){return( ComplexF(r.real(), 0.0));}

File diff suppressed because it is too large

@@ -7,20 +7,20 @@ template<class vobj>
class SimpleCompressor {
public:
  void Point(int) {};
  accelerator_inline int  CommDatumSize(void) { return sizeof(vobj); }
  accelerator_inline bool DecompressionStep(void) { return false; }
  template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) { buf[o]=in; }
  accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o){
  accelerator_inline int  CommDatumSize(void) const { return sizeof(vobj); }
  accelerator_inline bool DecompressionStep(void) const { return false; }
  template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) const { buf[o]=in; }
  accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o) const {
    exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type);
  }
  accelerator_inline void Decompress(vobj *out,vobj *in, int o){ assert(0); }
  accelerator_inline void Decompress(vobj *out,vobj *in, int o) const { assert(0); }
  accelerator_inline void CompressExchange(vobj *out0,vobj *out1,const vobj *in,
			       int j,int k, int m,int type){
			       int j,int k, int m,int type) const {
    exchange(out0[j],out1[j],in[k],in[m],type);
  }
  // For cshift. Cshift should drop compressor coupling altogether
  // because I had to decouple the code from the Stencil anyway
  accelerator_inline vobj operator() (const vobj &arg) {
  accelerator_inline vobj operator() (const vobj &arg) const {
    return arg;
  }
};
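The only change to SimpleCompressor is const-qualifying its accessors, so a compressor held by const reference or captured by value into a const context (as happens in accelerator kernels) can still call them. A minimal illustration of why the qualifier is required; the class and function names here are illustrative, not Grid's:

// Sketch: a non-const member cannot be called through a const object,
// which is exactly what happens when a compressor is passed by const&.
#include <cstddef>

struct Compressor {
  std::size_t CommDatumSize() const { return sizeof(double); } // const: usable below
  // std::size_t CommDatumSize() { return sizeof(double); }    // non-const: would not compile below
};

std::size_t haloBytes(const Compressor &c, std::size_t sites)
{
  return sites * c.CommDatumSize();   // requires the const qualifier
}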
 
@@ -147,16 +147,16 @@ class CartesianStencilAccelerator {
  cobj* u_recv_buf_p;
  cobj* u_send_buf_p;

  accelerator_inline cobj *CommBuf(void) { return u_recv_buf_p; }
  accelerator_inline cobj *CommBuf(void) const { return u_recv_buf_p; }

  accelerator_inline int GetNodeLocal(int osite,int point) {
  accelerator_inline int GetNodeLocal(int osite,int point) const {
    return this->_entries_p[point+this->_npoints*osite]._is_local;
  }
  accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) {
  accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) const {
    ptype = this->_permute_type[point]; return & this->_entries_p[point+this->_npoints*osite];
  }

  accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) {
  accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) const {
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
    local = this->_entries_p[ent]._is_local;
    perm  = this->_entries_p[ent]._permute;
@@ -168,14 +168,14 @@ class CartesianStencilAccelerator {
    }
  }

  accelerator_inline uint64_t GetPFInfo(int ent,uint64_t base) {
  accelerator_inline uint64_t GetPFInfo(int ent,uint64_t base) const {
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
    int local = this->_entries_p[ent]._is_local;
    if (local) return  base + this->_entries_p[ent]._byte_offset;
    else       return cbase + this->_entries_p[ent]._byte_offset;
  }

  accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane)
  accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane) const
  {
    Lexicographic::CoorFromIndex(coor,lane,this->_simd_layout);
  }
@@ -269,7 +269,7 @@ public:
  std::vector<Vector<std::pair<int,int> > > face_table ;
  Vector<int> surface_list;

  Vector<StencilEntry>  _entries; // Resident in managed memory
  stencilVector<StencilEntry>  _entries; // Resident in managed memory
  std::vector<Packet> Packets;
  std::vector<Merge> Mergers;
  std::vector<Merge> MergersSHM;

@@ -64,6 +64,68 @@ void coalescedWriteNonTemporal(vobj & __restrict__ vec,const vobj & __restrict__
}
#else

#if 0
// Use the scalar as our own complex on GPU
template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline
typename vsimd::scalar_type
coalescedRead(const vsimd & __restrict__ vec,int lane=acceleratorSIMTlane(vsimd::Nsimd()))
{
  typedef typename vsimd::scalar_type S;
  S * __restrict__ p=(S *)&vec;
  return p[lane];
}
template<int ptype,class vsimd,IfSimd<vsimd> = 0> accelerator_inline
typename vsimd::scalar_type
coalescedReadPermute(const vsimd & __restrict__ vec,int doperm,int lane=acceleratorSIMTlane(vsimd::Nsimd()))
{
  typedef typename vsimd::scalar_type S;

  S * __restrict__ p=(S *)&vec;
  int mask = vsimd::Nsimd() >> (ptype + 1);
  int plane= doperm ? lane ^ mask : lane;
  return p[plane];
}
template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline
void coalescedWrite(vsimd & __restrict__ vec,
		    const typename vsimd::scalar_type & __restrict__ extracted,
		    int lane=acceleratorSIMTlane(vsimd::Nsimd()))
{
  typedef typename vsimd::scalar_type S;
  S * __restrict__ p=(S *)&vec;
  p[lane]=extracted;
}
#else
template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline
typename vsimd::vector_type::datum
coalescedRead(const vsimd & __restrict__ vec,int lane=acceleratorSIMTlane(vsimd::Nsimd()))
{
  typedef typename vsimd::vector_type::datum S;
  S * __restrict__ p=(S *)&vec;
  return p[lane];
}
template<int ptype,class vsimd,IfSimd<vsimd> = 0> accelerator_inline
typename vsimd::vector_type::datum
coalescedReadPermute(const vsimd & __restrict__ vec,int doperm,int lane=acceleratorSIMTlane(vsimd::Nsimd()))
{
  typedef typename vsimd::vector_type::datum S;

  S * __restrict__ p=(S *)&vec;
  int mask = vsimd::Nsimd() >> (ptype + 1);
  int plane= doperm ? lane ^ mask : lane;
  return p[plane];
}
template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline
void coalescedWrite(vsimd & __restrict__ vec,
		    const typename vsimd::vector_type::datum & __restrict__ extracted,
		    int lane=acceleratorSIMTlane(vsimd::Nsimd()))
{
  typedef typename vsimd::vector_type::datum S;
  S * __restrict__ p=(S *)&vec;
  p[lane]=extracted;
}
#endif
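coalescedReadPermute selects each SIMT lane's partner lane by XOR-ing the lane index with a mask derived from the permute type, mask = Nsimd >> (ptype + 1). A small host-side sketch of the resulting lane-to-plane mapping (the SIMD width here is an illustrative value):

// Sketch: for Nsimd = 8 and ptype = 0 the mask is 4, so the two halves of the
// vector swap (0<->4, 1<->5, ...); each larger ptype halves the mask and swaps
// correspondingly finer sub-blocks.
#include <cstdio>

int main()
{
  const int Nsimd = 8;                         // illustrative SIMD width
  for (int ptype = 0; (Nsimd >> (ptype + 1)) > 0; ptype++) {
    int mask = Nsimd >> (ptype + 1);
    std::printf("ptype %d mask %d:", ptype, mask);
    for (int lane = 0; lane < Nsimd; lane++)
      std::printf(" %d->%d", lane, lane ^ mask);
    std::printf("\n");
  }
  return 0;
}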
//////////////////////////////////////////
// Extract and insert slices on the GPU
//////////////////////////////////////////

@@ -92,7 +92,6 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
{
  // need a check for the group type?
  iMatrix<vtype,N> ret(arg);
  vtype rnrm;
  vtype nrm;
  vtype inner;
  for(int c1=0;c1<N;c1++){
@@ -118,7 +117,19 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
	ret._internal[b][c] -= pr * ret._internal[c1][c];
      }
    }

  }

  // Normalise last row
  {
    int c1 = N-1;
    zeroit(inner);
    for(int c2=0;c2<N;c2++)
      inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);

    nrm = sqrt(inner);
    nrm = 1.0/nrm;
    for(int c2=0;c2<N;c2++)
      ret._internal[c1][c2]*= nrm;
  }
  // assuming the determinant is ok
  return ret;
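The block added to ProjectOnGroup normalises the last row, which the Gram-Schmidt loop above it orthogonalises against but never rescales: the row is divided by the square root of its inner product with itself. A scalar sketch of the same step using std::complex (the matrix layout and function name are illustrative):

// Sketch: normalise the last row of an N x N complex matrix in place.
#include <cmath>
#include <complex>

template <int N>
void normaliseLastRow(std::complex<double> (&m)[N][N])
{
  double inner = 0.0;
  for (int c2 = 0; c2 < N; c2++)
    inner += std::norm(m[N - 1][c2]);          // |m[N-1][c2]|^2
  const double invnrm = 1.0 / std::sqrt(inner);
  for (int c2 = 0; c2 < N; c2++)
    m[N - 1][c2] *= invnrm;
}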
 
@@ -1,6 +1,7 @@
#include <Grid/GridCore.h>

NAMESPACE_BEGIN(Grid);
int      acceleratorAbortOnGpuError=1;
uint32_t accelerator_threads=2;
uint32_t acceleratorThreads(void)       {return accelerator_threads;};
void     acceleratorThreads(uint32_t t) {accelerator_threads = t;};
@@ -21,22 +22,26 @@ void acceleratorInit(void)
#define ENV_RANK_SLURM         "SLURM_PROCID"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_MVAPICH       "MV2_COMM_WORLD_RANK"
  // We extract the local rank initialization using an environment variable
  if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
    printf("OPENMPI detected\n");
    rank = atoi(localRankStr);
  } else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
    printf("MVAPICH detected\n");
    rank = atoi(localRankStr);
  } else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
    printf("SLURM detected\n");
    rank = atoi(localRankStr);
  } else {
    printf("MPI version is unknown - bad things may happen\n");
  }
  if ((localRankStr = getenv(ENV_RANK_OMPI   )) != NULL) { world_rank = atoi(localRankStr);}
  if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
  if ((localRankStr = getenv(ENV_RANK_SLURM  )) != NULL) { world_rank = atoi(localRankStr);}
  // We extract the local rank initialization using an environment variable
  if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) {
    if (!world_rank)
      printf("OPENMPI detected\n");
    rank = atoi(localRankStr);
  } else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) {
    if (!world_rank)
      printf("MVAPICH detected\n");
    rank = atoi(localRankStr);
  } else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) {
    if (!world_rank)
      printf("SLURM detected\n");
    rank = atoi(localRankStr);
  } else {
    if (!world_rank)
      printf("MPI version is unknown - bad things may happen\n");
  }
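The reorganised acceleratorInit reads the global rank first and then gates the "detected" messages on world_rank == 0, so only one process per job prints them. A condensed sketch of the pattern, using only the environment variables named in this hunk (the helper function is illustrative, not Grid's):

// Sketch: report the detected launcher from rank 0 only.
#include <cstdio>
#include <cstdlib>

void reportLauncher(void)
{
  int world_rank = 0;
  if (const char *s = std::getenv("MV2_COMM_WORLD_RANK")) world_rank = std::atoi(s);
  if (const char *s = std::getenv("SLURM_PROCID"))        world_rank = std::atoi(s);

  if (std::getenv("MV2_COMM_WORLD_LOCAL_RANK")) {
    if (!world_rank) std::printf("MVAPICH detected\n");
  } else if (!world_rank) {
    std::printf("MPI version is unknown - bad things may happen\n");
  }
}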

  size_t totalDeviceMem=0;
  for (int i = 0; i < nDevices; i++) {